code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
import time

start = time.perf_counter()

import tensorflow as tf
import pickle
import import_ipynb
import os

from model import Model
from utils import build_dict, build_dataset, batch_iter

# Hyper-parameters for the seq2seq summarisation model.
embedding_size = 300
num_hidden = 300
num_layers = 3
learning_rate = 0.001
beam_width = 10
keep_prob = 0.8
glove = True
batch_size = 256
num_epochs = 10

if not os.path.exists("saved_model"):
    os.mkdir("saved_model")
else:
    # A previous run exists: read the latest checkpoint name from TF's
    # 'checkpoint' index file.  Use a context manager so the file handle
    # is closed (the original left it open).
    with open("saved_model/checkpoint", "r") as ckpt_file:
        first_line = ckpt_file.read().splitlines()[0]
    old_model_checkpoint_path = "".join(["saved_model/", first_line.split('"')[1]])

print("Building dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("train", toy=True)
print("Loading training dataset...")
train_x, train_y = build_dataset("train", word_dict, article_max_len, summary_max_len, toy=True)

with tf.Session() as sess:
    model = Model(reversed_dict, article_max_len, summary_max_len, embedding_size,
                  num_hidden, num_layers, learning_rate, beam_width, keep_prob, glove)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables())
    # 'old_model_checkpoint_path' only exists when the else-branch above ran,
    # i.e. when there is a previous checkpoint to resume from.
    if 'old_model_checkpoint_path' in globals():
        print("Continuing from previous trained model:", old_model_checkpoint_path, "...")
        saver.restore(sess, old_model_checkpoint_path)

    batches = batch_iter(train_x, train_y, batch_size, num_epochs)
    num_batches_per_epoch = (len(train_x) - 1) // batch_size + 1
    print("\nIteration starts.")
    print("Number of batches per epoch :", num_batches_per_epoch)

    for batch_x, batch_y in batches:
        # Effective (non-padding, id != 0) length of every encoder input.
        batch_x_len = list(map(lambda x: len([y for y in x if y != 0]), batch_x))
        # Decoder input: target shifted right, prefixed with the <s> start token.
        batch_decoder_input = list(map(lambda x: [word_dict["<s>"]] + list(x), batch_y))
        batch_decoder_len = list(map(lambda x: len([y for y in x if y != 0]), batch_decoder_input))
        # Decoder target: summary followed by the </s> end token.
        batch_decoder_output = list(map(lambda x: list(x) + [word_dict["</s>"]], batch_y))
        # Pad both decoder sequences up to the maximum summary length.
        batch_decoder_input = list(
            map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_input))
        batch_decoder_output = list(
            map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_output))

        train_feed_dict = {
            model.batch_size: len(batch_x),
            model.X: batch_x,
            model.X_len: batch_x_len,
            model.decoder_input: batch_decoder_input,
            model.decoder_len: batch_decoder_len,
            model.decoder_target: batch_decoder_output,
        }

        _, step, loss = sess.run([model.update, model.global_step, model.loss],
                                 feed_dict=train_feed_dict)

        if step % 1000 == 0:
            print("step {0}: loss = {1}".format(step, loss))

        # End of an epoch: checkpoint the model and report elapsed wall time.
        if step % num_batches_per_epoch == 0:
            hours, rem = divmod(time.perf_counter() - start, 3600)
            minutes, seconds = divmod(rem, 60)
            saver.save(sess, "./saved_model/model.ckpt", global_step=step)
            print(" Epoch {0}: Model is saved.".format(step // num_batches_per_epoch),
                  "Elapsed: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds), "\n")
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Monthly household ice-cream expenditure, indexed from January 2003.
df_ice = pd.read_csv('input/icecream.csv', skiprows=2, header=None)
df_ice.columns = ['year', 'month', 'expenditure_yen']
y = pd.Series(df_ice.expenditure_yen.values,
              index=pd.date_range('2003-1', periods=len(df_ice), freq='M'))
y.plot()

# Augmented Dickey-Fuller unit-root tests on the first 100 observations,
# one per deterministic-term specification supported by statsmodels.
from statsmodels.tsa import stattools
ctt = stattools.adfuller(y[:100], regression='ctt')  # const + linear + quadratic trend
ct = stattools.adfuller(y[:100], regression='ct')    # const + linear trend
c = stattools.adfuller(y[:100], regression='c')      # const only
nc = stattools.adfuller(y[:100], regression='nc')    # no deterministic terms
print(ctt)
print(ct)
print(c)
print(nc)

# Repeat the ADF tests on the first difference of the series.
y_diff = y.diff()[:100].dropna()
y_diff.plot()
ctt = stattools.adfuller(y_diff, regression='ctt')
ct = stattools.adfuller(y_diff, regression='ct')
c = stattools.adfuller(y_diff, regression='c')
nc = stattools.adfuller(y_diff, regression='nc')
print(ctt)
print(ct)
print(c)
print(nc)

# Grid-search the AR lag order, reporting AIC for lags 1..20.
# NOTE(review): the original re-creates the AR model before each fit,
# presumably to start from a fresh instance — kept as-is.
from statsmodels.tsa import ar_model
model = ar_model.AR(y_diff)
for lag in range(20):
    results = model.fit(maxlag=lag + 1)
    model = ar_model.AR(y_diff)
    print('lag = ', lag + 1, 'aic : ', results.aic)

# Let statsmodels select the lag automatically (up to 12) via AIC.
model = ar_model.AR(y_diff)
results11 = model.fit(maxlag=12, ic='aic')
results11.k_ar
res11 = results11.resid
plt.bar(range(len(res11)), res11)

# Partial autocorrelation of the AR residuals.
from statsmodels.graphics import tsaplots
tsaplots.plot_pacf(res11, lags=40)

# Observed differences vs in-sample fit plus a dynamic forecast.
plt.plot(y.diff().dropna().values, label='observation')
plt.plot(np.hstack([y_diff[:11], results11.fittedvalues,
                    results11.predict(98 - 11, 107, dynamic=True)]), '--', label='forecast')

# ARMA order selection by AIC/BIC, then fit an ARMA(2, 2).
from statsmodels.tsa import stattools
info_criteria = stattools.arma_order_select_ic(y_diff, ic=['aic', 'bic'])
info_criteria.aic_min_order, info_criteria.bic_min_order

from statsmodels.tsa.arima_model import ARMA
model = ARMA(y_diff, (2, 2))
results = model.fit()
res = results.resid
plt.bar(range(len(res)), res)
from statsmodels.graphics import tsaplots
tsaplots.plot_pacf(res, lags=40)
plt.plot(y.diff().dropna().values, label='observation')
plt.plot(np.hstack([y_diff[:2], results.fittedvalues,
                    results.predict(99 - 2, 115, dynamic=True)]), '--', label='forecast')

# Sanity check on a synthetic random walk: the level series should fail to
# reject a unit root, while its first difference should reject it.
y = pd.Series(np.random.randn(1000), index=pd.date_range('2000-1-1', periods=1000))
y = y.cumsum()
y.plot()
from statsmodels.tsa import stattools
ctt = stattools.adfuller(y, regression='ctt')
ct = stattools.adfuller(y, regression='ct')
c = stattools.adfuller(y, regression='c')
nc = stattools.adfuller(y, regression='nc')
print(ctt)
print(ct)
print(c)
print(nc)
y_diff = y.diff().dropna()
y_diff.plot()
ctt = stattools.adfuller(y_diff, regression='ctt')
ct = stattools.adfuller(y_diff, regression='ct')
c = stattools.adfuller(y_diff, regression='c')
nc = stattools.adfuller(y_diff, regression='nc')
print(ctt)
print(ct)
print(c)
print(nc)
```
| github_jupyter |
# Python Bindings Demo
This is a very simple demo / playground / testing site for the Python Bindings for BART.
This is mainly used to show off Numpy interoperability and give a basic sense for how more complex tools will look in Python.
## Overview
Currently, Python users can interact with BART via a command-line wrapper. For example, the following line of Python code generates a simple Shepp-Logan phantom in K-Space and reconstructs the original image via inverse FFT.
```
shepp_ksp = bart(1, 'phantom -k -x 128')
shepp_recon = bart(1, 'fft -i 3', shepp_ksp)
```
#### The Python bindings, `bartpy`, build on this wrapper in the following ways:
- 'Pythonic' interface with explicit functions and objects
- (Mostly) automated generation to minimize the maintenance burden
- Access to lower-level operators (e.g., `linops` submodule) to allow users to use BART functions seamlessly alongside Python libraries like Numpy, Sigpy, SciPy, Tensorflow or pyTorch
- RAM-based memory management
- Current wrapper writes data to disk, invokes the BART tools from the command line, and then reads data from disk
- Memory-based approach is ostensibly faster
## Getting Started
To begin, we import `numpy` and `matplotlib` for array manipulation and data visualization. We will then import the Python bindings
```
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
%matplotlib inline
```
### BART-related Imports
We will now import `bartpy` tools for generating phantoms and performing the Fast Fourier Transform (FFT), as well as utilities for interacting with `.cfl` files.
```
from bartpy.simu.phantom import phantom
from bartpy.num.fft import fft, ifft
from bartpy.utils.cfl import readcfl, writecfl
```
## A closer look
<span style="font-size: 1.3em;">`phantom(dims, ksp, d3, ptype)`</span>
- `dims`: iterable specifying dimensions of the phantom. Cannot exceed 16, and follows BART dimension conventions
- `ksp`: boolean value indicating whether or not to generate the phantom in k-space
- `d3`: boolean value indicating whether or not to generate a 3D phantom
- `ptype`: Specifies type of phantom.
```
# Generate a 2D Shepp-Logan phantom in image space and display it.
# (The original cell had a stray no-op `phantom` expression and an
# unbalanced closing parenthesis in the imshow call.)
shepp = phantom([128, 128], ksp=False, d3=False)
plt.imshow(shepp)
```
## Reconstruction via FFT
### Command Line
Here is a simple recon task completed with BART on the command line.
```
# Command-line recon: generate the BART-logo phantom in k-space, then apply
# an inverse FFT (flags=3) to reconstruct the image, writing .cfl files.
!bart phantom -x 128 -k -B logo
!bart fft -i 3 logo logo_recon
# Load the reconstruction from disk and show its magnitude (transposed).
gnd = readcfl('logo_recon')
plt.imshow(abs(gnd.T))
```
### Pure Python
Now here is our task completed entirely in Python, using `bartpy`
<span style="color: red">FIXME: The order of dimensions
is wrong</span>
```
# Same reconstruction entirely in Python via bartpy: k-space BART-logo phantom,
# log-magnitude display, then inverse FFT (flags=3) and magnitude display.
logo_ksp = phantom([128, 128], ksp=True, ptype='bart')
plt.imshow(np.log(abs(logo_ksp)))
logo_recon = ifft(logo_ksp, flags=3)
plt.imshow(abs(logo_recon))
```
This is a brief example of the more 'Pythonic' approach offered by the Python bindings.
| github_jupyter |
# Convolutional Neural Networks
In this notebook we are going to explore the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset (you don't need to download this dataset, we are going to use keras to download this dataset). This is a great dataset to train models for visual recognition and to start to build some models in Convolutional Neural Networks (CNN). This dataset consists of 60,000 32x32 colour images in 10 classes, with 6,000 images per class. There are 50,000 training images and 10,000 test images
As CNNs require high computational effort, we are going to use a reduced version of this training dataset. Given our time and computational resource restrictions, we are going to select 3 categories (airplane, horse and truck).
In this notebook, we are going to build two different models in order to classify the objects. First, we are going to build Shallow Neural Network based just in a few Fully-Connected Layers (aka Multi-layer Perceptron) and we are going to understand why is not feasible to classify images with such networks. Then, we are going to build a CNN network to perform the same task and evaluate its performance.
Again, in order to have a clean notebook, some functions are implemented in the file *utils.py* (e.g., plot_loss_and_accuracy).
Summary:
- [Downloading CIFAR-10 Dataset](#cifar)
- [Data Pre-processing](#reduce)
- [Reducing the Dataset](#red)
- [Normalising the Dataset](#normalise)
- [One-hot Encoding](#onehot)
- [Building the Shallow Neural Network](#shallow)
- [Training the Model](#train_shallow)
- [Prediction and Performance Analysis](#performance_sh)
- [Building the Convolutional Neural Network](#cnn)
- [Training the Model](#train_cnn)
- [Prediction and Performance Analysis](#performance_cnn)
```
# Standard libraries
import numpy as np # written in C, is faster and robust library for numerical and matrix operations
import pandas as pd # data manipulation library, it is widely used for data analysis and relies on numpy library.
import matplotlib.pyplot as plt # for plotting
import seaborn as sns # Plot nicely =) . Importing seaborn modifies the default matplotlib color schemes and plot
# styles to improve readability and aesthetics.
# Auxiliar functions
from utils import *
# the following to lines will tell to the python kernel to always update the kernel for every utils.py
# modification, without the need of restarting the kernel.
%load_ext autoreload
%autoreload 2
# using the 'inline' backend, your matplotlib graphs will be included in your notebook, next to the code
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
```
## Downloading CIFAR-10 Dataset
<a id='cifar'></a>
Keras provides several [datasets](https://keras.io/datasets/) for experimentation, this makes it easy to try new network architectures. In order to download the CIFAR-10 dataset, we need to import the library "[cifar10](https://keras.io/datasets/#cifar100-small-image-classification)" and call the method *load_data()".
```
from keras.datasets import cifar10 # Implements the method to download the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data() # this will download the dataset
# by default, the dataset is split into 50,000 images for training and
# 10,000 images for testing; we keep this configuration
y_train = y_train.ravel() # flatten the (N, 1) label array to shape (N,)
y_test = y_test.ravel() # flatten the (N, 1) label array to shape (N,)
```
Let's visualise how the images looks like. To plot the images we are going to use the function **plot_images** (see *utils.py*)
```
# Class names taken from https://www.cs.toronto.edu/~kriz/cifar.html;
# the array index matches the integer label:
# 0 1 2 3 4 5 6 7 8 9
class_name = np.array(
['airplane', 'automobile','bird','cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'])
# Show a few sample images with their labels (helper from utils.py).
plot_samples(x_train, y_train, class_name)
```
## Data Pre-processing
<a id='reduce'></a>
As CNNs require high computational effort, we are going to use a reduced training dataset. Given our time and computational resource restrictions, we are going to select 3 categories (airplane, horse and truck) and keep 1500 images in total.
Once we have obtained the reduced version, we are going to normalise the images and generate the one-hot encoding representation of the labels.
### Reducing the Dataset
<a id='red'></a>
```
# Select just 3 classes (airplane=0, horse=7, truck=9) to keep the tutorial feasible.
selected_idx = np.array([0, 7, 9])
n_images = 1500

# Boolean masks selecting only samples belonging to the chosen classes.
y_train_idx = np.isin(y_train, selected_idx)
y_test_idx = np.isin(y_test, selected_idx)
y_train_red = y_train[y_train_idx][:n_images]
x_train_red = x_train[y_train_idx][:n_images]
y_test_red = y_test[y_test_idx][:n_images]
x_test_red = x_test[y_test_idx][:n_images]

# Remap the original labels (0, 7, 9) to the compact range (0, 1, 2).
# searchsorted is safe here because selected_idx is sorted and, after the
# masking above, every remaining label is one of its entries.  (The original
# sequential in-place replacement is fragile if source/target labels overlap.)
y_train_red = np.searchsorted(selected_idx, y_train_red)
y_test_red = np.searchsorted(selected_idx, y_test_red)
y_test_red[:4]

# visualising the images in the reduced dataset
plot_samples(x_train_red, y_train_red, class_name[selected_idx])
```
**Question 1**: Is the reduced dataset imbalanced?
**Question 2**: As you can see, the images have low resolution (32x32x3), how this can affect the model?
### Normalising the Dataset
<a id='normalise'></a>
Here we are going to normalise the dataset. In this task, we are going to divide each image by 255.0, as the images are represented as 'uint8' and we know that the range is from 0 to 255. By doing so, the range of the images will be between 0 and 1.
```
# Normalising the images: cast from uint8 to float32, then scale pixel
# values from [0, 255] down to [0, 1].
x_train_red = x_train_red.astype('float32')
x_test_red = x_test_red.astype('float32')
x_train_red /= 255.0
x_test_red /= 255.0
```
### One-hot Encoding
<a id='onehot'></a>
The labels are encoded as integers (0, 1 and 2); as we are going to use a *softmax layer* as the output for our models, we need to convert the labels to a binary matrix. For example, the label 0 (considering that we have just 3 classes) can be represented as [1 0 0], which is the class 0.
One-hot encoding together with the softmax function will give us an interesting interpretation of the output as a probability distribution over the classes.
For this task, are going to use the function *[to_categorical](https://keras.io/utils/)*, which converts a class vector (integers) to binary class matrix.
```
# to_categorical converts integer class labels into a one-hot binary matrix.
# Imported explicitly so this cell does not depend on a bare `keras` name
# being in scope from an earlier star-import.
from keras.utils import to_categorical

y_train_oh = to_categorical(y_train_red)
y_test_oh = to_categorical(y_test_red)
# Spot-check a few label -> one-hot conversions.
print('Label: ', y_train_red[0], ' one-hot: ', y_train_oh[0])
print('Label: ', y_train_red[810], ' one-hot: ', y_train_oh[810])
print('Label: ', y_test_red[20], ' one-hot: ', y_test_oh[20])
```
## Building the Shallow Neural Network
<a id='shallow'></a>
Here we are going to build a Shallow Neural Network with 2 Fully Connected layers and one output layer. Basically, we are implementing a Multi-Layer Perceptron classifier.
To build the model, we are going use the following components from Keras:
- [Sequencial](https://keras.io/models/sequential/): allows us to create models layer-by-layer.
- [Dense](https://keras.io/layers/core/): provides a regular fully-connected layer
- [Dropout](https://keras.io/layers/core/#dropout): provides dropout regularisation
Basically, we are going to define the sequence of our model by using _Sequential()_, which include the layers:
```python
model = Sequential()
model.add(Dense(...))
...
```
once created the model we can configure the model for training by using the method [compile](https://keras.io/models/model/). Here we need to define the [loss](https://keras.io/losses/) function (mean squared error, categorical cross entropy, among others.), the [optimizer](https://keras.io/optimizers/) (Stochastic gradient descent, RMSprop, adam, among others) and the [metric](https://keras.io/metrics/) to define the evaluation metric to be used to evaluate the performance of the model in the training step, as follows:
```python
model.compile(loss = "...",
optimizer = "...")
```
Also, we have the option to see a summary representation of the model by using the function [summary](https://keras.io/models/about-keras-models/#about-keras-models). This function summarises the model and tells us the number of parameters that we need to tune.
```
from keras.models import Sequential # implements the Sequential model container
from keras.layers import Dense # implements the fully connected layer
from keras.layers import Dropout # implements Dropout regularisation
from keras.layers import Flatten # implements the Flatten layer
mlp = Sequential()
# Flatten reshapes each input image into a 1D array of 32 x 32 x 3 = 3072 values;
# each pixel channel is one input feature for this model.
mlp.add(Flatten(input_shape=x_train_red.shape[1:])) # x_train_red.shape[1:] is the per-sample shape
# First hidden layer: 1024 neurons with ReLU activation
mlp.add(Dense(1024, activation='relu'))
mlp.add(Dropout(0.7)) # Dropout with rate 0.7: drops 70% of activations (keep probability 30%)
# Second hidden layer: 1024 neurons with ReLU activation
mlp.add(Dense(1024, activation='relu'))
mlp.add(Dropout(0.7))# Dropout with rate 0.7: drops 70% of activations (keep probability 30%)
# Output layer: one neuron per class with softmax activation
mlp.add(Dense(y_test_oh.shape[1], activation='softmax'))
```
Summarising the model
```
# Print layer-by-layer output shapes and the total trainable parameter count.
mlp.summary()
# Compile:
# Optimiser: rmsprop
# Loss: categorical_crossentropy, as this is multi-class classification
#       with one-hot-encoded targets
# Metric: accuracy
mlp.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
### Training the Model
<a id=train_shallow></a>
Once the model is defined, we need to train it by using the function [fit](https://keras.io/models/model/). This function performs the optimisation step. Here, we can define parameters such as:
- batch size: defines the number of samples that will be propagated through the network
- epochs: defines the number of times in which all the training set (x_train_scaled) are used once to update the weights
- validation split: defines the percentage of training data to be used for validation
- among others (click [here](https://keras.io/models/model/) for more information)
This function return the _history_ of the training, that can be used for further performance analysis.
```
# Training the model (this will take a few minutes).
# batch_size: samples per gradient update; epochs: full passes over the data;
# validation_split: last 20% of the training data held out for validation.
history = mlp.fit(x_train_red,
y_train_oh,
batch_size = 256,
epochs = 100,
validation_split = 0.2,
verbose = 1)
```
### Prediction and Performance Analysis
<a id='performance_sh'></a>
Here we plot the 'loss' and the 'Accuracy' from the training step.
```
# Plot training/validation loss and accuracy curves (helper from utils.py).
plot_loss_and_accuracy_am2(history=history)
```
Let's evaluate the performance of this model under unseen data (x_test)
```
# Evaluate the MLP on the held-out test set (verbose=0 hides the progress bar).
loss_value_mlp, acc_value_mlp = mlp.evaluate(x_test_red, y_test_oh, verbose=0)
print('Loss value: ', loss_value_mlp)
print('Acurracy value: ', acc_value_mlp)
```
## Building the Convolutional Neural Network
<a id='cnn'></a>
Here we are going to build a Convolutional Neural Network (CNN) for image classification. Given the time and computational resource limitations, we are going to build a very simple CNN; however, more complex and deep CNN architectures such as VGG, Inception and ResNet are the state of the art in computer vision and they surpass human performance in image classification tasks.
To build the model, we are going use the following components from Keras:
- [Sequencial](https://keras.io/models/sequential/): allows us to create models layer-by-layer.
- [Dense](https://keras.io/layers/core/): provides a regular fully-connected layer
- [Dropout](https://keras.io/layers/core/#dropout): provides dropout regularisation
- [Conv2D](https://keras.io/layers/convolutional/): implement 2D convolution function
- [BatchNormalization](https://keras.io/layers/normalization/): normalize the activations of the previous layer at each batch
- [MaxPooling2D](https://keras.io/layers/pooling/): provides pooling operation for spatial data
Basically, we are going to define the sequence of our model by using _Sequential()_, which include the layers:
```python
model = Sequential()
model.add(Conv2D(...))
...
```
once created the model the training configuration is the same as [before](#shallow):
```python
model.compile(loss = "...",
optimizer = "...")
```
```
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation
from keras.layers import Dropout, Conv2D, MaxPooling2D, BatchNormalization
model_cnn = Sequential()
# First layer:
#     2D convolution:
#         Depth: 32
#         Kernel shape: 3 x 3
#         Stride: 1 (default)
#         Activation layer: relu
#         Padding: valid
#     Input shape: 32 x 32 x 3 (3D representation, not flattened as in the MLP);
#     the input is an image, not a flattened array
model_cnn.add(Conv2D(32, (3, 3), padding='valid', activation = 'relu',
input_shape=x_train_red.shape[1:]))
model_cnn.add(BatchNormalization())
model_cnn.add(MaxPooling2D(pool_size=(5,5))) # max pooling with kernel size 5x5
model_cnn.add(Dropout(0.7)) # Dropout rate 0.7: drops 70% of activations (keep probability 30%)
# Second layer:
#     2D convolution:
#         Depth: 64
#         Kernel shape: 3 x 3
#         Stride: 1 (default)
#         Activation layer: relu
#         Padding: valid
model_cnn.add(Conv2D(64, (3, 3), padding='valid', activation = 'relu'))
model_cnn.add(BatchNormalization())
model_cnn.add(MaxPooling2D(pool_size=(2,2)))
model_cnn.add(Dropout(0.7)) # Dropout rate 0.7: drops 70% of activations (keep probability 30%)
# Flatten the output of the second layer to become the input of the
# fully-connected layer (flattened representation, as in the MLP)
model_cnn.add(Flatten())
# Fully-connected layer: 128 neurons with ReLU activation
model_cnn.add(Dense(128, activation = 'relu'))
# Output layer: one neuron per class with softmax activation
model_cnn.add(Dense(y_test_oh.shape[1], activation='softmax'))
```
Summarising the model
```
# Print layer-by-layer output shapes and the total trainable parameter count.
model_cnn.summary()
```
As you can see, the CNN model (53,059 parameters) has less parameters than the MLP model (4,199,427 parameters). So this model is less prone to overfit.
```
# Compile:
# Optimiser: adam
# Loss: categorical_crossentropy, as this is multi-class classification
#       with one-hot-encoded targets
# Metric: accuracy
model_cnn.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
### Training the Model
<a id=train_cnn></a>
```
# Training the CNN (this will take a few minutes); same batch size, epoch
# count and 20% validation split as the MLP, for a fair comparison.
history_cnn = model_cnn.fit(x_train_red,
y_train_oh,
batch_size = 256,
epochs = 100,
validation_split = 0.2,
verbose = 1)
```
### Prediction and Performance Analysis
<a id='performance_cnn'></a>
```
# Plot training/validation loss and accuracy curves (helper from utils.py).
plot_loss_and_accuracy_am2(history=history_cnn)
```
Let's evaluate the performance of this model under unseen data (x_test)
```
# Evaluate the CNN on the held-out test set (verbose=0 hides the progress bar).
# The original cell called evaluate twice, discarding the first result; the
# redundant call is removed.
loss_value_cnn, acc_value_cnn = model_cnn.evaluate(x_test_red, y_test_oh, verbose=0)
print('Loss value: ', loss_value_cnn)
print('Accuracy value: ', acc_value_cnn)
```
**Task**: Discuss CNN and MLP results.
**Your Turn**: Now we changed our mind, we found that detecting airplanes, horses and trucks is a bit boring :(. We would like to detect whether an image has a bird, a dog or a ship =)
Implement a CNN to classify the images of the new reduced dataset.
**Creating the dataset**
```
# Select 3 new classes (bird=2, dog=5, ship=8) to keep the tutorial feasible.
selected_idx = np.array([2, 5, 8])
n_images = 1500

# Boolean masks selecting only samples belonging to the chosen classes.
y_train_idx = np.isin(y_train, selected_idx)
y_test_idx = np.isin(y_test, selected_idx)
y_train_new = y_train[y_train_idx][:n_images]
x_train_new = x_train[y_train_idx][:n_images]
y_test_new = y_test[y_test_idx][:n_images]
x_test_new = x_test[y_test_idx][:n_images]

# Remap the original labels (2, 5, 8) to (0, 1, 2) respectively.  (The
# original comment wrongly said "0, 7 and 9".)  searchsorted is safe because
# selected_idx is sorted and only its entries remain after the masking above.
y_train_new = np.searchsorted(selected_idx, y_train_new)
y_test_new = np.searchsorted(selected_idx, y_test_new)

# visualising the images in the reduced dataset
plot_samples(x_train_new, y_train_new, class_name[selected_idx])
```
**Pre-processing the new dataset**
```
# Normalising the data: cast to float32 and scale pixel values from
# [0, 255] down to [0, 1].
x_train_new = x_train_new.astype('float32')
x_test_new = x_test_new.astype('float32')
x_train_new /= 255.0
x_test_new /= 255.0

# One-hot representation of the labels; imported explicitly so this cell
# does not depend on a bare `keras` name from an earlier star-import.
from keras.utils import to_categorical
y_train_oh_n = to_categorical(y_train_new)
y_test_oh_n = to_categorical(y_test_new)
# Spot-check a few label -> one-hot conversions.
print('Label: ', y_train_new[0], ' one-hot: ', y_train_oh_n[0])
print('Label: ', y_train_new[810], ' one-hot: ', y_train_oh_n[810])
print('Label: ', y_test_new[20], ' one-hot: ', y_test_oh_n[20])
```
**Step 1**: Create the CNN Model.
For example, you can try (Danger, Will Robinson! This model can overfits):
```python
model_cnn_new = Sequential()
model_cnn_new.add(Conv2D(32, (3, 3), padding='valid', activation = 'relu',
input_shape=x_train_new.shape[1:]))
model_cnn_new.add(BatchNormalization())
model_cnn_new.add(MaxPooling2D(pool_size=(2,2)))
model_cnn_new.add(Dropout(0.7))
# You can stack several convolution layers before apply BatchNormalization, MaxPooling2D
# and Dropout
model_cnn_new.add(Conv2D(32, (3, 3), padding='valid', activation = 'relu',
input_shape=x_train_new.shape[1:]))
model_cnn_new.add(Conv2D(16, (3, 3), padding='valid', activation = 'relu'))
model_cnn_new.add(Conv2D(64, (3, 3), padding='valid', activation = 'relu'))
model_cnn_new.add(BatchNormalization())
# You can also don't use max pooling... it is up to you
#model_cnn_new.add(MaxPooling2D(pool_size=(2,2))) # this line can lead to negative dimension problem
model_cnn_new.add(Dropout(0.7))
model_cnn_new.add(Conv2D(32, (5, 5), padding='valid', activation = 'relu'))
model_cnn_new.add(BatchNormalization())
model_cnn_new.add(MaxPooling2D(pool_size=(2,2)))
model_cnn_new.add(Dropout(0.7))
model_cnn_new.add(Flatten())
model_cnn_new.add(Dense(128, activation = 'relu'))
model_cnn_new.add(Dense(y_test_oh_n.shape[1], activation='softmax'))
```
**Step 2**: Summarise the model.
For example, you can try:
```python
model_cnn_new.summary()
```
**Step 3**: Define the optimiser (try 'rmsprop', 'sgd', 'adagrad' or 'adadelta' if you wish), loss and metric
For example:
``` python
model_cnn_new.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
**Step 4**: Train the model, here you can define the number of epochs and batch_size that best fit for you model
For example:
```python
# this can take SEVERAL minutes or even hours.. days... if your model is quite deep
history_cnn_new = model_cnn_new.fit(x_train_new,
y_train_oh_n,
batch_size = 256,
epochs = 100,
validation_split = 0.2,
verbose = 1)
```
**Step 4**: Evaluate the model performance by using the metric that you think is the best.
For example:
```python
model_cnn_new.evaluate(x_test_new,y_test_oh_n)
loss_value_cnn_n, acc_value_cnn_n = model_cnn_new.evaluate(x_test_new, y_test_oh_n, verbose=0)
print('Loss value: ', loss_value_cnn_n)
print('Acurracy value: ', acc_value_cnn_n)
```
Plot the loss and accuracy if you wish.
| github_jupyter |
```
import numpy as np
import plotly
import plotly.graph_objs as go
from collections import deque
import pandas as pd
import plotly.express as px
# Amino-acid property table.
# NOTE(review): hard-coded absolute path to a user's OneDrive — will only
# work on the original author's machine.
aa_df = pd.read_csv("/Users/anafink/OneDrive - bwedu/Bachelor MoBi/5. Fachsemester/Python Praktikum/advanced_python_2021-22_HD/data/amino_acid_properties.csv")
metrics = {}
hydropathy_aa = {}
pI_aa = {}
hp_type_aa = {}  # NOTE(review): never populated or used below
# Map 1-letter amino-acid code -> property value.
# NOTE(review): apply(float) assumes exactly one row per 1-letter code;
# it would raise if a code appeared more than once — verify against the CSV.
hydropathy_aa = aa_df.groupby('1-letter code')['hydropathy index (Kyte-Doolittle method)'].apply(float).to_dict()
pI_aa = aa_df.groupby('1-letter code')['pI'].apply(float).to_dict()
# Metric registry used by Protein.define_metrics below.
metrics = {
"hydropathy" : hydropathy_aa,
"pI" : pI_aa,
}
class Protein:
    """A protein sequence with per-residue metric profiles (e.g. hydropathy, pI)."""

    def __init__(self, name, id, sequence):
        self.name = name          # human-readable protein name (used as plot title)
        self.id = id              # identifier string, e.g. taxonomy id
        self.sequence = sequence  # iterable of 1-letter amino-acid codes

    def define_metrics(self, metric_aa="hydropathy"):
        """Return the raw per-residue values for the chosen metric.

        Looks each residue up in the module-level `metrics` registry;
        raises KeyError for residues missing from that table.
        """
        return [metrics[metric_aa][aa] for aa in self.sequence]

    def get_aa_pos(self):
        """Return the 1-based residue positions (x-axis of the plot)."""
        return list(range(1, len(self.sequence) + 1))

    def get_y_values(self, metric_aa="hydropathy", window_size=5):
        """Return a sliding-window mean of the metric along the sequence.

        The window grows from 1 up to `window_size` at the start of the
        sequence (deque with maxlen), so the output has one value per residue.
        """
        window = deque([], maxlen=window_size)
        mean_values = []
        for value in self.define_metrics(metric_aa):
            window.append(value)
            mean_values.append(np.mean(window))
        return mean_values

    def plot(self, metric="hydropathy", window_size=5):
        """Build and return a plotly bar chart of the windowed metric."""
        fig = go.Figure(data=[go.Bar(x=self.get_aa_pos(),
                                     y=self.get_y_values(metric, window_size))])
        fig.update_layout(template="plotly_white", title="Protein: " + self.name)
        return fig
# NOTE(review): hard-coded absolute path to a user's OneDrive.
path = "/Users/anafink/OneDrive - bwedu/Bachelor MoBi/5. Fachsemester/Python Praktikum/uniref-P32249-filtered-identity_1.0.fasta"

# Parse the FASTA file: skip header lines (starting with '>') and collect the
# sequence lines.  The original version popped headers out of the list while
# enumerating it, which skips the element that follows each removed header;
# it also chopped the last character of every line, corrupting a final line
# without a trailing newline.
sequence_chunks = []
with open(path) as fasta_file:
    for line in fasta_file:
        if not line.startswith(">"):
            sequence_chunks.append(line.rstrip("\n"))

# Protein expects the sequence as an iterable of single characters.
GPCR183_seq = list("".join(sequence_chunks))

GPCR183 = Protein("G-protein coupled receptor 183 (P32249)", "9606", GPCR183_seq)
figure = GPCR183.plot(window_size = 10)
figure.show()
```
| github_jupyter |
Notes:
- Using pandas.cut for binning. bin_min=-inf, bin_max=inf. Binning on normalized data only.* (Other option to explore: sklearn KBinsDiscretizer. Issue is that bins cant be predefined. We need same bins for each batch of y. advantage: inverse is easily available)
- Changing y to categorical only for training. not for valid, test. so shape of y will be different. shape[batch_size, lat, lon, variable, member] Is shape ok?
- Please check custom loss function in networks.py, train()- written but not verified, build_resnet_categorical(), create_predictions(), DataGenerator(), load_data()
-
ToDo
- currently works only for 2 output variables.
- implement weighted loss. train() function.
- sampling from output to create an ensemble.- done.
- use diff. in temp instead of absolute values.
- make spread-skill grid. see if spread-skill ratio close to 1 is for all places or how is it distributed.
```
%load_ext autoreload
%autoreload 2
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from src.data_generator import *
from src.train import *
from src.utils import *
from src.networks import *
# NOTE(review): `tf` is used here before the explicit import on the next line —
# presumably it leaks in via one of the star-imports above (e.g. src.networks);
# verify.
tf.__version__ #gotta check. some issue. not able to use gpu.
import tensorflow as tf
# Report visible GPUs and pin this process to GPU 0; limit_mem() (from src.utils)
# configures memory usage.
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
os.environ["CUDA_VISIBLE_DEVICES"]=str(0)
limit_mem()
# Load the experiment configuration, then override data splits and paths.
args = load_args('../nn_configs/B/81.1-resnet_d3_dr_0.1.yml')
args['train_years']=['2017']
#args['valid_years']=['2018']
args['valid_years']=['2018-01-01','2018-03-31']
args['test_years']=['2018-04-01','2018-12-31']
args['model_save_dir'] ='/home/garg/data/WeatherBench/predictions/saved_models'
args['datadir']='/home/garg/data/WeatherBench/5.625deg'
# Switch the targets to the binned (categorical) representation.
args['is_categorical']=True
args['num_bins'], args['bin_min'], args['bin_max']
#num_bins=args['num_bins']
# The final conv layer must emit num_bins logits per output variable
# (2 variables, hence 2 * num_bins filters in the last entry).
args['filters'] = [128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 2*args['num_bins']]
args['loss'] = 'lat_categorical_loss'
#could change it directly in build_resnet_categorical fn. Should we?
# Build train/valid/test generators and sanity-check batch shapes; per the
# notes above, only the training targets are converted to categorical form.
dg_train, dg_valid, dg_test = load_data(**args)
x,y=dg_train[0]; print(x.shape, y.shape)
x,y=dg_valid[0]; print(x.shape, y.shape)
x,y=dg_test[0]; print(x.shape, y.shape)
#only changing train shape.
```
## Training
```
model = build_resnet_categorical(
**args, input_shape=dg_train.shape,
)
model.summary()
args['loss']
model.compile(keras.optimizers.Adam(1e-3), loss=args['loss'])
model.compile(keras.optimizers.Adam(1e-3), loss=lat_categorical_loss)
def categorical_loss(y_true, y_pred):
    """Sum of categorical cross-entropies over the output variables.

    Expects tensors of shape (batch, lat, lon, n_vars, n_bins): one
    cross-entropy term is computed per variable (axis 3) and summed.

    Args:
        y_true: one-hot (or probabilistic) target bin distributions.
        y_pred: predicted bin probabilities, same shape as y_true.

    Returns:
        Scalar tensor with the summed cross-entropy loss.
    """
    cce = tf.keras.losses.CategoricalCrossentropy()
    # Generalized from the hard-coded 2 variables (see the ToDo at the top
    # of this notebook): read the variable axis from the static shape and
    # fall back to 2 when it is unknown at trace time.
    n_vars = y_true.shape[3] or 2
    # Starting from a plain 0 is fine: += promotes it to a tensor.
    loss = 0
    for i in range(n_vars):
        loss += cce(y_true[:, :, :, i, :], y_pred[:, :, :, i, :])
    return loss
# This final compile wins: train with the locally defined categorical_loss.
model.compile(keras.optimizers.Adam(1e-3), loss=categorical_loss)
model.fit(dg_train, epochs=10, shuffle=False)
#exp_id=args['exp_id']
exp_id='categorical_v3'
model_save_dir=args['model_save_dir']
# Save both the full model and the weights alone.
model.save(f'{model_save_dir}/{exp_id}.h5')
model.save_weights(f'{model_save_dir}/{exp_id}_weights.h5')
# NOTE(review): model.fit()'s return value is not captured, so the
# commented-out history pickling would need `history = model.fit(...)` first.
#to_pickle(history.history, f'{model_save_dir}/{exp_id}_history.pkl')
```
## Predictions
```
exp_id='categorical_v3'
# Reload the saved model. The loss is mapped to a dummy (mse) purely so that
# Keras can deserialize the file — acceptable for inference only.
model = keras.models.load_model(f"{args['model_save_dir']}/{exp_id}.h5",
    custom_objects={'PeriodicConv2D': PeriodicConv2D, 'categorical_loss': tf.keras.losses.mse})
#check how to call loss correctly.
bin_min=args['bin_min']; bin_max=args['bin_max']
num_bins=args['num_bins']
# Number of ensemble members sampled from the predicted bin distributions.
member=100
preds = create_predictions(model, dg_valid, is_categorical=True,
                           num_bins=num_bins, bin_min=bin_min,
                           bin_max=bin_max, member=member)
#Check in datagenerator and train()
preds.to_netcdf('/home/garg/data/WeatherBench/predictions/categorical_v3.nc')
pred=np.asarray(preds.to_array(), dtype=np.float32).squeeze();
# Display-only: inspect the raw ensemble array and the dataset.
pred.shape
#should we change shape to (2,100,1042, 32, 64)? that's what was in test-time dropout
preds
```
## Evaluation
```
# --- Evaluation: compare the sampled ensemble against the verification data.
datadir=args['datadir']
z500_valid = load_test_data(f'{datadir}/geopotential_500', 'z').drop('level')
t850_valid = load_test_data(f'{datadir}/temperature_850', 't').drop('level')
valid = xr.merge([z500_valid, t850_valid]).sel(time=preds.time)
ensemblemean=preds.mean('member') #ensemble
# Deterministic skill of the ensemble mean, probabilistic skill (CRPS) and
# ensemble spread; spread/skill near 1 indicates a well-calibrated ensemble.
mean_rmse_p=compute_weighted_rmse(valid,ensemblemean).load(); print(mean_rmse_p)
crps=compute_weighted_crps(preds,valid).load(); print(crps)
spread=compute_weighted_meanspread(preds).load()
spread_skill_z=spread.z_mean_spread/mean_rmse_p.z_rmse
spread_skill_t=spread.t_mean_spread/mean_rmse_p.t_rmse
print(spread_skill_z, spread_skill_t)
# NOTE(review): `obs` and `pred` are only defined below — this line relies on
# out-of-order notebook execution.
obs.shape, pred.shape
#!pip install rank-histogram
from ranky import rankz
obs = np.asarray(valid.to_array(), dtype=np.float32).squeeze();
obs_z500=obs[0,...].squeeze()
obs_t850=obs[1,...].squeeze()
mask=np.ones(obs_z500.shape) #useless. #masked where 0/false.
pred=np.asarray(preds.to_array(), dtype=np.float32).squeeze();
shape=pred.shape #maybe we could change shape of pred in create_predcitions() directly
# NOTE(review): reshape changes the axis sizes but does NOT reorder the data;
# moving the member axis forward would need np.transpose/moveaxis. Confirm
# before trusting the rank histogram (the author notes it looks "weird").
pred2=pred.reshape(shape[0], shape[4],shape[1], shape[2], shape[3])
pred_z500=pred2[0,...].squeeze()
pred_t850=pred2[1,...].squeeze()
# feed into rankz function
result = rankz(obs_z500, pred_z500, mask)
# plot histogram
plt.bar(range(1,pred_z500.shape[0]+2), result[0])
# view histogram
plt.show()
#random point.
plt.hist(pred[1,20,30,40,:], label='preds')
plt.hist(obs[1,20,30,40], label='truth')
plt.legend(loc='upper right')
plt.show()
#random point.
plt.hist(pred[1,20,25,30,:], label='preds')
plt.hist(obs[1,20,25,30], label='truth')
plt.legend(loc='upper right')
plt.show()
import seaborn
seaborn.distplot(pred[1,20,30,40,:])
seaborn.distplot(pred[1,20,25,30,:])
#spread-skill is good. rank histogram weird.
#density plot doesnt look like a gaussian.
```
## Testing code below. Ignore.
```
# --- Scratch: step-by-step sampling from predicted bin probabilities.
dg=dg_valid
level_names = dg.data.isel(level=dg.output_idxs).level_names
level = dg.data.isel(level=dg.output_idxs).level
x,y=dg_valid[0]
# NOTE(review): duplicated assignment target (`preds=preds=`) — harmless,
# probably a typo.
preds=preds=model.predict(dg_valid[0])
print(preds.shape, y.shape)
bin_min=args['bin_min']; bin_max=args['bin_max']
# Bin edges (num_bins+1 of them) and bin midpoints for the categorical output.
bins=np.linspace(bin_min, bin_max, num_bins+1)
bins, bins.shape
interval=(bin_max-bin_min)/num_bins
print(interval)
bin_mids=np.linspace(bin_min+0.5*interval, bin_max-0.5*interval, num_bins)
bin_mids, bin_mids.shape
plt.plot(bin_mids, preds[0,30,0,0,:]) #some random point
plt.plot(bin_mids, preds[10,30,20,0,:]) #some random point
# Draw 50 samples from the predicted distribution at one grid point.
sample=np.random.choice(bin_mids, size=50, p=preds[0,30,0,0,:], replace=True)
sample
plt.hist(sample, range=(bin_min,bin_max))
member=100
preds_shape=preds.shape
# Flatten to (n_points, num_bins) so each distribution can be sampled in turn.
preds=preds.reshape(-1,num_bins)
preds_new=[]
for i, p in enumerate(preds):
    sample=np.random.choice(bin_mids, size=member, p=preds[i,:],replace=True)
    preds_new.append(sample)
preds_new=np.array(preds_new)
print(preds_new.shape)
preds_new3=preds_new.reshape(preds_shape[0],preds_shape[1],
                             preds_shape[2],member, preds_shape[3],)
print(preds_new3.shape)
preds_new=np.array(preds_new)
print(preds_new.shape)
preds_new2=preds_new.reshape(preds_shape[0],preds_shape[1],preds_shape[2],preds_shape[3],member)
print(preds_new2.shape)
preds_new_shape=preds_new2.shape
# NOTE(review): reshape does not reorder data — swapping the last two axes
# (level, member) needs np.transpose; as written this likely scrambles values.
preds_new2=preds_new2.reshape(preds_new_shape[0],preds_new_shape[1],
                              preds_new_shape[2],
                              preds_new_shape[4],preds_new_shape[3])
print(preds_new2.shape)
dg=dg_valid
preds_new2 = xr.DataArray(
    preds_new2,
    dims=['time', 'lat', 'lon', 'member', 'level'],
    coords={'time': dg.valid_time[0:32], 'lat': dg.data.lat,
            'lon': dg.data.lon,
            'member': np.arange(member),
            'level': level,
            'level_names': level_names,
            },
)
# Un-normalize back to physical units using the generator's statistics.
mean = dg.mean.isel(level=dg.output_idxs).values
std = dg.std.isel(level=dg.output_idxs).values
preds_new2 = preds_new2 * std + mean
# Split the stacked `level` dimension back into one DataArray per variable.
unique_vars = list(set([l.split('_')[0] for l in preds_new2.level_names.values]))
das = []
for v in unique_vars:
    idxs = [i for i, vv in enumerate(preds_new2.level_names.values) if vv.split('_')[0] in v]
    da = preds_new2.isel(level=idxs).squeeze().drop('level_names')
    if not 'level' in da.dims: da = da.drop('level')
    das.append({v: da})
preds_final=xr.merge(das)
preds_final
preds_final.to_netcdf('/home/garg/data/WeatherBench/predictions/categorical_v3.nc')
# Sanity check of np.random.choice with an explicit probability vector.
sample1=np.random.choice(5, 20, p=[0.05, 0.2, 0.5, 0.2, 0.05])
sample1
args['datadir']
datadir=args['datadir']
z500_valid = load_test_data(f'{datadir}/geopotential_500', 'z').drop('level')
t850_valid = load_test_data(f'{datadir}/temperature_850', 't').drop('level')
valid = xr.merge([z500_valid, t850_valid]).sel(time=preds_final.time)
valid
ensemblemean=preds_final.mean('member') #ensemble
mean_rmse_p=compute_weighted_rmse(valid,ensemblemean).load(); print(mean_rmse_p)
crps=compute_weighted_crps(preds_final,valid).load(); print(crps)
# NOTE(review): orphaned `else:` fragment — pasted from create_predictions()
# without its matching `if` (and without definitions of dg/multi_dt), so this
# cell cannot run as-is. Kept for reference only.
else:
    # Continuous (non-categorical) branch: wrap raw model output directly.
    preds = xr.DataArray(
        model.predict(dg)[0] if multi_dt else model.predict(dg),
        dims=['time', 'lat', 'lon', 'level'],
        coords={'time': dg.valid_time, 'lat': dg.data.lat, 'lon': dg.data.lon,
                'level': level,
                'level_names': level_names
                },
    )
if is_categorical==True:
    # Sample `member` ensemble members from the predicted bin probabilities,
    # then un-flatten back to the (time, lat, lon) grid.
    preds=model.predict(dg)[0] if multi_dt else model.predict(dg)
    interval=(bin_max-bin_min)/num_bins
    # Midpoint of each probability bin; sampled values are drawn from these.
    bin_mids=np.linspace(bin_min+0.5*interval, bin_max-0.5*interval, num_bins)
    preds_shape=preds.shape
    preds=preds.reshape(-1,num_bins)
    preds_new=[]
    for i, p in enumerate(preds):
        # `p` is already the row preds[i, :]; use it directly.
        sample=np.random.choice(bin_mids, size=member, p=p, replace=True)
        preds_new.append(sample)
    preds_new=np.array(preds_new)
    preds_new=preds_new.reshape(preds_shape[0],preds_shape[1],preds_shape[2],
                                preds_shape[3],member)
    # FIX(review): the original called reshape(...) here and was also missing
    # the closing parenthesis (a syntax error). reshape does NOT reorder
    # data; to swap the last two axes (level, member) -> (member, level), as
    # the DataArray dims below require, we must transpose.
    preds_new=np.transpose(preds_new, (0, 1, 2, 4, 3))
    preds = xr.DataArray(
        preds_new,
        dims=['time', 'lat', 'lon', 'member', 'level'],
        coords={'time': dg.valid_time, 'lat': dg.data.lat, 'lon': dg.data.lon,
                'member': np.arange(member),
                'level': level,
                'level_names': level_names,
                },)
# NOTE(review): this cell is a duplicate of the one above — consider
# deleting one copy. Same fixes applied here.
if is_categorical==True:
    # Sample `member` ensemble members from the predicted bin probabilities,
    # then un-flatten back to the (time, lat, lon) grid.
    preds=model.predict(dg)[0] if multi_dt else model.predict(dg)
    interval=(bin_max-bin_min)/num_bins
    # Midpoint of each probability bin; sampled values are drawn from these.
    bin_mids=np.linspace(bin_min+0.5*interval, bin_max-0.5*interval, num_bins)
    preds_shape=preds.shape
    preds=preds.reshape(-1,num_bins)
    preds_new=[]
    for i, p in enumerate(preds):
        # `p` is already the row preds[i, :]; use it directly.
        sample=np.random.choice(bin_mids, size=member, p=p, replace=True)
        preds_new.append(sample)
    preds_new=np.array(preds_new)
    preds_new=preds_new.reshape(preds_shape[0],preds_shape[1],preds_shape[2],
                                preds_shape[3],member)
    # FIX(review): the original called reshape(...) here and was also missing
    # the closing parenthesis (a syntax error). reshape does NOT reorder
    # data; to swap the last two axes (level, member) -> (member, level), as
    # the DataArray dims below require, we must transpose.
    preds_new=np.transpose(preds_new, (0, 1, 2, 4, 3))
    preds = xr.DataArray(
        preds_new,
        dims=['time', 'lat', 'lon', 'member', 'level'],
        coords={'time': dg.valid_time, 'lat': dg.data.lat, 'lon': dg.data.lon,
                'member': np.arange(member),
                'level': level,
                'level_names': level_names,
                },)
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# Connect to the local SQLite weather database.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
```
# Exploratory Climate Analysis
```
# FIX(review): Counter was used below without being imported (NameError).
from collections import Counter

# Calculate the date 1 year ago from the last data point in the database
one_yr_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
# Design a query to retrieve the last 12 months of precipitation data and plot the results
measurement_12 = session.query(Measurement.date,Measurement.prcp).filter(Measurement.date >= one_yr_ago).order_by(Measurement.date.desc()).all()
# for measure in measurement_12:
# print(measure.date, measure.prcp)
# Perform a query to retrieve the data and precipitation scores
# NOTE(review): the [:2230] slice hard-codes the row count — confirm it
# still matches the data.
df = pd.DataFrame(measurement_12[:2230], columns=['Date', 'Precipitation'])
df.set_index('Date', inplace=True )
# Save the query results as a Pandas DataFrame and set the index to the date column
# Sort the dataframe by date
result = df.sort_values(by='Date', ascending=False)
result.head(10)
# Use Pandas Plotting with Matplotlib to plot the data
# ax = df.iloc[:20:-1].plot(kind='bar', title ='Precipitation', figsize=(15, 10), legend=True, fontsize=12)
# ax.set_xlabel("Date", fontsize=12)
# ax.set_ylabel("Precipitation", fontsize=12)
# plt.show()
result.plot(rot=90)
# Use Pandas to calculate the summary statistics for the precipitation data
df.describe()
# Design a query to show how many stations are available in this dataset?
station_no = session.query(Measurement,Station).filter(Measurement.station == Station.station).all()
# station_count =station.count(station.station)
station_count = []
for stn in station_no:
    (mt,st) = stn
    station_count.append(st.station)
len(set(station_count))
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
Counter(station_count)
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
station_q = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
    filter(Measurement.station == 'USC00519281').all()
station_q
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
import datetime as dt
from pandas.plotting import table
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
    filter(Measurement.station == 'USC00519281').\
    filter(Measurement.date >= prev_year).all()
df = pd.DataFrame(results, columns=['tobs'])
df.plot.hist(bins=12)
plt.tight_layout()
```
## Optional Challenge Assignment
```
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX over an inclusive range of dates.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        A list with a single (TMIN, TAVG, TMAX) tuple.
    """
    aggregates = [func.min(Measurement.tobs),
                  func.avg(Measurement.tobs),
                  func.max(Measurement.tobs)]
    # Build the query step by step, restricting to the requested window.
    query = session.query(*aggregates)
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
import datetime as dt
prev_year_start = dt.date(2018, 1, 1) - dt.timedelta(days=365)
prev_year_end = dt.date(2018, 1, 7) - dt.timedelta(days=365)
# calc_temps returns a one-element list of (tmin, tavg, tmax); unpack it.
tmin, tavg, tmax = calc_temps(prev_year_start.strftime("%Y-%m-%d"), prev_year_end.strftime("%Y-%m-%d"))[0]
print(tmin, tavg, tmax)
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
fig, ax = plt.subplots(figsize=plt.figaspect(2.))
xpos = 1
yerr = tmax-tmin
# NOTE(review): the bar height uses tmax, but the instructions above say to
# plot the average temperature (tavg) — confirm which is intended.
bar = ax.bar(xpos, tmax, yerr=yerr, alpha=0.5, color='coral', align="center")
ax.set(xticks=range(xpos), xticklabels="a", title="Trip Avg Temp", ylabel="Temp (F)")
ax.margins(.2, .2)
# fig.autofmt_xdate()
fig.tight_layout()
fig.show()
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
start_date = '2012-01-01'
end_date = '2012-01-07'
sel = [Station.station, Station.name, Station.latitude,
       Station.longitude, Station.elevation, func.sum(Measurement.prcp)]
results = session.query(*sel).\
    filter(Measurement.station == Station.station).\
    filter(Measurement.date >= start_date).\
    filter(Measurement.date <= end_date).\
    group_by(Station.name).order_by(func.sum(Measurement.prcp).desc()).all()
print(results)
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    stats = [func.min(Measurement.tobs),
             func.avg(Measurement.tobs),
             func.max(Measurement.tobs)]
    # Match every year's rows whose month-day equals the requested date.
    matches_day = func.strftime("%m-%d", Measurement.date) == date
    return session.query(*stats).filter(matches_day).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
trip_start = '2018-01-01'
trip_end = '2018-01-07'
# Set the start and end date of the trip
trip_dates = pd.date_range(trip_start, trip_end, freq='D')
# Use the start and end date to create a range of dates
trip_month_day = trip_dates.strftime('%m-%d')
# Strip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
normals = []
for date in trip_month_day:
    # daily_normals returns a one-element list; * unpacks that element.
    normals.append(*daily_normals(date))
normals
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
df = pd.DataFrame(normals, columns=['tmin', 'tavg', 'tmax'])
df['date'] = trip_dates
df.set_index(['date'],inplace=True)
df.head()
# Plot the daily normals as an area plot with `stacked=False`
df.plot(kind='area', stacked=False, x_compat=True, alpha=.2)
plt.tight_layout()
```
| github_jupyter |
## Scrape Archived Mini Normals from Mafiascum.net
#### Scrapy Structure/Lingo:
**Spiders** extract data **items**, which Scrapy sends one by one to a configured **item pipeline** (if one is configured) to do post-processing on the items.
## Import relevant packages...
```
import scrapy
import math
import logging
import json
from scrapy.crawler import CrawlerProcess
from scrapy.spiders import CrawlSpider, Rule
from scrapy.item import Item, Field
from scrapy.selector import Selector
```
## Initial variables...
```
# Posts per page on the forum — used to turn a post count into page offsets.
perpage = 25

class PostItem(scrapy.Item):
    """One scraped forum post."""
    pagelink = scrapy.Field()   # URL of the page the post was scraped from
    forum = scrapy.Field()      # forum id (the `f=` query parameter)
    thread = scrapy.Field()     # thread id (the `t=` query parameter)
    number = scrapy.Field()     # post number within the thread
    timestamp = scrapy.Field()  # post time, as sliced from the page text
    user = scrapy.Field()       # author username, or <<DELETED_USER>>
    content = scrapy.Field()    # inner HTML of the post body
```
## Define what happens to scrape output...
```
# The following pipeline stores all scraped items (from all spiders)
# into a single items.jl file, containing one item per line serialized
# in JSON format:
class JsonWriterPipeline(object):
    """Item pipeline that serializes every scraped item (from all spiders)
    to `posts.jl`, one JSON object per line (JSON Lines format)."""

    def open_spider(self, spider):
        # Runs once at crawl start: open (truncate) the output file.
        self.file = open('posts.jl', 'w')

    def close_spider(self, spider):
        # Runs once at crawl end: release the file handle.
        self.file.close()

    def process_item(self, item, spider):
        # One JSON object per line; print() supplies the trailing newline.
        # The item is returned unchanged so later pipelines still see it.
        print(json.dumps(dict(item)), file=self.file)
        return item
```
## Define spider...
```
class MafiaScumSpider(scrapy.Spider):
    """Crawl every archived thread listed in archive.txt and yield a
    PostItem for every post on every page of each thread."""
    name = 'mafiascum'
    # define set of threads we're going to scrape from (ie all of them):
    # archive.txt holds records separated by blank lines; the first line of
    # each record is the thread URL.
    start_urls = [each[:each.find('\n')] for each in open('archive.txt').read().split('\n\n\n')]
    # settings
    custom_settings = {'LOG_LEVEL': logging.WARNING,
                       'ITEM_PIPELINES': {'__main__.JsonWriterPipeline': 1}}

    # get page counts and then do the REAL parse on every single page
    def parse(self, response):
        """Read the thread's post count, then request every page of the
        thread, handing each page to parse_page()."""
        # find page count
        try:
            postcount = Selector(response).xpath(
                '//div[@class="pagination"]/text()').extract()
            # Slice the integer out of the pagination text; the [4:] offset
            # assumes a fixed-length prefix — verify against the live markup.
            postcount = int(postcount[0][4:postcount[0].find(' ')])
            # yield parse for every page of thread
            for i in range(math.ceil(postcount/perpage)):
                yield scrapy.Request(response.url+'&start='+str(i*perpage),
                                     callback=self.parse_page)
        except IndexError: # if can't, the thread probably doesn't exist
            return

    def parse_page(self, response):
        """Yield one PostItem per post found on a single thread page."""
        # scan through posts on page and yield Post items for each
        sel = Selector(response)
        # The page-header link carries the f= (forum) and t= (thread) ids.
        location = sel.xpath('//div[@id="page-body"]/h2/a/@href').extract()[0]
        forum = location[location.find('f=')+2:location.find('&t=')]
        if location.count('&') == 1:
            thread = location[location.find('&t=')+3:]
        elif location.count('&') == 2:
            thread = location[
                location.find('&t=')+3:location.rfind('&')]
        posts = (sel.xpath('//div[@class="post bg1"]') +
                 sel.xpath('//div[@class="post bg2"]'))
        for p in posts:
            post = PostItem()
            post['forum'] = forum
            post['thread'] = thread
            post['pagelink'] = response.url
            # Each field is tried under two xpaths — presumably two page
            # markup variants; the except branch handles the second layout.
            try:
                post['number'] = p.xpath(
                    'div/div[@class="postbody"]/p/a[2]/strong/text()').extract()[0][1:]
            except IndexError:
                post['number'] = p.xpath(
                    'div[@class="postbody"]/p/a[2]/strong/text()').extract()[0][1:]
            try:
                post['timestamp'] = p.xpath(
                    'div/div/p/text()[4]').extract()[0][23:-4]
            except IndexError:
                post['timestamp'] = p.xpath(
                    'div[@class="postbody"]/p/text()[4]').extract()[0][23:-4]
            try:
                post['user'] = p.xpath('div/div/dl/dt/a/text()').extract()[0]
            except IndexError:
                # No author link: the account was deleted.
                post['user'] = '<<DELETED_USER>>'
            try:
                post['content'] = p.xpath(
                    'div/div/div[@class="content"]').extract()[0][21:-6]
            except IndexError:
                post['content'] = p.xpath(
                    'div[@class="postbody"]/div[@class="content"]').extract()[0][21:-6]
            yield post
```
## Start scraping...
```
# Run the spider in-process with a desktop User-Agent (presumably to avoid
# bot blocking). process.start() blocks until the crawl finishes.
process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
process.crawl(MafiaScumSpider)
process.start()
```
...and output should be a json file in same directory as this notebook!
## Leftover Code...
```
# open mini normal archive
# ??? i don't remember what this does; probably helped me collect archive links some time ago
# Guard: flip to True to re-run this one-off archive-link collection step.
runthis = False
if runthis:
    # relevant packages
    from selenium import webdriver
    from scrapy.selector import Selector
    import re
    # configure browser
    options = webdriver.ChromeOptions()
    options.binary_location = '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary'
    options.add_argument('window-size=800x841')
    driver = webdriver.Chrome(chrome_options=options)
    # get the thread titles and links (first 4 listing pages, 100 per page)
    links = []
    titles = []
    for i in range(0, 400, 100):
        driver.get('https://forum.mafiascum.net/viewforum.php?f=53&start=' + str(i))
        sel = Selector(text=driver.page_source)
        links += sel.xpath('//div[@class="forumbg"]/div/ul[@class="topiclist topics"]/li/dl/dt/a[1]/@href').extract()
        titles += sel.xpath('//div[@class="forumbg"]/div/ul[@class="topiclist topics"]/li/dl/dt/a[1]/text()').extract()
    # formatting, excluding needless threads...
    titles = titles[1:]
    links = links[1:]
    # Delete the links entry first — it is looked up via titles, which must
    # still be unmodified at that point.
    del links[titles.index('Mini Normal Archives')]
    del titles[titles.index('Mini Normal Archives')]
    # Reduce each title to its game number.
    titles = [re.search(r'\d+', each).group(0) for each in titles]
    # match txt archive game numbers with forum archive game numbers to find links
    f = open('archive.txt', 'r')
    txtarchives = f.read().split('\n\n\n')
    numbers = [re.search(r'\d+', each[:each.find('\n')]).group(0) for each in txtarchives]
    f.close()
    # store the result: prepend the matching forum URL to each record.
    for i, n in enumerate(numbers):
        txtarchives[i] = 'http://forum.mafiascum.net' + links[titles.index(n)][1:] + '\n' + txtarchives[i]
    f = open('archive2.txt', 'w')
    f.write('\n\n\n'.join(txtarchives))
    f.close()
```
| github_jupyter |
While going through our script we will gradually understand the use of these packages.
```
import tensorflow as tf #no need to describe ;)
import numpy as np #allows array operation
import pandas as pd #we will use it to read and manipulate files and columns content
from nltk.corpus import stopwords #provides list of english stopwords
# Materialize the english stopword list once; reused for filtering below.
stop = stopwords.words('english')
#PRINT VERSION!!
tf.__version__
```
For this notebook we will use New York Times user comments (from Kaggle Datasets).
When we create the language classifier we will use other data, but for now let's rely on an English natural-language source — so let's read the data.
```
#PLEASE DOWNLOAD THE FILE HERE: https://www.kaggle.com/aashita/nyt-comments
# Load the NYT comments CSV into a DataFrame (one row per user comment).
train = pd.read_csv('CommentsApril2017.csv')
```
Let's have a quick look at the data trying to find what is the column that we need.
Looks like commentBody is the right candidate.
```
# Display-only: preview the first rows; commentBody is the column we need.
train.head()
```
now we first put everything to lowercase and then replace undesired characters
```
# Lowercase the raw comments, then strip every character that is not a word
# character or whitespace (the regex [^\w\s] matches punctuation).
train['commentBody_lower'] = train["commentBody"].str.lower()
train['commentBody_no_punctiation'] = train['commentBody_lower'].str.replace('[^\w\s]','')
```
let's check how the text looks like now!
Well everything is lowercase and no "ugly characters"
```
# Display-only: confirm the text is lowercase with punctuation removed.
train['commentBody_no_punctiation'].head()
```
Now we remove the stopwords and then fill empty cells with the placeholder word "fillna".
```
# Drop english stopwords token by token, then replace missing values with
# the placeholder word "fillna".
train['commentBody_no_stopwords'] = train['commentBody_no_punctiation'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
train["commentBody_no_stopwords"] = train["commentBody_no_stopwords"].fillna("fillna")
```
This is what our cleaned text looks like: everything is lowercase and the stopwords (for example "this") are gone. Now let's go back to the slides.
```
train['commentBody_no_stopwords'].head()
# NOTE(review): this binds a second name to the same DataFrame, not a copy —
# mutating tf_train also mutates train.
tf_train = train
```
We first assign our current data frame to another one to keep track of our work; then we read the first sentence and count its words, which turn out to be 21.
```
# Inspect one sentence and its word count before tokenization.
tf_train['commentBody_no_stopwords'][1]
tf_train['commentBody_no_stopwords'][1].count(' ')
max_features=5000 #we set maximum number of words to 5000
maxlen=100 #and maximum sequence length to 100
tok = tf.keras.preprocessing.text.Tokenizer(num_words=max_features) #tokenizer step
tok.fit_on_texts(list(tf_train['commentBody_no_stopwords'])) #fit to cleaned text
tf_train=tok.texts_to_sequences(list(tf_train['commentBody_no_stopwords'])) #this is how we create sequences
print(type(tf_train)) #we see that the type is now list
print(len(tf_train[1])) #we see that the number of words of the sentence is decreased to 16
tf_train[1] #and this is how our sentence looks now: exactly a sequence of integers
tf_train=tf.keras.preprocessing.sequence.pad_sequences(tf_train, maxlen=maxlen) #let's execute pad step
print(len(tf_train[1]))
tf_train[1] #after the pad step the sentence no longer has 16 words but 100 (equal to maxlen)
train['commentBody_no_stopwords'][1] #let's look at the input text
tf_train = pd.DataFrame(tf_train)
tf_train.head() #the final matrix used as input for our deep learning algorithms —
#remember what the original text looked like?
```
| github_jupyter |
# Autonomous driving - Car detection
Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: Redmon et al., 2016 (https://arxiv.org/abs/1506.02640) and Redmon and Farhadi, 2016 (https://arxiv.org/abs/1612.08242).
**You will learn to**:
- Use object detection on a car detection dataset
- Deal with bounding boxes
Run the following cell to load the packages and dependencies that are going to be useful for your journey!
```
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
%matplotlib inline
```
**Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`.
## 1 - Problem Statement
You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around.
<center>
<video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We would like to especially thank [drive.ai](https://www.drive.ai/) for providing this dataset! Drive.ai is a company building the brains of self-driving vehicles.
</center></caption>
<img src="nb_images/driveai.png" style="width:100px;height:100;">
You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like.
<img src="nb_images/box_label.png" style="width:500px;height:250;">
<caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption>
If you have 80 classes that you want YOLO to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step.
In this exercise, you will learn how YOLO works, then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use.
## 2 - YOLO
YOLO ("you only look once") is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes.
### 2.1 - Model details
First things to know:
- The **input** is a batch of images of shape (m, 608, 608, 3)
- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers.
We will use 5 anchor boxes. So you can think of the YOLO architecture as the following: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85).
Lets look in greater detail at what this encoding represents.
<img src="nb_images/architecture.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption>
If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object.
Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.
For simplicity, we will flatten the last two dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425).
<img src="nb_images/flatten.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 3** </u>: **Flattening the last two last dimensions**<br> </center></caption>
Now, for each box (of each cell) we will compute the following elementwise product and extract a probability that the box contains a certain class.
<img src="nb_images/probability_extraction.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption>
Here's one way to visualize what YOLO is predicting on an image:
- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across both the 5 anchor boxes and across different classes).
- Color that grid cell according to what object that grid cell considers the most likely.
Doing this results in this picture:
<img src="nb_images/proba_map.png" style="width:300px;height:300;">
<caption><center> <u> **Figure 5** </u>: Each of the 19x19 grid cells colored according to which class has the largest predicted probability in that cell.<br> </center></caption>
Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm.
Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this:
<img src="nb_images/anchor_map.png" style="width:200px;height:200;">
<caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption>
In the figure above, we plotted only boxes that the model had assigned a high probability to, but this is still too many boxes. You'd like to filter the algorithm's output down to a much smaller number of detected objects. To do so, you'll use non-max suppression. Specifically, you'll carry out these steps:
- Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class)
- Select only one box when several boxes overlap with each other and detect the same object.
### 2.2 - Filtering with a threshold on class scores
You are going to apply a first filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold.
The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It'll be convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables:
- `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.
- `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes per cell.
- `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the detection probabilities $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.
**Exercise**: Implement `yolo_filter_boxes()`.
1. Compute box scores by doing the elementwise product as described in Figure 4. The following code may help you choose the right operator:
```python
a = np.random.randn(19*19, 5, 1)
b = np.random.randn(19*19, 5, 80)
c = a * b # shape of c will be (19*19, 5, 80)
```
2. For each box, find:
- the index of the class with the maximum box score ([Hint](https://keras.io/backend/#argmax)) (Be careful with what axis you choose; consider using axis=-1)
- the corresponding box score ([Hint](https://keras.io/backend/#max)) (Be careful with what axis you choose; consider using axis=-1)
3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep.
4. Use TensorFlow to apply the mask to box_class_scores, boxes and box_classes to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. ([Hint](https://www.tensorflow.org/api_docs/python/tf/boolean_mask))
Reminder: to call a Keras function, you should use `K.function(...)`.
```
# GRADED FUNCTION: yolo_filter_boxes
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Filter YOLO boxes by thresholding on object and class confidence.

    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1), p_c for each anchor box
    boxes -- tensor of shape (19, 19, 5, 4), (b_x, b_y, b_h, b_w) per box
    box_class_probs -- tensor of shape (19, 19, 5, 80), class probabilities
    threshold -- boxes whose highest class score is below this are discarded

    Returns:
    scores -- tensor of shape (None,), class probability score of the kept boxes
    boxes -- tensor of shape (None, 4), (b_x, b_y, b_h, b_w) of the kept boxes
    classes -- tensor of shape (None,), class index detected by the kept boxes

    Note: "None" is used because the number of surviving boxes depends on the
    threshold; e.g. scores has shape (10,) if exactly 10 boxes survive.
    """
    # Per-class score of every box: p_c * class probability. Broadcasting
    # spreads the (…, 1) confidence over the (…, 80) class axis.
    box_scores = box_confidence * box_class_probs

    # For each box, the best class (index) and its score, reducing the
    # trailing class axis.
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)

    # Boolean mask: True for boxes whose best score reaches the threshold.
    keep_mask = box_class_scores >= threshold

    # Apply the mask to every tensor so only the selected boxes remain.
    scores = tf.boolean_mask(box_class_scores, keep_mask)
    boxes = tf.boolean_mask(boxes, keep_mask)
    classes = tf.boolean_mask(box_classes, keep_mask)

    return scores, boxes, classes
# Smoke test: random tensors shaped like the YOLO head output, threshold 0.5.
with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    # Static shapes are (?,) / (?, 4): the surviving-box count is dynamic.
    print("scores.shape = " + str(scores.shape))
    print("boxes.shape = " + str(boxes.shape))
    print("classes.shape = " + str(classes.shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
10.7506
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[ 8.42653275 3.27136683 -0.5313437 -4.94137383]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
7
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(?,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(?, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(?,)
</td>
</tr>
</table>
### 2.3 - Non-max suppression ###
Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS).
<img src="nb_images/non-max-suppression.png" style="width:500px;height:400;">
<caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probabiliy) one of the 3 boxes. <br> </center></caption>
Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU.
<img src="nb_images/iou.png" style="width:500px;height:400;">
<caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption>
**Exercise**: Implement iou(). Some hints:
- In this exercise only, we define a box using its two corners (upper left and lower right): (x1, y1, x2, y2) rather than the midpoint and height/width.
- To calculate the area of a rectangle you need to multiply its height (y2 - y1) by its width (x2 - x1)
- You'll also need to find the coordinates (xi1, yi1, xi2, yi2) of the intersection of two boxes. Remember that:
- xi1 = maximum of the x1 coordinates of the two boxes
- yi1 = maximum of the y1 coordinates of the two boxes
- xi2 = minimum of the x2 coordinates of the two boxes
- yi2 = minimum of the y2 coordinates of the two boxes
In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) the lower-right corner.
```
# GRADED FUNCTION: iou
def iou(box1, box2):
    """Compute the intersection over union (IoU) between box1 and box2.

    Arguments:
    box1 -- first box, list/tuple with corner coordinates (x1, y1, x2, y2)
    box2 -- second box, list/tuple with corner coordinates (x1, y1, x2, y2)

    Returns:
    iou -- scalar in [0, 1]: intersection area divided by union area;
           0 when the boxes do not overlap.
    """
    # Corners of the intersection rectangle.
    xi1 = np.maximum(box1[0], box2[0])
    yi1 = np.maximum(box1[1], box2[1])
    xi2 = np.minimum(box1[2], box2[2])
    yi2 = np.minimum(box1[3], box2[3])
    # Fix: clamp width and height at 0. When the boxes are disjoint,
    # xi2 < xi1 (and/or yi2 < yi1); without clamping, the product of two
    # negative extents yields a spurious positive intersection area.
    inter_width = np.maximum(xi2 - xi1, 0)
    inter_height = np.maximum(yi2 - yi1, 0)
    inter_area = inter_width * inter_height

    # Union(A, B) = A + B - Inter(A, B)
    box1_area = (box1[3] - box1[1]) * (box1[2] - box1[0])
    box2_area = (box2[3] - box2[1]) * (box2[2] - box2[0])
    union_area = box1_area + box2_area - inter_area

    iou = inter_area / union_area
    return iou
# Sanity check: the two boxes overlap in a 1x1 square, each has area 4,
# so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.142857.
box1 = (2, 1, 4, 3)
box2 = (1, 2, 3, 4)
print("iou = " + str(iou(box1, box2)))
```
**Expected Output**:
<table>
<tr>
<td>
**iou = **
</td>
<td>
0.14285714285714285
</td>
</tr>
</table>
You are now ready to implement non-max suppression. The key steps are:
1. Select the box that has the highest score.
2. Compute its overlap with all other boxes, and remove boxes that overlap it more than `iou_threshold`.
3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.
This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.
**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):
- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)
- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/gather)
```
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to a set of boxes.

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each kept box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each kept box

    Note: the "None" dimension of the output tensors is at most max_boxes.
    """
    # Fix: tf.image.non_max_suppression takes max_output_size as a scalar
    # tensor; this variable is created and initialized for exactly that use,
    # so pass it in instead of the raw Python int (which left it dead).
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))

    # Indices of the boxes NMS keeps, ordered by decreasing score.
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)

    # Gather only the surviving entries from each tensor.
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)

    return scores, boxes, classes
# Smoke test: 54 random boxes/scores; NMS keeps at most 10 (the default).
with tf.Session() as test_b:
    scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
    classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
6.9384
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[-5.299932 3.13798141 4.45036697 0.95942086]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
-2.24527
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(10,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(10, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(10,)
</td>
</tr>
</table>
### 2.4 Wrapping up the filtering
It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented.
**Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided):
```python
boxes = yolo_boxes_to_corners(box_xy, box_wh)
```
which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`
```python
boxes = scale_boxes(boxes, image_shape)
```
YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image.
Don't worry about these two functions; we'll show you where they need to be called.
```
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.

    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None, ), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Unpack the four tensors produced by yolo_head.
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    # Convert (x, y, w, h) midpoint boxes to (x1, y1, x2, y2) corners, the
    # format yolo_filter_boxes expects.
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    # Drop every box whose best class score is below score_threshold.
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)
    # Rescale boxes from the 608x608 model space back to the original image.
    boxes = scale_boxes(boxes, image_shape)
    # Remove overlapping boxes with non-max suppression.
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)
    return scores, boxes, classes
# Smoke test: random tensors shaped like yolo_head's four outputs.
# NOTE(review): these tensors omit the leading batch axis the yolo_eval
# docstring describes -- the test works because every op broadcasts/reduces
# on the trailing axes; confirm against the real yolo_head output.
with tf.Session() as test_b:
    yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
    scores, boxes, classes = yolo_eval(yolo_outputs)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
138.791
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
54
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(10,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(10, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(10,)
</td>
</tr>
</table>
<font color='blue'>
**Summary for YOLO**:
- Input image (608, 608, 3)
- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output.
- After flattening the last two dimensions, the output is a volume of shape (19, 19, 425):
- Each cell in a 19x19 grid over the input image gives 425 numbers.
- 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture.
    - 85 = 5 + 80, where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect
- You then select only few boxes based on:
- Score-thresholding: throw away boxes that have detected a class with a score less than the threshold
- Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes
- This gives you YOLO's final output.
## 3 - Test YOLO pretrained model on images
In this part, you are going to use a pretrained model and test it on the car detection dataset. As usual, you start by **creating a session to start your graph**. Run the following cell.
```
sess = K.get_session()
```
### 3.1 - Defining classes, anchors and image shape.
Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell.
The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
```
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
```
### 3.2 - Loading a pretrained model
Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file.
```
yolo_model = load_model("model_data/yolo.h5")
```
This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
```
yolo_model.summary()
```
**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.
**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).
### 3.3 - Convert output of the model to usable bounding box tensors
The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.
```
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
```
You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function.
### 3.4 - Filtering boxes
`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Lets now call `yolo_eval`, which you had previously implemented, to do this.
```
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
```
### 3.5 - Run the graph on an image
Let the fun begin. You have created a (`sess`) graph that can be summarized as follows:
1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font>
2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font>
3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font>
**Exercise**: Implement predict() which runs the graph to test YOLO on an image.
You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.
The code below also uses the following function:
```python
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
```
which outputs:
- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.
- image_data: a numpy-array representing the image. This will be the input to the CNN.
**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.
```
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # Load the image and resize it to the 608x608 input the network expects.
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
    # Evaluate the module-level `scores`, `boxes`, `classes` tensors built by
    # yolo_eval above. K.learning_phase(): 0 puts BatchNorm layers into
    # inference mode (required because YOLO uses BatchNorm).
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={
            yolo_model.input: image_data,
            K.learning_phase(): 0
        })
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # One drawing color per class, then annotate the PIL image in place.
    colors = generate_colors(class_names)
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the annotated image, then re-read and display it in the notebook.
    image.save(os.path.join("out", image_file), quality=90)
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 -- on newer
    # SciPy this line needs imageio.imread or plt.imread instead; verify the
    # pinned SciPy version before upgrading.
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    return out_scores, out_boxes, out_classes
```
Run the following cell on the "test.jpg" image to verify that your function is correct.
```
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
```
**Expected Output**:
<table>
<tr>
<td>
**Found 7 boxes for test.jpg**
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.60 (925, 285) (1045, 374)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.66 (706, 279) (786, 350)
</td>
</tr>
<tr>
<td>
**bus**
</td>
<td>
0.67 (5, 266) (220, 407)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.70 (947, 324) (1280, 705)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.74 (159, 303) (346, 440)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.80 (761, 282) (942, 412)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.89 (367, 300) (745, 648)
</td>
</tr>
</table>
The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the cell above code
4. Run the code and see the output of the algorithm!
If you were to run your session in a for loop over all your images. Here's what you would get:
<center>
<video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption>
<font color='blue'>
**What you should remember**:
- YOLO is a state-of-the-art object detection model that is fast and accurate
- It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume.
- The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes.
- You filter through all the boxes using non-max suppression. Specifically:
- Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes
- Intersection over Union (IoU) thresholding to eliminate overlapping boxes
- Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise.
**References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's github repository. The pretrained weights used in this exercise came from the official YOLO website.
- Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015)
- Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016)
- Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K)
- The official YOLO website (https://pjreddie.com/darknet/yolo/)
**Car detection dataset**:
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are especially grateful to Brody Huval, Chih Hu and Rahul Patel for collecting and providing this dataset.
| github_jupyter |
# Do Neural Networks overfit?
This brief post is exploring overfitting neural networks. It comes from reading the paper:
Towards Understanding Generalization of Deep Learning: Perspective of Loss Landscapes
https://arxiv.org/pdf/1706.10239.pdf
We show that fitting a hugely overparameterised model to some linear regression data works absolutely fine... The results are quite cool so I thought I would double check.
```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# NOTE(review): this style name was renamed to "seaborn-v0_8-whitegrid" in
# matplotlib >= 3.6 -- confirm the pinned matplotlib version.
plt.style.use("seaborn-whitegrid")
# NOTE(review): `rng` is never used below; the data is drawn from the global
# np.random state, so the numpy draws are not actually seeded.
rng = np.random.RandomState(0)
tf.random.set_seed(0)
```
Let's generate some linear regression data. We only generate **100 data points**. This is simply a straight line with Gaussian noise - a problem linear regression is optimal for.
```
# 100 noisy points on the line y = 3 + 0.7x, with x ~ Uniform(0, 20) and
# Gaussian noise of scale 2 -- a problem linear regression is optimal for.
x = np.random.uniform(0, 20, size=(100))
true_data = lambda x: 3 + 0.7*x
y = true_data(x) + np.random.normal(scale=2.0, size=(100))

fig, ax = plt.subplots(figsize=(10,6))
ax.plot(x, y, '.')
plt.show()
```
Let's try to fit this with a neural network. The network is deliberately over-complex, with over **40,000 parameters** to tune and the relu activation function for non-linearity.
```
# Deliberately over-parameterised MLP: an input layer plus four more hidden
# layers of 100 ReLU units each, and a linear output -- ~40k parameters for
# 100 data points.
hidden_layers = [
    tf.keras.layers.Dense(100, activation=tf.keras.activations.relu)
    for _ in range(4)
]
model = tf.keras.Sequential(
    [
        tf.keras.layers.Dense(
            100, input_shape=(1,), activation=tf.keras.activations.relu
        ),
        *hidden_layers,
        tf.keras.layers.Dense(1),
    ]
)
model.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=tf.keras.losses.mse
)
model.summary()

# Train and plot the loss curve.
history = model.fit(x, y, epochs=200, verbose=False)
plt.plot(history.history["loss"])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
If we make predictions from the overly complex neural network we would expect some drastically overfit results...
```
# Predict on a dense grid over the training range to inspect the learned
# function for signs of overfitting.
x_eval = np.linspace(0, 20, 1000)
y_eval = model.predict(x_eval)

fig, ax = plt.subplots(figsize=(10,6))
ax.plot(x, y, '.', label='data')
ax.plot(x_eval, y_eval, label='NN')
plt.legend()
plt.show()
```
The results are pretty reasonable! There isn't a crazy line passing through all our points.
We can compare this to the results from linear regression. For laziness, we do this in tensorflow using a single layer linear network:
```
# Baseline: ordinary linear regression expressed as a single linear layer.
model_linear = tf.keras.Sequential(
    [
        tf.keras.layers.Dense(1, input_shape=(1,)),
    ]
)
model_linear.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=tf.keras.losses.mse
)
model_linear.summary()

history_linear = model_linear.fit(x, y, epochs=200, verbose=False)
plt.plot(history_linear.history["loss"])

# Overlay the NN fit and the linear fit on the same data.
y_linear_eval = model_linear.predict(x_eval)
fig, ax = plt.subplots(figsize=(10,6))
ax.plot(x, y, '.', label='data')
ax.plot(x_eval, y_eval, label='NN')
ax.plot(x_eval, y_linear_eval, label='linear regression')
plt.legend()
plt.show()
```
The two models look pretty similar. For more details on why this is the case - please refer to the paper in the introduction.
| github_jupyter |
<a href="https://colab.research.google.com/github/falconlee236/handson-ml2/blob/master/chapter4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt

# Synthetic linear data: y = 4 + 3x + Gaussian noise, 100 points.
X = 2 * np.random.rand(100, 1);
y = 4 + 3 * X + np.random.randn(100, 1)

plt.scatter(X, y)
plt.ylabel('y')
plt.xlabel('x')

# Normal equation: theta = (X^T X)^(-1) X^T y, with a prepended bias column.
X_b = np.c_[np.ones((100, 1)), X]
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best

# Predict at the range endpoints x=0 and x=2 and draw the fitted line.
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new]
y_predict = X_new_b.dot(theta_best)
y_predict

plt.plot(X_new, y_predict, 'r-')
plt.plot(X, y, 'b.')
plt.axis([0, 2, 0, 15])
plt.show()
from sklearn.linear_model import LinearRegression

# Closed-form fit via scikit-learn; intercept_/coef_ should match theta_best.
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
lin_reg.predict(X_new)

# Least squares via SVD (what LinearRegression uses internally).
theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6)
theta_best_svd

# Same solution through the Moore-Penrose pseudoinverse.
np.linalg.pinv(X_b).dot(y)

# Batch gradient descent on the MSE cost.
eta = 0.1           # learning rate
n_iterations = 1000
m = 100             # number of samples
theta = np.random.randn(2, 1)  # random initialization
for iteration in range(n_iterations):
    gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
    theta -= eta * gradients
theta

# Stochastic gradient descent via scikit-learn.
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(max_iter=1000, tol=1e-3, penalty=None, eta0=0.1)
sgd_reg.fit(X, y.ravel())
sgd_reg.intercept_, sgd_reg.coef_
# Quadratic data: y = 0.5 x^2 + x + 2 + Gaussian noise, x in [-3, 3).
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
plt.plot(X, y, 'b.')

from sklearn.preprocessing import PolynomialFeatures
# Add x^2 as a feature, then fit an ordinary linear model on [x, x^2].
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
X[0], X_poly[0]

lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_

# Evaluate the fitted curve on a dense grid and overlay it on the data.
X_new = np.linspace(-3, 3, 100).reshape(100, 1)
X_new_poly = poly_features.fit_transform(X_new)
y_new = lin_reg.predict(X_new_poly)
plt.plot(X, y, 'b.')
plt.plot(X_new, y_new, 'r-')

from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    """Plot train/validation RMSE as the training-set size grows.

    Refits `model` on the first m training samples for m = 1..len-1 and
    records the RMSE on those m samples and on a fixed 20% validation split.
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
    train_errors, val_errors = [], []
    for size in range(1, len(X_train)):
        model.fit(X_train[:size], y_train[:size])
        # Error on the samples seen so far vs. error on the held-out split.
        train_errors.append(
            mean_squared_error(y_train[:size], model.predict(X_train[:size])))
        val_errors.append(
            mean_squared_error(y_val, model.predict(X_val)))
    # RMSE is easier to read than MSE (same units as y).
    plt.plot(np.sqrt(train_errors), 'r-+', linewidth=2, label='train set')
    plt.plot(np.sqrt(val_errors), 'b-', linewidth=3, label='validation set')
    plt.ylim(top=3, bottom=0)
    plt.legend()
# Learning curves: plain linear regression underfits the quadratic data.
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)

from sklearn.pipeline import Pipeline
# Degree-10 polynomial regression overfits: large train/validation gap.
polynomial_regression = Pipeline([
    ('poly_features', PolynomialFeatures(degree=10, include_bias=False)),
    ('lin_reg', LinearRegression())
])
plot_learning_curves(polynomial_regression, X, y)

# Ridge regression (l2 penalty), closed-form Cholesky solver.
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver='cholesky')
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])

# Same l2 penalty via stochastic gradient descent.
sgd_reg = SGDRegressor(penalty='l2')
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])

# Lasso (l1 penalty) tends to zero out unimportant weights.
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])

# Elastic Net mixes l1 and l2 (l1_ratio controls the mix).
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
from sklearn import datasets

iris = datasets.load_iris()
list(iris.keys())

# Binary classifier: "is it Iris virginica?" using petal width only.
X = iris["data"][:, 3:]
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the documented drop-in replacement and yields the same int array.
y = (iris['target'] == 2).astype(int)

from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X, y)

# Estimated class probabilities over petal widths from 0 to 3 cm.
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], 'g-', label='Iris virginica')
plt.plot(X_new, y_proba[:, 0], 'b-', label='Not Iris virginica')

# Softmax (multinomial logistic) regression on petal length & width.
X = iris['data'][:, (2, 3)]
y = iris['target']
# NOTE(review): multi_class is deprecated in scikit-learn >= 1.5 (multinomial
# is already the default with lbfgs); kept here to match the book's code.
softmax_reg = LogisticRegression(multi_class="multinomial", solver='lbfgs', C=18)
softmax_reg.fit(X, y)
softmax_reg.predict([[5, 2]])
softmax_reg.predict_proba([[5, 2]])
```
| github_jupyter |
```
import os
import csv
import platform
import pandas as pd
import networkx as nx
from graph_partitioning import GraphPartitioning, utils
# Toggle for computing the (expensive) partition-quality metrics.
run_metrics = True
# Metric column order; rows of metricsDataPrediction below are indexed
# by position in this list (WASTE=0 ... LONELINESS=10).
cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "Qds", "CONDUCTANCE", "MAXPERM", "NMI", "FSCORE", "FSCORE RELABEL IMPROVEMENT", "LONELINESS"]
# Earlier column layouts, kept for reference:
#cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "Q", "Qds", "CONDUCTANCE", "LONELINESS", "NETWORK PERMANENCE", "NORM. MUTUAL INFO", "EDGE CUT WEIGHT", "FSCORE", "FSCORE RELABEL IMPROVEMENT"]
#cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "MODULARITY", "LONELINESS", "NETWORK PERMANENCE", "NORM. MUTUAL INFO", "EDGE CUT WEIGHT", "FSCORE", "FSCORE RELABEL IMPROVEMENT"]
# IPython magic: absolute path of the notebook's working directory.
pwd = %pwd
# Configuration for the graph-partitioning experiments. '$$' placeholders in
# the file paths are substituted with the network index by the driver loop.
config = {
    "DATA_FILENAME": os.path.join(pwd, "data", "predition_model_tests", "network", "network_$$.txt"),
    "OUTPUT_DIRECTORY": os.path.join(pwd, "output"),

    # Set which algorithm is run for the PREDICTION MODEL.
    # Either: 'FENNEL' or 'SCOTCH'
    "PREDICTION_MODEL_ALGORITHM": "FENNEL",

    # Alternatively, read input file for prediction model.
    # Set to empty to generate prediction model using algorithm value above.
    "PREDICTION_MODEL": "",

    "PARTITIONER_ALGORITHM": "FENNEL",

    # File containing simulated arrivals. This is used in simulating nodes
    # arriving at the shelter. Nodes represented by line number; value of
    # 1 represents a node as arrived; value of 0 represents the node as not
    # arrived or needing a shelter.
    "SIMULATED_ARRIVAL_FILE": os.path.join(pwd,
                                           "data",
                                           "predition_model_tests",
                                           "dataset_1_shift_rotate",
                                           "simulated_arrival_list",
                                           "percentage_of_prediction_correct_100",
                                           "arrival_100_$$.txt"
                                           ),

    # File containing the prediction of a node arriving. This is different to the
    # simulated arrivals, the values in this file are known before the disaster.
    "PREDICTION_LIST_FILE": os.path.join(pwd,
                                         "data",
                                         "predition_model_tests",
                                         "dataset_1_shift_rotate",
                                         "prediction_list",
                                         "prediction_$$.txt"
                                         ),

    # File containing the geographic location of each node, in "x,y" format.
    "POPULATION_LOCATION_FILE": os.path.join(pwd,
                                             "data",
                                             "predition_model_tests",
                                             "coordinates",
                                             "coordinates_$$.txt"
                                             ),

    # Number of shelters
    "num_partitions": 4,

    # The number of iterations when making prediction model
    "num_iterations": 1,

    # Percentage of prediction model to use before discarding
    # When set to 0, prediction model is discarded, useful for one-shot
    "prediction_model_cut_off": .0,

    # Alpha value used in one-shot (when restream_batches set to 1)
    "one_shot_alpha": 0.5,
    "use_one_shot_alpha": False,

    # Number of arrivals to batch before recalculating alpha and restreaming.
    "restream_batches": 50,

    # When the batch size is reached: if set to True, each node is assigned
    # individually as first in first out. If set to False, the entire batch
    # is processed and empty before working on the next batch.
    "sliding_window": False,

    # Create virtual nodes based on prediction model
    "use_virtual_nodes": False,

    # Virtual nodes: edge weight
    "virtual_edge_weight": 1.0,

    # Loneliness score parameter. Used when scoring a partition by how many
    # lonely nodes exist.
    "loneliness_score_param": 1.2,

    ####
    # GRAPH MODIFICATION FUNCTIONS
    # Also enables the edge calculation function.
    "graph_modification_functions": True,

    # If set, the node weight is set to 100 if the node arrives at the shelter,
    # otherwise the node is removed from the graph.
    "alter_arrived_node_weight_to_100": False,

    # Uses generalized additive models from R to generate prediction of nodes not
    # arrived. This sets the node weight on unarrived nodes to the prediction
    # given by a GAM.
    # Needs POPULATION_LOCATION_FILE to be set.
    "alter_node_weight_to_gam_prediction": False,

    # Enables edge expansion when graph_modification_functions is set to true
    "edge_expansion_enabled": True,

    # The value of 'k' used in the GAM will be the number of nodes arrived until
    # it reaches this max value.
    "gam_k_value": 100,

    # Alter the edge weight for nodes that haven't arrived. This is a way to
    # de-emphasise the prediction model for the unknown nodes.
    "prediction_model_emphasis": 1.0,

    # This applies the prediction_list_file node weights onto the nodes in the graph
    # when the prediction model is being computed and then removes the weights
    # for the cutoff and batch arrival modes
    "apply_prediction_model_weights": True,

    "compute_metrics_enabled": True,

    "SCOTCH_LIB_PATH": os.path.join(pwd, "libs/scotch/macOS/libscotch.dylib")
    if 'Darwin' in platform.system()
    else "/usr/local/lib/libscotch.so",

    # Path to the PaToH shared library
    "PATOH_LIB_PATH": os.path.join(pwd, "libs/patoh/lib/macOS/libpatoh.dylib")
    if 'Darwin' in platform.system()
    else os.path.join(pwd, "libs/patoh/lib/linux/libpatoh.so"),

    "PATOH_ITERATIONS": 5,

    # Expansion modes: 'avg_node_weight', 'total_node_weight', 'smallest_node_weight'
    # 'largest_node_weight'
    # add '_squared' or '_sqrt' at the end of any of the above for ^2 or sqrt(weight)
    # i.e. 'avg_node_weight_squared'
    "PATOH_HYPEREDGE_EXPANSION_MODE": 'no_expansion',

    # Edge Expansion: average, total, minimum, maximum, product, product_squared, sqrt_product
    "EDGE_EXPANSION_MODE" : 'total',

    # Whether nodes should be reordered using a centrality metric for optimal node assignments in batch mode
    # This is specific to FENNEL and at the moment Leverage Centrality is used to compute new node orders
    "FENNEL_NODE_REORDERING_ENABLED": False,

    # The node ordering scheme: PII_LH (political index), LEVERAGE_HL, DEGREE_HL, BOTTLENECK_HL
    # BUG FIX: key was misspelled "FENNEL_NODE_REODERING_SCHEME", so the driver
    # loop (which sets the correctly spelled key) never overrode this entry and
    # the misspelled one was dead configuration.
    "FENNEL_NODE_REORDERING_SCHEME": 'BOTTLENECK_HL',

    # Whether the Friend of a Friend scoring system is active during FENNEL partitioning.
    # FOAF employs information about a node's friends to determine the best partition when
    # this node arrives at a shelter and no shelter has friends already arrived
    "FENNEL_FRIEND_OF_A_FRIEND_ENABLED": False,

    # Alters how much information to print. Keep it at 1 for this notebook.
    # 0 - will print nothing, useful for batch operations.
    # 1 - prints basic information on assignments and operations.
    # 2 - prints more information as it batches arrivals.
    "verbose": 1
}
#gp = GraphPartitioning(config)
# Optional: shuffle the order of nodes arriving
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(gp.arrival_order)
%pylab inline
import scipy
from copy import deepcopy
iterations = 1000  # the number of individual networks to run

# BOTTLENECK 1 Restream, no FOAF, Lonely after
# change these variables:
ordering_enabled_mode = [True]  # [False, True]

for mode in ordering_enabled_mode:
    # for mode in range(1, 51):
    metricsDataPrediction = []
    metricsDataAssign = []  # NOTE(review): collected nowhere below; kept for compatibility

    config['FENNEL_NODE_REORDERING_ENABLED'] = mode
    config['FENNEL_NODE_REORDERING_SCHEME'] = 'BOTTLENECK_HL'
    config['FENNEL_FRIEND_OF_A_FRIEND_ENABLED'] = False
    print('Mode', mode)

    for i in range(0, iterations):
        if (i % 50) == 0:
            print('Mode', mode, 'Iteration', str(i))

        # Work on a private copy so the '$$' substitutions below do not
        # accumulate in the shared config between iterations.
        conf = deepcopy(config)
        # if mode == 'no_expansion':
        #     config['edge_expansion_enabled'] = False
        # conf["DATA_FILENAME"] = os.path.join(pwd, "data", "predition_model_tests", "network", "network_" + str(i + 1) + ".txt")
        conf["DATA_FILENAME"] = conf["DATA_FILENAME"].replace('$$', str(i + 1))
        conf["SIMULATED_ARRIVAL_FILE"] = conf["SIMULATED_ARRIVAL_FILE"].replace('$$', str(i + 1))
        conf["PREDICTION_LIST_FILE"] = conf["PREDICTION_LIST_FILE"].replace('$$', str(i + 1))
        conf["POPULATION_LOCATION_FILE"] = conf["POPULATION_LOCATION_FILE"].replace('$$', str(i + 1))
        conf["compute_metrics_enabled"] = False
        conf['PREDICTION_MODEL'] = conf['PREDICTION_MODEL'].replace('$$', str(i + 1))
        # print(i, conf)
        # print('config', config)

        with GraphPartitioning(conf) as gp:
            # gp = GraphPartitioning(config)
            gp.verbose = 0
            gp.load_network()
            gp.init_partitioner()
            m = gp.prediction_model()
            m = gp.assign_cut_off()
            m = gp.batch_arrival()
            # Score only the subgraph of nodes that actually arrived.
            Gsub = gp.G.subgraph(gp.nodes_arrived)
            gp.compute_metrics_enabled = True
            m = [gp._print_score(Gsub)]
            gp.compute_metrics_enabled = False
        totalM = len(m)
        metricsDataPrediction.append(m[totalM - 1])

    # CSV-style accumulator strings, one per metric column.
    waste = ''
    cutratio = ''
    ec = ''
    tcv = ''
    qds = ''
    conductance = ''
    maxperm = ''
    nmi = ''
    lonliness = ''
    fscore = ''
    fscoreimprove = ''
    qdsOv = ''   # NOTE(review): unused below
    condOv = ''  # NOTE(review): unused below

    # Per-metric value lists; indices follow the `cols` layout
    # (WASTE=0, CUT RATIO=1, ..., FSCORE=8, FSCORE RELABEL IMPROVEMENT=9, LONELINESS=10).
    dataWaste = []
    dataCutRatio = []
    dataEC = []
    dataTCV = []
    dataQDS = []
    dataCOND = []
    dataMAXPERM = []
    dataNMI = []
    dataLonliness = []
    dataFscore = []
    dataFscoreImprove = []

    for i in range(0, iterations):
        dataWaste.append(metricsDataPrediction[i][0])
        dataCutRatio.append(metricsDataPrediction[i][1])
        dataEC.append(metricsDataPrediction[i][2])
        dataTCV.append(metricsDataPrediction[i][3])
        dataQDS.append(metricsDataPrediction[i][4])
        dataCOND.append(metricsDataPrediction[i][5])
        dataMAXPERM.append(metricsDataPrediction[i][6])
        dataNMI.append(metricsDataPrediction[i][7])
        dataFscore.append(metricsDataPrediction[i][8])
        dataFscoreImprove.append(metricsDataPrediction[i][9])
        dataLonliness.append(metricsDataPrediction[i][10])

        if(len(waste)):
            waste = waste + ','
        waste = waste + str(metricsDataPrediction[i][0])

        if(len(cutratio)):
            cutratio = cutratio + ','
        cutratio = cutratio + str(metricsDataPrediction[i][1])

        if(len(ec)):
            ec = ec + ','
        ec = ec + str(metricsDataPrediction[i][2])

        if(len(tcv)):
            tcv = tcv + ','
        tcv = tcv + str(metricsDataPrediction[i][3])

        if(len(qds)):
            qds = qds + ','
        qds = qds + str(metricsDataPrediction[i][4])

        if(len(conductance)):
            conductance = conductance + ','
        conductance = conductance + str(metricsDataPrediction[i][5])

        if(len(maxperm)):
            maxperm = maxperm + ','
        maxperm = maxperm + str(metricsDataPrediction[i][6])

        if(len(nmi)):
            nmi = nmi + ','
        nmi = nmi + str(metricsDataPrediction[i][7])

        if(len(fscore)):
            fscore = fscore + ','
        fscore = fscore + str(metricsDataPrediction[i][8])

        if(len(fscoreimprove)):
            fscoreimprove = fscoreimprove + ','
        # BUG FIX: previously appended metricsDataPrediction[i][8] (the FSCORE
        # column); the relabel-improvement score lives at index 9, consistent
        # with dataFscoreImprove above.
        fscoreimprove = fscoreimprove + str(metricsDataPrediction[i][9])

        if(len(lonliness)):
            lonliness = lonliness + ','
        lonliness = lonliness + str(dataLonliness[i])

    # Prefix each accumulator with "<NAME>,<run label>,<mean>,<std>," for CSV export.
    # NOTE(review): scipy.mean/scipy.std are deprecated NumPy aliases removed in
    # recent SciPy releases — confirm the pinned SciPy version before upgrading.
    waste = 'WASTE,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataWaste)) + ',' + str(scipy.std(dataWaste)) + ',' + waste
    cutratio = 'CUT_RATIO,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataCutRatio)) + ',' + str(scipy.std(dataCutRatio)) + ',' + cutratio
    ec = 'EC,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataEC)) + ',' + str(scipy.std(dataEC)) + ',' + ec
    tcv = 'TCV,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataTCV)) + ',' + str(scipy.std(dataTCV)) + ',' + tcv
    lonliness = "LONELINESS," + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataLonliness)) + ',' + str(scipy.std(dataLonliness)) + ',' + lonliness
    qds = 'QDS,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataQDS)) + ',' + str(scipy.std(dataQDS)) + ',' + qds
    conductance = 'CONDUCTANCE,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataCOND)) + ',' + str(scipy.std(dataCOND)) + ',' + conductance
    maxperm = 'MAXPERM,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataMAXPERM)) + ',' + str(scipy.std(dataMAXPERM)) + ',' + maxperm
    nmi = 'NMI,' + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataNMI)) + ',' + str(scipy.std(dataNMI)) + ',' + nmi
    fscore = "FSCORE," + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataFscore)) + ',' + str(scipy.std(dataFscore)) + ',' + fscore
    fscoreimprove = "FSCORE_IMPROVE," + 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED']) + ',' + str(scipy.mean(dataFscoreImprove)) + ',' + str(scipy.std(dataFscoreImprove)) + ',' + fscoreimprove

    print(waste)
    print(cutratio)
    print(ec)
    print(tcv)
    print(lonliness)
    print(qds)
    print(conductance)
    print(maxperm)
    # NOTE(review): `nmi` is assembled but never printed in the original;
    # preserved as-is — confirm whether it should be emitted here.
    print(fscore)
    print(fscoreimprove)
```
| github_jupyter |
# Data Scientist Nanodegree
## Supervised Learning
## Project: Finding Donors for *CharityML*
Welcome to the first project of the Data Scientist Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Please specify WHICH VERSION OF PYTHON you are using when submitting this notebook. Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited typically by double-clicking the cell to enter edit mode.
## Getting Started
In this project, you will employ several supervised algorithms of your choice to accurately model individuals' income using data collected from the 1994 U.S. Census. You will then choose the best candidate algorithm from preliminary results and further optimize this algorithm to best model the data. Your goal with this implementation is to construct a model that accurately predicts whether an individual makes more than $50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large of a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publically available features.
The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The dataset was donated by Ron Kohavi and Barry Becker, after being published in the article _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_. You can find the article by Ron Kohavi [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.
----
## Exploring the Data
Run the code cell below to load necessary Python libraries and load the census data. Note that the last column from this dataset, `'income'`, will be our target label (whether an individual makes more than, or at most, $50,000 annually). All other columns are features about each individual in the census database.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the Census dataset
# Load the Census dataset from the notebook's working directory.
data = pd.read_csv("census.csv")
# Success - Display the first record to sanity-check the column layout.
display(data.head(n=1))
```
### Implementation: Data Exploration
A cursory investigation of the dataset will determine how many individuals fit into either group, and will tell us about the percentage of these individuals making more than \$50,000. In the code cell below, you will need to compute the following:
- The total number of records, `'n_records'`
- The number of individuals making more than \$50,000 annually, `'n_greater_50k'`.
- The number of individuals making at most \$50,000 annually, `'n_at_most_50k'`.
- The percentage of individuals making more than \$50,000 annually, `'greater_percent'`.
** HINT: ** You may need to look at the table above to understand how the `'income'` entries are formatted.
```
# Total number of records
n_records = data.shape[0]

# Number of records where individual's income is more than $50,000
n_greater_50k = (data['income'] == '>50K').sum()

# Number of records where individual's income is at most $50,000
n_at_most_50k = (data['income'] == '<=50K').sum()

# Percentage of individuals whose income is more than $50,000
greater_percent = n_greater_50k / n_records * 100

# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
```
** Featureset Exploration **
* **age**: continuous.
* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
* **education-num**: continuous.
* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
* **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other.
* **sex**: Female, Male.
* **capital-gain**: continuous.
* **capital-loss**: continuous.
* **hours-per-week**: continuous.
* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
----
## Preparing the Data
Before data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured — this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with, however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.
### Transforming Skewed Continuous Features
A dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. With the census dataset two features fit this description: '`capital-gain'` and `'capital-loss'`.
Run the code cell below to plot a histogram of these two features. Note the range of the values present and how they are distributed.
```
# Split the data into features and target label.
# income_raw keeps the raw string labels ("<=50K" / ">50K");
# features_raw is everything except the target column.
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
```
For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a <a href="https://en.wikipedia.org/wiki/Data_transformation_(statistics)">logarithmic transformation</a> on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the logarithm successfully.
Run the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed.
```
# Apply log(x + 1) to the two heavy-tailed monetary features; the +1
# offset keeps the transform defined where the raw value is zero.
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data=features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda col: np.log(col + 1))

# Re-plot the feature distributions after the transform.
vs.distribution(features_log_transformed, transformed=True)
```
### Normalizing Numerical Features
In addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'capital-gain'` or `'capital-loss'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as exampled below.
Run the code cell below to normalize each numerical feature. We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.
```
# Rescale every numeric column into [0, 1] with MinMaxScaler so no single
# feature dominates distance-based learners.
from sklearn.preprocessing import MinMaxScaler

numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']

scaler = MinMaxScaler()  # default feature_range=(0, 1)
features_log_minmax_transform = pd.DataFrame(data=features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])

# Show a handful of records with the scaling applied.
display(features_log_minmax_transform.head(n=5))
```
### Implementation: Data Preprocessing
From the table in **Exploring the Data** above, we can see there are several features for each record that are non-numeric. Typically, learning algorithms expect input to be numeric, which requires that non-numeric features (called *categorical variables*) be converted. One popular way to convert categorical variables is by using the **one-hot encoding** scheme. One-hot encoding creates a _"dummy"_ variable for each possible category of each non-numeric feature. For example, assume `someFeature` has three possible entries: `A`, `B`, or `C`. We then encode this feature into `someFeature_A`, `someFeature_B` and `someFeature_C`.
| | someFeature | | someFeature_A | someFeature_B | someFeature_C |
| :-: | :-: | | :-: | :-: | :-: |
| 0 | B | | 0 | 1 | 0 |
| 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 |
| 2 | A | | 1 | 0 | 0 |
Additionally, as with the non-numeric features, we need to convert the non-numeric target label, `'income'` to numerical values for the learning algorithm to work. Since there are only two possible categories for this label ("<=50K" and ">50K"), we can avoid using one-hot encoding and simply encode these two categories as `0` and `1`, respectively. In code cell below, you will need to implement the following:
- Use [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummies#pandas.get_dummies) to perform one-hot encoding on the `'features_log_minmax_transform'` data.
- Convert the target label `'income_raw'` to numerical entries.
- Set records with "<=50K" to `0` and records with ">50K" to `1`.
```
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = None
# TODO: Encode the 'income_raw' data to numerical values
income = None
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Uncomment the following line to see the encoded feature names
# print encoded
```
### Shuffle and Split Data
Now all _categorical variables_ have been converted into numerical features, and all numerical features have been normalized. As always, we will now split the data (both features and their labels) into training and test sets. 80% of the data will be used for training and 20% for testing.
Run the code cell below to perform this split.
```
# Import train_test_split.
# FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# Split the 'features' and 'income' data into training and testing sets
# (80% train / 20% test, fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(features_final,
                                                    income,
                                                    test_size = 0.2,
                                                    random_state = 0)

# Show the results of the split
print("Training set has {} samples.".format(X_train.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
```
----
## Evaluating Model Performance
In this section, we will investigate four different algorithms, and determine which is best at modeling the data. Three of these algorithms will be supervised learners of your choice, and the fourth algorithm is known as a *naive predictor*.
### Metrics and the Naive Predictor
*CharityML*, equipped with their research, knows individuals that make more than \$50,000 are most likely to donate to their charity. Because of this, *CharityML* is particularly interested in predicting who makes more than \$50,000 accurately. It would seem that using **accuracy** as a metric for evaluating a particular model's performace would be appropriate. Additionally, identifying someone that *does not* make more than \$50,000 as someone who does would be detrimental to *CharityML*, since they are looking to find individuals willing to donate. Therefore, a model's ability to precisely predict those that make more than \$50,000 is *more important* than the model's ability to **recall** those individuals. We can use **F-beta score** as a metric that considers both precision and recall:
$$ F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\left( \beta^2 \cdot precision \right) + recall} $$
In particular, when $\beta = 0.5$, more emphasis is placed on precision. This is called the **F$_{0.5}$ score** (or F-score for simplicity).
Looking at the distribution of classes (those who make at most \$50,000, and those who make more), it's clear most individuals do not make more than \$50,000. This can greatly affect **accuracy**, since we could simply say *"this person does not make more than \$50,000"* and generally be right, without ever looking at the data! Making such a statement would be called **naive**, since we have not considered any information to substantiate the claim. It is always important to consider the *naive prediction* for your data, to help establish a benchmark for whether a model is performing well. That being said, using that prediction would be pointless: If we predicted all people made less than \$50,000, *CharityML* would identify no one as donors.
#### Note: Recap of accuracy, precision, recall
** Accuracy ** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).
** Precision ** tells us what proportion of messages we classified as spam, actually were spam.
It is a ratio of true positives(words classified as spam, and which are actually spam) to all positives(all words classified as spam, irrespective of whether that was the correct classification), in other words it is the ratio of
`[True Positives/(True Positives + False Positives)]`
** Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.
It is a ratio of true positives(words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of
`[True Positives/(True Positives + False Negatives)]`
For classification problems that are skewed in their classification distributions like in our case, for example if we had a 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam(including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam(all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is weighted average(harmonic mean) of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score(we take the harmonic mean as we are dealing with ratios).
### Question 1 - Naive Predictor Performace
* If we chose a model that always predicted an individual made more than $50,000, what would that model's accuracy and F-score be on this dataset? You must use the code cell below and assign your results to `'accuracy'` and `'fscore'` to be used later.
** Please note ** that the purpose of generating a naive predictor is simply to show what a base model without any intelligence would look like. In the real world, ideally your base model would be either the results of a previous model or could be based on a research paper upon which you are looking to improve. When there is no benchmark model set, getting a result better than random choice is a place you could start from.
** HINT: **
* When we have a model that always predicts '1' (i.e. the individual makes more than 50k) then our model will have no True Negatives(TN) or False Negatives(FN) as we are not making any negative('0' value) predictions. Therefore our Accuracy in this case becomes the same as our Precision(True Positives/(True Positives + False Positives)) as every prediction that we have made with value '1' that should have '0' becomes a False Positive; therefore our denominator in this case is the total number of records we have in total.
* Our Recall score(True Positives/(True Positives + False Negatives)) in this setting becomes 1 as we have no False Negatives.
```
# Naive predictor: always predict '1' (income > $50K).
TP = np.sum(income)       # every true positive is "caught"; 'income' is the
                          # encoded target from the preprocessing step
FP = income.count() - TP  # every negative becomes a false positive
TN = 0                    # no negative predictions are made
FN = 0                    # hence no false negatives either

# Calculate accuracy, precision and recall.
# Accuracy equals precision here because every prediction is positive.
accuracy = TP / (TP + FP)
precision = TP / (TP + FP)
recall = TP / (TP + FN)  # = 1.0, since FN == 0

# F-score with beta = 0.5 weights precision more heavily than recall.
beta = 0.5
fscore = (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)

# Print the results
print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore))
```
### Supervised Learning Models
**The following are some of the supervised learning models that are currently available in** [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html) **that you may choose from:**
- Gaussian Naive Bayes (GaussianNB)
- Decision Trees
- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)
- K-Nearest Neighbors (KNeighbors)
- Stochastic Gradient Descent Classifier (SGDC)
- Support Vector Machines (SVM)
- Logistic Regression
### Question 2 - Model Application
List three of the supervised learning models above that are appropriate for this problem that you will test on the census data. For each model chosen
- Describe one real-world application in industry where the model can be applied.
- What are the strengths of the model; when does it perform well?
- What are the weaknesses of the model; when does it perform poorly?
- What makes this model a good candidate for the problem, given what you know about the data?
** HINT: **
Structure your answer in the same format as above^, with 4 parts for each of the three models you pick. Please include references with your answer.
**Answer: **
### Implementation - Creating a Training and Predicting Pipeline
To properly evaluate the performance of each model you've chosen, it's important that you create a training and predicting pipeline that allows you to quickly and effectively train models using various sizes of training data and perform predictions on the testing data. Your implementation here will be used in the following section.
In the code block below, you will need to implement the following:
- Import `fbeta_score` and `accuracy_score` from [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics).
- Fit the learner to the sampled training data and record the training time.
- Perform predictions on the test data `X_test`, and also on the first 300 training points `X_train[:300]`.
- Record the total prediction time.
- Calculate the accuracy score for both the training subset and testing set.
- Calculate the F-score for both the training subset and testing set.
- Make sure that you set the `beta` parameter!
```
# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
'''
Train `learner` on the first `sample_size` training points, then report
training/prediction times plus accuracy and F-beta scores on the test set
and on the first 300 training points.
inputs:
- learner: the learning algorithm to be trained and predicted on
- sample_size: the size of samples (number) to be drawn from training set
- X_train: features training set
- y_train: income training set
- X_test: features testing set
- y_test: income testing set
'''
# NOTE(review): assignment skeleton -- the None placeholders are meant to be
# filled in by the student; calling time() requires `from time import time`.
results = {}
# TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
start = time() # Get start time
learner = None
end = time() # Get end time
# TODO: Calculate the training time
results['train_time'] = None
# TODO: Get the predictions on the test set(X_test),
# then get predictions on the first 300 training samples(X_train) using .predict()
start = time() # Get start time
predictions_test = None
predictions_train = None
end = time() # Get end time
# TODO: Calculate the total prediction time
results['pred_time'] = None
# TODO: Compute accuracy on the first 300 training samples which is y_train[:300]
results['acc_train'] = None
# TODO: Compute accuracy on test set using accuracy_score()
results['acc_test'] = None
# TODO: Compute F-score on the the first 300 training samples using fbeta_score()
results['f_train'] = None
# TODO: Compute F-score on the test set which is y_test
results['f_test'] = None
# Success
print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
# Return the results
return results
```
### Implementation: Initial Model Evaluation
In the code cell, you will need to implement the following:
- Import the three supervised learning models you've discussed in the previous section.
- Initialize the three models and store them in `'clf_A'`, `'clf_B'`, and `'clf_C'`.
- Use a `'random_state'` for each model you use, if provided.
- **Note:** Use the default settings for each model — you will tune one specific model in a later section.
- Calculate the number of records equal to 1%, 10%, and 100% of the training data.
- Store those values in `'samples_1'`, `'samples_10'`, and `'samples_100'` respectively.
**Note:** Depending on which algorithms you chose, the following implementation may take some time to run!
```
# TODO: Import the three supervised learning models from sklearn
# TODO: Initialize the three models
# NOTE(review): assignment skeleton -- replace the None placeholders with
# instantiated sklearn classifiers (set random_state where available).
clf_A = None
clf_B = None
clf_C = None
# TODO: Calculate the number of samples for 1%, 10%, and 100% of the training data
# HINT: samples_100 is the entire training set i.e. len(y_train)
# HINT: samples_10 is 10% of samples_100 (ensure to set the count of the values to be `int` and not `float`)
# HINT: samples_1 is 1% of samples_100 (ensure to set the count of the values to be `int` and not `float`)
samples_100 = None
samples_10 = None
samples_1 = None
# Collect results on the learners
results = {}
# Run every classifier on each of the three training-set sizes and
# collect the timing/score dictionaries returned by train_predict.
for clf in [clf_A, clf_B, clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = \
train_predict(clf, samples, X_train, y_train, X_test, y_test)
# Run metrics visualization for the three supervised learning models chosen
vs.evaluate(results, accuracy, fscore)
```
----
## Improving Results
In this final section, you will choose from the three supervised learning models the *best* model to use on the student data. You will then perform a grid search optimization for the model over the entire training set (`X_train` and `y_train`) by tuning at least one parameter to improve upon the untuned model's F-score.
### Question 3 - Choosing the Best Model
* Based on the evaluation you performed earlier, in one to two paragraphs, explain to *CharityML* which of the three models you believe to be most appropriate for the task of identifying individuals that make more than \$50,000.
** HINT: **
Look at the graph at the bottom left from the cell above(the visualization created by `vs.evaluate(results, accuracy, fscore)`) and check the F score for the testing set when 100% of the training set is used. Which model has the highest score? Your answer should include discussion of the:
* metrics - F score on the testing when 100% of the training data is used,
* prediction/training time
* the algorithm's suitability for the data.
**Answer: **
### Question 4 - Describing the Model in Layman's Terms
* In one to two paragraphs, explain to *CharityML*, in layman's terms, how the final model chosen is supposed to work. Be sure that you are describing the major qualities of the model, such as how the model is trained and how the model makes a prediction. Avoid using advanced mathematical jargon, such as describing equations.
** HINT: **
When explaining your model, if using external resources please include all citations.
**Answer: **
### Implementation: Model Tuning
Fine tune the chosen model. Use grid search (`GridSearchCV`) with at least one important parameter tuned with at least 3 different values. You will need to use the entire training set for this. In the code cell below, you will need to implement the following:
- Import [`sklearn.grid_search.GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) and [`sklearn.metrics.make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html).
- Initialize the classifier you've chosen and store it in `clf`.
- Set a `random_state` if one is available to the same state you set before.
- Create a dictionary of parameters you wish to tune for the chosen model.
- Example: `parameters = {'parameter' : [list of values]}`.
- **Note:** Avoid tuning the `max_features` parameter of your learner if that parameter is available!
- Use `make_scorer` to create an `fbeta_score` scoring object (with $\beta = 0.5$).
- Perform grid search on the classifier `clf` using the `'scorer'`, and store it in `grid_obj`.
- Fit the grid search object to the training data (`X_train`, `y_train`), and store it in `grid_fit`.
**Note:** Depending on the algorithm chosen and the parameter list, the following implementation may take some time to run!
```
# TODO: Import 'GridSearchCV', 'make_scorer', and any other necessary libraries
# TODO: Initialize the classifier
# NOTE(review): assignment skeleton -- the None placeholders are to be filled
# in by the student before the evaluation code below can run.
clf = None
# TODO: Create the parameters list you wish to tune, using a dictionary if needed.
# HINT: parameters = {'parameter_1': [value1, value2], 'parameter_2': [value1, value2]}
parameters = None
# TODO: Make an fbeta_score scoring object using make_scorer()
scorer = None
# TODO: Perform grid search on the classifier using 'scorer' as the scoring method using GridSearchCV()
grid_obj = None
# TODO: Fit the grid search object to the training data and find the optimal parameters using fit()
grid_fit = None
# Get the estimator
best_clf = grid_fit.best_estimator_
# Make predictions using the unoptimized and model
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
# Report the before-and-afterscores
print("Unoptimized model\n------")
print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5)))
print("\nOptimized Model\n------")
print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
```
### Question 5 - Final Model Evaluation
* What is your optimized model's accuracy and F-score on the testing data?
* Are these scores better or worse than the unoptimized model?
* How do the results from your optimized model compare to the naive predictor benchmarks you found earlier in **Question 1**?_
**Note:** Fill in the table below with your results, and then provide discussion in the **Answer** box.
#### Results:
| Metric | Unoptimized Model | Optimized Model |
| :------------: | :---------------: | :-------------: |
| Accuracy Score | | |
| F-score | | EXAMPLE |
**Answer: **
----
## Feature Importance
An important task when performing supervised learning on a dataset like the census data we study here is determining which features provide the most predictive power. By focusing on the relationship between only a few crucial features and the target label we simplify our understanding of the phenomenon, which is almost always a useful thing to do. In the case of this project, that means we wish to identify a small number of features that most strongly predict whether an individual makes at most or more than \$50,000.
Choose a scikit-learn classifier (e.g., adaboost, random forests) that has a `feature_importances_` attribute, which ranks the importance of features according to the chosen classifier. In the next python cell fit this classifier to the training set and use this attribute to determine the top 5 most important features for the census dataset.
### Question 6 - Feature Relevance Observation
When **Exploring the Data**, it was shown there are thirteen available features for each individual on record in the census data. Of these thirteen records, which five features do you believe to be most important for prediction, and in what order would you rank them and why?
**Answer:**
### Implementation - Extracting Feature Importance
Choose a `scikit-learn` supervised learning algorithm that has a `feature_importances_` attribute available for it. This attribute ranks the importance of each feature when making predictions based on the chosen algorithm.
In the code cell below, you will need to implement the following:
- Import a supervised learning model from sklearn if it is different from the three used earlier.
- Train the supervised model on the entire training set.
- Extract the feature importances using `'.feature_importances_'`.
```
# TODO: Import a supervised learning model that has 'feature_importances_'
# TODO: Train the supervised model on the training set using .fit(X_train, y_train)
# NOTE(review): skeleton -- `model` must be a fitted estimator exposing
# .feature_importances_ before the plot below can run.
model = None
# TODO: Extract the feature importances using .feature_importances_
importances = None
# Plot the five most important features against the training data.
vs.feature_plot(importances, X_train, y_train)
```
### Question 7 - Extracting Feature Importance
Observe the visualization created above which displays the five most relevant features for predicting if an individual makes at most or above \$50,000.
* How do these five features compare to the five features you discussed in **Question 6**?
* If you were close to the same answer, how does this visualization confirm your thoughts?
* If you were not close, why do you think these features are more relevant?
**Answer:**
### Feature Selection
How does a model perform if we only use a subset of all the available features in the data? With less features required to train, the expectation is that training and prediction time is much lower — at the cost of performance metrics. From the visualization above, we see that the top five most important features contribute more than half of the importance of **all** features present in the data. This hints that we can attempt to *reduce the feature space* and simplify the information required for the model to learn. The code cell below will use the same optimized model you found earlier, and train it on the same training set *with only the top five important features*.
```
# Import functionality for cloning a model
from sklearn.base import clone
# Reduce the feature space
# Keep only the columns of the 5 largest entries in `importances`
# (argsort descending, take the first five indices).
X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]
X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]
# Train on the "best" model found from grid search earlier
# clone() gives an unfitted copy with the same hyperparameters.
clf = (clone(best_clf)).fit(X_train_reduced, y_train)
# Make new predictions
reduced_predictions = clf.predict(X_test_reduced)
# Report scores from the final model using both versions of data
print("Final Model trained on full data\n------")
print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
print("\nFinal Model trained on reduced data\n------")
print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5)))
```
### Question 8 - Effects of Feature Selection
* How does the final model's F-score and accuracy score on the reduced data using only five features compare to those same scores when all features are used?
* If training time was a factor, would you consider using the reduced data as your training set?
**Answer:**
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| github_jupyter |
# Class activation map evaluation
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
import json
import os
import pandas as pd
from pocovidnet.evaluate_covid19 import Evaluator
from pocovidnet.grad_cam import GradCAM
from pocovidnet.cam import get_class_activation_map
from pocovidnet.model import get_model
```
## Code to crop ICLUS videos automatically
```
# Interactive tool: show crop previews for selected ICLUS videos and let the
# user accept ("1") or type a new [bottom, top, left, right] crop list; the
# confirmed crops are written back to ICLUS_cropping.json.
with open(os.path.join("../../../../data_pocovid/results_oct_wrong_crossval/iclus/", 'ICLUS_cropping.json'), "r") as infile:
frame_cut = json.load(infile)
bottom = 70 # 90
top = 570 # 542
left = 470 # 480
right = 970 # 932
# [70:570, 470:970]
crop = [bottom, top, left, right]
data_dir = "../../../data/ICLUS"
for subfolder in os.listdir(data_dir):
# skip linear-probe folders, hidden entries, and non-directories
if "linear" in subfolder.lower() or subfolder.startswith(".") or not os.path.isdir(os.path.join(data_dir,subfolder)):
continue
for vid in os.listdir(os.path.join(data_dir, subfolder)):
vid_id = vid.split(".")[0]
if vid.startswith("."):
continue
print("process next file ", vid)
# only re-crop these two videos in this run (others already done)
if vid_id not in ["40", "42"]: # frame_cut.keys():
continue
video_path = os.path.join(data_dir, subfolder, vid)
crop = frame_cut[vid_id]
while True:
bottom, top, left, right = crop
cap = cv2.VideoCapture(video_path)
# count = 0
# while cap.isOpened() and count< 1:
# show the 3rd frame with the current crop applied
for _ in range(3):
ret, frame = cap.read()
plt.imshow(frame[bottom:top, left:right])
plt.show()
crop_in = input("okay?")
# NOTE(review): input() returns str, so `crop_in == 1` is always False;
# only the == "1" comparison can accept the crop.
if crop_in == 1 or crop_in =="1":
frame_cut[vid_id] = crop
break
crop_in = input("input list " + str(crop))
# HACK: eval() on interactive input -- acceptable for a local notebook
# tool, but never use on untrusted input (ast.literal_eval is safer).
crop = eval(crop_in)
print(crop)
# out_iclus_data = "../results_oct/iclus"
with open(os.path.join(data_dir, 'ICLUS_cropping.json'), "w") as outfile:
json.dump(frame_cut, outfile)
```
### ICLUS evaluation:
```
# Load the ICLUS severity table, keep only convex-probe videos, and
# cross-check that our local video folder, the severity table, and the
# crop dictionary (frame_cut, loaded in an earlier cell) cover the same IDs.
severity = pd.read_csv("../../../data/iclus_severity.csv", delimiter=";")
convex_table = severity[severity["filename"].str.contains("convex")]
convex_vids = convex_table["Video"]
# Make list of IDs that we analyze
data_dir = "../../../data/ICLUS"
process_vid_numbers = []
for subfolder in os.listdir(data_dir):
# skip linear-probe folders, hidden entries, and plain files
if "linear" in subfolder.lower() or subfolder.startswith(".") or os.path.isfile(os.path.join(data_dir,subfolder)):
continue
for vid in os.listdir(os.path.join(data_dir, subfolder)):
vid_id = vid.split(".")[0]
if vid.startswith("."):
continue
video_path = os.path.join(data_dir, subfolder, vid)
# print(int(vid.split(".")[0]) in convex_vids)
process_vid_numbers.append(int(vid.split(".")[0]) )
# Check whether we cover all videos
for vid in convex_vids.values:
if vid not in process_vid_numbers:
print("In ICLUS tabelle but not in our folder:", vid)
if str(vid) not in frame_cut.keys():
print("not in crop dict:", vid)
for vid in process_vid_numbers:
if vid not in convex_vids.values:
print("In our folder but not in ICLUS:", vid)
# Make label dict: video id -> severity score
iclus_labels = dict(zip(convex_table["Video"], convex_table["Score"]))
# NOTE(review): res_dir and vid_id here rely on values from other cells
# (res_dir is defined in a later cell) -- notebook cell-order artifact.
in_path = os.path.join(res_dir, f"cam_{vid_id}.npy")
os.path.exists(in_path)
```
### Analyze results
```
# 6 normal (Gabriel, but here 1), 25 normal (Gabriel), but here 3
# Compare saved per-frame logits against the ICLUS severity labels, then
# collect the unique file IDs used in the NeurIPS cross-validation split.
iclus_labels
# directory with numpy files
len(iclus_labels.keys())
res_dir = "../../../../data_pocovid/results_oct_wrong_crossval/iclus/base"
gt, preds, pred_probs = list(), list(), list()
print("gt pred")
for vid_id in iclus_labels.keys():
in_path = os.path.join(res_dir, f"cam_{vid_id}.npy")
if not os.path.exists(in_path):
print("Warning: logits do not exist", in_path)
continue
logits = np.load(in_path)
# mean probability of class 0 (covid) across frames
prob = np.mean(logits[:, 0])
# video-level class: argmax of the frame-averaged probabilities
avg_covid_prob = np.argmax(np.mean(logits, axis=0)) #
# print(avg_covid_prob)
gt.append(iclus_labels[vid_id])
pred_probs.append(prob)
preds.append(avg_covid_prob)
# report clearly inconsistent cases (high severity predicted healthy, or vice versa)
if iclus_labels[vid_id]>2 and avg_covid_prob==2 or iclus_labels[vid_id]==0 and avg_covid_prob==0:
print("wrong, severity is ", iclus_labels[vid_id], "pred is", avg_covid_prob,"video:", vid_id)
# print(gt[-1], preds[-1])
plt.scatter(gt, pred_probs)
plt.plot([0,3], [0,1])
plt.show()
# Collect unique file stems used in the NeurIPS cross-validation folders.
check = "../../models/cross_validation_neurips/"
file_list = list()
for folder in os.listdir(check):
if folder[0] == ".":
continue
for classe in os.listdir(os.path.join(check, folder)):
if classe[0] == "." or classe[0] == "u":
continue
uni = []
is_image = 0
for file in os.listdir(os.path.join(check, folder, classe)):
if file[0] == ".":
continue
# files with exactly one dot are treated as images
if len(file.split(".")) == 2:
is_image += 1
uni.append(file.split(".")[0])
file_list.extend(np.unique(uni).tolist())
with open("../../models/in_neurips.json", "w") as outfile:
json.dump(file_list, outfile)
```
### Old video evaluator
```
from skvideo import io
class VideoEvaluator(Evaluator):
"""Frame-wise evaluation of ultrasound videos with optional CAM overlays.

Reads a video, preprocesses each frame, runs all restored models on the
frames, and can write a video with class-activation-map overlays on the
most confident frames.
"""
# NOTE(review): weights_dir is accepted but never used here -- presumably
# consumed by the Evaluator base class or dead; confirm against pocovidnet.
def __init__(self, weights_dir="../trained_models_cam", ensemble=True, split=None, model_id=None, num_classes=3):
Evaluator.__init__(
self, ensemble=ensemble, split=split, model_id=model_id, num_classes=num_classes
)
def __call__(self, video_path):
"""Performs a forward pass through the restored model
Arguments:
video_path: str -- file path to a video to process. Possibly types are mp4, gif, mpeg
return_cams: int -- number of frames to return with activation maps overlayed. If zero,
only the predictions will be returned. Always selects the frames with
highest probability for the predicted class
Returns:
cams: if return_cams>0, images with overlay are returned as a np.array of shape
{number models} x {return_cams} x 224 x 224 x 3
mean_preds: np array of shape {video length} x {number classes}. Contains
class probabilities per frame
"""
# cache frames and per-model predictions for later CAM generation
self.image_arr = self.read_video(video_path)
self.predictions = np.stack([model.predict(self.image_arr) for model in self.models])
mean_preds = np.mean(self.predictions, axis=0, keepdims=False)
# NOTE(review): class_idx is computed but unused -- only mean_preds is returned
class_idx = np.argmax(np.mean(np.array(mean_preds), axis=0))
return mean_preds
def cam_important_frames(self, class_idx, threshold=0.5, nr_cams=None, zeroing=0.65, save_video_path=None): # "out_video.mp4"):
"""Overlay CAMs on the frames most confident for class_idx.

Must be called after __call__ (uses self.predictions / self.image_arr).
Returns the overlay array, or writes an .mpeg when save_video_path is set.
"""
mean_preds = np.mean(self.predictions, axis=0, keepdims=False)
# compute general video class
# class_idx = np.argmax(np.mean(np.array(mean_preds), axis=0))
prediction = np.argmax(np.mean(np.array(mean_preds), axis=0))
print("predicted", prediction, "gt", class_idx)
print("pred probs covid", [round(m, 2) for m in mean_preds[:,0]])
# get most important frames (the ones above threshold)
if nr_cams is not None:
best_frames = np.argsort(mean_preds[:, class_idx])[-nr_cams:]
else:
best_frames = np.where(mean_preds[:, class_idx]>threshold)[0]
# best_frames = np.arange(len(mean_preds))
print("frames above threshold", best_frames)
return_cams = len(best_frames)
if len(best_frames)==0:
print("no frame above threshold")
return 0
# copy image arr - need values between 0 and 255
copied_arr = (self.image_arr.copy() * 255).astype(int)
cams = np.zeros((return_cams, 224, 224, 3))
for j, b_frame in enumerate(best_frames):
# get highest prob model for these frames
model_idx = np.argmax(self.predictions[:, b_frame, class_idx], axis=0)
take_model = self.models[model_idx]
# CAM-style models get a plain class-activation map; others use Grad-CAM
if "cam" in self.model_id:
in_img = np.expand_dims(self.image_arr[b_frame], 0)
# print(in_img.shape)
cams[j] = get_class_activation_map(take_model, in_img, class_idx, image_weight=1, zeroing=zeroing).astype(int)
else:
# run grad cam for other models
gradcam = GradCAM()
cams[j] = gradcam.explain(self.image_arr[b_frame], take_model, class_idx, return_map=False,image_weight=1, layer_name="block5_conv3", zeroing=zeroing, heatmap_weight=0.25)
if save_video_path is None:
return cams
else:
# splice the overlays back into the full video and slow it down 3x
for j in range(return_cams):
copied_arr[best_frames[j]] = cams[j]
copied_arr = np.repeat(copied_arr, 3, axis=0)
io.vwrite(save_video_path+".mpeg", copied_arr, outputdict={"-vcodec":"mpeg2video"})
def read_video(self, video_path):
"""Read all frames of a video and preprocess them to (224, 224) RGB in [0, 1].

Note: the very first frame is only displayed (visual crop check) and is
NOT included in the returned array (the `continue` skips its append).
"""
assert os.path.exists(video_path), "video file not found"
cap = cv2.VideoCapture(video_path)
images = []
counter = 0
while cap.isOpened():
ret, frame = cap.read()
if (ret != True):
break
if counter<1:
plt.imshow(frame[30:360, 100:430]) # ICLUS: [70:570, 470:970]) # [25:350, 100:425]) # LOTTE:[30:400, 80:450]
plt.show()
counter += 1
continue
counter += 1
img_processed = self.preprocess(frame)[0]
images.append(img_processed)
cap.release()
return np.array(images)
def preprocess(self, image, cut=True):
"""Apply image preprocessing pipeline
Arguments:
image {np.array} -- Arbitrary shape, quadratic preferred
Returns:
np.array -- Shape 224,224. Normalized to [0, 1].
"""
if cut:
# hard-coded crop region matching the display in read_video
image = image[30:360, 100:430]
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (224, 224))
image = np.expand_dims(np.array(image), 0) / 255.0
return image
def important_frames(self, preds, predicted_class, n_return=5):
"""Return indices of the n_return frames with highest predicted_class score."""
preds_arr = np.array(preds)
frame_scores = preds_arr[:, predicted_class]
best_frames = np.argsort(frame_scores)[-n_return:]
return best_frames
evaluator = VideoEvaluator(ensemble=True, model_id="vgg_cam", num_classes=4)
```
### Run ICLUS data
```
def pred_plot(preds, save_path):
    """Plot per-frame class probabilities (covid / pneu / healthy) and save as <save_path>.png."""
    class_labels = ["covid", "pneu", "healthy"]
    plt.figure(figsize=(15, 8))
    for col, lab in enumerate(class_labels):
        plt.plot(preds[:, col], label=lab)
    plt.legend()
    plt.savefig(save_path + ".png")
    plt.show()
# plt.plot(preds[:,1], label="pneu")
# Run the VideoEvaluator over every video in iclus_dir, saving per-frame
# predictions (.npy), a probability plot, and a CAM-overlay video per file.
# iclus_dir = "Videos_31_to_40"
iclus_dir = "test_data_regular/pat2"
# iclus_dir = "data/pocus_videos/convex/"
# out_iclus_data = "vids_preds_regular_test"
# out_iclus_data = "vids_preds_iclus"
out_iclus_data = "reg_test/pat2"
# ground-truth class for this folder (2 = healthy/regular)
GT_CLASS = 2
for vid in os.listdir(iclus_dir):
vid_id = vid.split(".")[0]
# skip hidden files and videos whose predictions were already saved
if vid.startswith(".") or os.path.exists(os.path.join(out_iclus_data,"cam_"+vid_id+".npy")):
print("already done", vid)
continue
print("process next file ", vid)
preds = evaluator(os.path.join(iclus_dir, vid))
np.save(os.path.join(out_iclus_data,"cam_"+vid_id+".npy"), preds)
plt.imshow(evaluator.image_arr[0])
plt.savefig(os.path.join(out_iclus_data,"cam_"+vid_id+"expl_img.png"))
print("saved predictions")
pred_plot(preds, os.path.join(out_iclus_data,"cam_"+vid_id))
print("saved plot")
evaluator.cam_important_frames(GT_CLASS, save_video_path=os.path.join(out_iclus_data, "cam_"+vid_id))
```
#### ICLUS notes:
47 wrongly predicted, but acceptable
look weird: 48, 49, 50 (linear, or what is this? all predicted as healthy)
Must do again: 36
13, 11, 31, 32: linear probes that are deleted, 22, 24, 26 (they are all kept), 28
12, 15, 16, 17, 18, 19, 20 were fine already with bad cropping
1, 3, 9, 10 is fine already
NEW PROCESSED: 14, 8, 7, 6, 4, 5, 2
CODE TO PROCESS SOME AGAIN:
if os.path.exists("vids_preds_iclus/cam_vid"+vid_id+".npy"):
preds_prev = np.load("vids_preds_iclus/cam_vid"+vid_id+".npy")
predicted_class = np.argmax(np.mean(np.array(preds_prev), axis=0))
print(predicted_class, np.mean(np.array(preds_prev), axis=0))
if predicted_class==0:
print("file is already predicted covid", vid)
continue
### Evaluate on train data
```
# Generate CAM-overlay videos for every training video whose filename prefix
# encodes its class (Cov=0, Pne/pne=1, Reg=2); skips already-processed files.
vid_in_path = "../../data/pocus_videos/Convex/"
gt_dict = {"Cov":0, "Reg":2, "Pne":1, "pne":1}
out_path="vid_outputs_cam"
for vid in os.listdir(vid_in_path):
# unknown prefix -> report and skip
if vid[:3] not in ["Pne", "pne", "Cov", "Reg"]:
print(vid)
continue
if os.path.exists(os.path.join(out_path, vid.split(".")[0]+".mpeg")):
print("already done", vid)
continue
vid_in = os.path.join(vid_in_path, vid)
print(vid_in)
preds = evaluator(vid_in)
gt = gt_dict[vid[:3]]
evaluator.cam_important_frames(gt, save_video_path=os.path.join(out_path, vid.split(".")[0]))
```
### Evaluate on test data
```
# Per-fold test evaluation: for each cross-validation split, restore that
# fold's model and write CAM-overlay videos for the fold's test videos.
# NOTE(review): relies on vid_in_path and gt_dict from the previous cell.
out_path_overall="vid_outputs_cam_test/"
path_crossval = "../../data/cross_validation"
per_split = [[] for _ in range(5)]
for fold in range(5):
out_path = os.path.join(out_path_overall, "fold"+str(fold))
# load weights of the respective fold model
print("NEW FOLD", fold)
# make sure the variable is cleared
evaluator = None
# load weights
evaluator = VideoEvaluator(ensemble=False, split=fold, model_id="vgg_cam", num_classes=4)
# get all names belonging to this fold
vidnames = []
for mod in ["covid", "pneumonia", "regular"]:
for f in os.listdir(os.path.join(path_crossval, "split"+str(fold), mod)):
if f[0]!=".":
# reconstruct "<stem>.<ext>" from frame filenames (ext truncated to 3 chars)
fparts = f.split(".")
vidnames.append(fparts[0]+"."+fparts[1][:3])
# iterate over the relevant files
names = np.unique(vidnames)
for name in names:
# only process actual video files
if name[-3:] in ["mp4", "mov", "gif"]:
print(name)
vid_in = os.path.join(vid_in_path, name)
if not os.path.exists(vid_in):
print("does not exist! - butterfly?", vid_in)
continue
if os.path.exists(os.path.join(out_path, name.split(".")[0]+".mpeg")):
print("already done", name)
continue
print(vid_in)
preds = evaluator(vid_in)
gt = gt_dict[name[:3]]
evaluator.cam_important_frames(gt, save_video_path=os.path.join(out_path, name.split(".")[0]))
```
## Make point plot for CAMs
```
def max_kernel(heatmap, kernel_size=9):
    """Locate a smoothed maximum of a 2D heatmap.

    Zero-pads the map, takes a (kernel_size x kernel_size) window around the
    raw argmax, then picks the best 3x3 box-mean inside that window.

    Arguments:
        heatmap: 2d numpy array of activation values
        kernel_size: side length of the search window (odd)
    Returns:
        (row, col) of the smoothed maximum, in original heatmap coordinates
    """
    half = kernel_size // 2
    padded = np.pad(heatmap, ((half, half), (half, half)), 'constant', constant_values=0)
    # raw peak position, expressed in padded coordinates
    peak_r, peak_c = divmod(np.argmax(padded), padded.shape[1])
    window = padded[peak_r - half:peak_r + half + 1, peak_c - half:peak_c + half + 1]
    # 3x3 box-filter the window; valid positions only
    side = kernel_size - 2
    means = np.zeros((side, side))
    for r in range(side):
        for c in range(side):
            means[r, c] = np.mean(window[r:r + 3, c:c + 3])
    best_r, best_c = divmod(np.argmax(means), side)
    # map back to heatmap coordinates: window origin is peak-half, the 3x3
    # box center adds +1, and the padding offset subtracts another half
    return peak_r + best_r - 2 * half + 1, peak_c + best_c - 2 * half + 1
# max_kernel((np.random.rand(10,10)*20).astype(int))
def convolve_faster(img, kernel):
    """
    2D-convolve *img* with a square *kernel* using a strided sliding-window
    view and einsum, avoiding Python-level inner loops.
    :param img: 2d numpy array
    :param kernel: 2d numpy array (must have equal height and width)
    :return: convolved image (same size as img for odd kernel sizes)
    """
    size = len(kernel)
    pad = size // 2
    padded = np.pad(img, ((pad, pad), (pad, pad)))
    # shape/strides describing a (size, size, H, W) view of all windows
    view_shape = kernel.shape + tuple(np.subtract(padded.shape, kernel.shape) + 1)
    windows = np.lib.stride_tricks.as_strided(
        padded, shape=view_shape, strides=padded.strides * 2
    )
    # weighted sum of every window with the kernel
    return np.einsum('ij,ijkl->kl', kernel, windows)
# in_img = np.random.rand(20,20)
# plt.imshow(in_img)
# plt.show()
# out = convolve_faster(in_img, np.ones((7,7)))
# plt.imshow(out)
# plt.show()
# print(in_img.shape, out.shape)
```
### Process all test data
```
# For every cross-validation fold: restore that fold's model, run it on all
# split images, compute a Grad-CAM heatmap per image, and record the smoothed
# heatmap-maximum coordinates plus predictions into a CSV / .npy dump.
path_crossval = "../../data/cross_validation"
gt_dict = {"Reg":2, "Pne":1, "pne":1, "Cov":0}
gradcam = GradCAM()
all_predictions = []
heatmap_points, predicted, gt_class, overlays, fnames = [], [], [], [], []
for fold in range(5):
# load weights of the respective fold model
print("NEW FOLD", fold)
# make sure the variable is cleared
evaluator = None
# load weights
evaluator = Evaluator(ensemble=False, split=fold, model_id="vgg_base", num_classes=4)
# get all names belonging to this fold
all_images_arr = []
gt, name = [], []
for mod in ["covid", "pneumonia", "regular"]:
for f in os.listdir(os.path.join(path_crossval, "split"+str(fold), mod)):
if f[0]!=".":
# fparts = f.split(".")
# vidnames.append(fparts[0]+"."+fparts[1][:3])
img_loaded = cv2.imread(os.path.join(path_crossval, "split"+str(fold), mod, f))
img_preprocc = evaluator.preprocess(img_loaded)[0]
# label is encoded in the 3-char filename prefix
gt.append(gt_dict[f[:3]])
all_images_arr.append(img_preprocc)
name.append(f)
all_images_arr = np.array(all_images_arr)
# get predictions
print("process all images in fold", fold, "with shape", all_images_arr.shape)
fold_preds = evaluator.models[0].predict(all_images_arr)
class_idx_per_img = np.argmax(fold_preds, axis=1)
all_predictions.append(fold_preds)
# get heatmap
for i, img in enumerate(all_images_arr):
# plt.imshow(img)
# plt.show()
# overlay, heatmap = get_class_activation_map(evaluator.models[0], img, gt[i], image_weight=1, return_map=True, zeroing=0.65)
# Grad-CAM for the ground-truth class of this image
overlay, heatmap = gradcam.explain(img, evaluator.models[0], gt[i], return_map=True, image_weight=1, layer_name="block5_conv3", zeroing=0.65, heatmap_weight=0.25)
# plt.figure(figsize=(10,10))
# plt.imshow(overlay.astype(int))
# plt.show()
overlays.append(overlay.astype(int))
# convolve with big kernel to smooth before taking the maximum position
convolved_overlay = convolve_faster(heatmap, np.ones((19,19)))
# print("previously:", divmod(np.argmax(heatmap.flatten()), len(heatmap[0])))
x_coord, y_coord = divmod(np.argmax(convolved_overlay.flatten()), len(convolved_overlay[0]))
## previous version: 9x9 umkreis and 3x3 kernel
# x_coord, y_coord = max_kernel(heatmap) # np.where(heatmap==np.max(heatmap))
# print(x_coord, y_coord)
heatmap_points.append([x_coord, y_coord])
predicted.append(class_idx_per_img[i])
gt_class.append(gt[i])
fnames.append(name[i])
# print([x_coord, y_coord], class_idx_per_img[i], gt[i])
# sanity check: all per-image lists stay aligned
len(predicted), len(gt_class), len(heatmap_points), np.asarray(overlays).shape
np.where(np.asarray(predicted)==3)
hm_p = np.array(heatmap_points)
print(hm_p.shape)
# plt.figure(figsize=(20,20))
plt.imshow(overlays[1])
plt.scatter(hm_p[:,1], hm_p[:,0], c=predicted)
plt.show()
# (duplicated plotting cell below -- notebook artifact)
hm_p = np.array(heatmap_points)
print(hm_p.shape)
# plt.figure(figsize=(20,20))
plt.imshow(overlays[1])
plt.scatter(hm_p[:,1], hm_p[:,0], c=predicted)
plt.show()
# Persist per-image results for the downstream MD-comments analysis.
df = pd.DataFrame()
df["file"] = fnames
df["predicted"] = predicted
df["gt"] = gt_class
df["max_x"] = np.asarray(heatmap_points)[:,0].tolist()
df["max_y"] = np.asarray(heatmap_points)[:,1].tolist()
df.to_csv("heatmap_points_grad.csv")
np.save("overlayed_hm.npy", overlays)
df
```
## ICLUS evaluation
```
# out_iclus_data = "vids_preds_regular_test"
# Frame- and video-level accuracy over the saved ICLUS predictions, where
# class 0 (covid) is the expected class for every video.
out_iclus_data = "vids_preds_iclus"
all_class_preds = []
correct_frames = 0
wrong_frames = 0
avg_corr_frames = []
all_frames = 0
# plt.figure(figsize=(20,10))
for f in os.listdir(out_iclus_data):
if f[-3:]=="npy":
preds = np.load(os.path.join(out_iclus_data, f))
# plt.plot(preds[:,0])
# print(preds.shape)
# frame based
frame_pred = np.argmax(preds, axis=1)
all_frames += len(frame_pred)
correct_frames += np.sum(frame_pred==0)
wrong_frames += np.sum(frame_pred!=0)
avg_corr_frames.append(np.sum(frame_pred==0)/len(frame_pred))
# video classification - majority vote
uni, counts = np.unique(frame_pred, return_counts=True)
# all_class_preds.append(uni[np.argmax(counts)])
# version with probabilities and not majority vote:
vid_class_pred = np.argmax(np.mean(preds, axis=0))
all_class_preds.append(vid_class_pred)
if all_class_preds[-1]!=0:
print("wrongly classified", f)
# print(wrong_frames+ correct_frames, all_frames)
print("Included in total ICLUS videos (without linear probes):", len(all_class_preds))
assert all_frames==wrong_frames+correct_frames
print("Frame accuracy:", correct_frames/float(all_frames))
print("video class accuracy (max avg probability): ", np.sum(np.array(all_class_preds)==0)/len(all_class_preds))
print("Mean and std of ratio of correctly classified frames per video:", np.mean(avg_corr_frames), np.std(avg_corr_frames))
# plt.show()
# keep the video-level predictions for the combined sensitivity analysis below
iclus_preds = all_class_preds
```
## Evaluation Lotte's test data
```
# Evaluate the saved predictions on the regular (healthy, class 2) test
# videos, then compute sensitivity/precision/accuracy over ICLUS (covid)
# plus regular videos combined.
# FIX: the sklearn metrics were previously imported AFTER their first use
# (recall_score/precision_score calls below), raising NameError when the
# cell runs top-to-bottom -- the import is now hoisted above the calls.
from sklearn.metrics import recall_score, precision_score, accuracy_score

reg_test_data = "vid_outputs_REGULAR"
all_class_preds = []
correct_frames = 0
wrong_frames = 0
avg_corr_frames = []
all_frames = 0
for subdir in os.listdir(reg_test_data):
    if subdir[0] == ".":
        continue
    print(subdir)
    for f in os.listdir(os.path.join(reg_test_data, subdir)):
        if f[-3:] == "npy":
            preds = np.load(os.path.join(reg_test_data, subdir, f))
            print(os.path.join(reg_test_data, subdir, f))
            # frame-based accuracy: class 2 (healthy/regular) is ground truth here
            frame_pred = np.argmax(preds, axis=1)
            all_frames += len(frame_pred)
            correct_frames += np.sum(frame_pred == 2)
            wrong_frames += np.sum(frame_pred != 2)
            avg_corr_frames.append(np.sum(frame_pred == 2) / len(frame_pred))
            # video-level class: argmax of the frame-averaged probabilities
            vid_class_pred = np.argmax(np.mean(preds, axis=0))
            all_class_preds.append(vid_class_pred)
            if all_class_preds[-1] != 2:
                print("wrongly classified", f)
print("Included in total ICLUS videos (without linear probes):", len(all_class_preds))
assert all_frames == wrong_frames + correct_frames
print("Frame accuracy:", correct_frames / float(all_frames))
print("video class accuracy (max avg probability): ", np.sum(np.array(all_class_preds) == 2) / len(all_class_preds))
print("Mean and std of ratio of correctly classified frames per video:", np.mean(avg_corr_frames), np.std(avg_corr_frames))
reg_preds = all_class_preds

# Sensitivity of both test sets together: ICLUS videos are covid-positive
# (binary label 1), regular videos are healthy (binary label 0).
# `iclus_preds` comes from the ICLUS evaluation cell above.
all_gt = np.asarray([1 for _ in range(len(iclus_preds))] + [0 for _ in range(len(reg_preds))])
all_preds = np.asarray(iclus_preds + reg_preds)
# map model classes {0: covid, 2: healthy} onto binary {1: covid, 0: healthy}
all_preds = np.absolute(all_preds / 2 - 1).astype(int)
print(all_preds)
print(len(all_preds), len(all_gt))
print(recall_score(all_gt, all_preds))
print(precision_score(all_gt, all_preds))
accuracy_score(all_gt, all_preds)
```
## MD comments evaluation
### Read in and merge
```
# Merge the blinded-filename mapping with both MDs' CAM-rating spreadsheets
# into a single table written to CAM_scores_MDs.csv.
mapping = pd.read_csv("mapping.csv").drop(columns=["Unnamed: 0"])  # drop leftover index column
gb_comments = pd.read_csv("CAM_scores_GB.csv")
gb_comments = gb_comments.drop([0,1])  # drop the first two rows (presumably header/filler rows - TODO confirm)
lotte_comments = pd.read_csv("CAM_scores_lotte.csv")
# Rename the verbose spreadsheet headers to short, prefixed column names.
lotte_comments = lotte_comments.rename(columns={'Score - how helpful is the heatmap (0=only distracting, 5=very helpful)': 'lotte_score',
                                                'Better one (put 1 if this one is the better one)': "lotte_better",
                                                'Class (Your guess)': 'lotte_class',
                                                'Patterns that can be seen':'lotte_patterns',
                                                'Patterns the heatmap highlights':'lotte_heatmap_patterns'}).drop(columns=["Unnamed: 6"])
gb_comments = gb_comments.rename(columns={'Score - how helpful is the heatmap (0=only distracting, 5=very helpful)': 'gb_score',
                                          'Better one (put 1 if this one is the better one)': "gb_better",
                                          'Class (Your guess)': 'gb_class',
                                          'Patterns that can be seen':'gb_patterns',
                                          'Patterns the heatmap highlights':'gb_heatmap_patterns'})
# Remap Lotte's 0-5 scale to x-3 (+1 when x>=3), i.e. {0..5} -> {-3,-2,-1,1,2,3}.
lotte_comments['lotte_score'] = lotte_comments['lotte_score'].apply(lambda x: x-3 + int(x>=3))
merge_map_gb = pd.merge(mapping, gb_comments, how="inner", left_on="new_filename", right_on="Filename")
merge_map_lotte = pd.merge(merge_map_gb, lotte_comments, how="inner", left_on="new_filename", right_on="Filename")
merge_map_lotte.to_csv("CAM_scores_MDs.csv")
```
### Clean
```
# Score which CAM video (train-model vs test-model) each MD preferred.
# after manual cleaning:
final_table = pd.read_csv("CAM_scores_MDs.csv")
# BUG FIX: this inspection previously ran *before* final_table was read.
final_table.columns
train_score_gb = 0
test_score_gb = 0
train_score_lo = 0
test_score_lo = 0
train_better_gb = []  # per source video: 1 = GB preferred train CAM, 0 = test, 0.5 = tie
train_better_lo = []  # same for Lotte
# Each source video ("previous_filename") appears twice: once as the blinded
# train-model CAM and once as the test-model CAM.
for group_name, group_df in final_table.groupby("previous_filename"):
    print("--------")
    print(group_df[["gb_better", "lotte_better", "is_train"]])
    # GB's preference; gb_better is stored as strings, hence the "1" comparison.
    if np.all(pd.isnull(group_df["gb_better"])) or len(np.where(group_df["gb_better"].values=="1")[0])==0:
        # no preference marked -> split the point between train and test
        train_score_gb += 0.5
        test_score_gb += 0.5
        print("gb: equally good")
        train_better_gb.append(0.5)
    else:
        # if len(np.where(group_df["gb_better"].values=="1")[0])==0:
        #     raise RuntimeError("no valid value found")
        # BUG FIX: previously compared the np.where() result tuples with ==,
        # which only works by accident for single matches; compare index arrays.
        if np.array_equal(np.where(group_df["gb_better"].values=="1")[0],
                          np.where(group_df["is_train"].values==1)[0]):
            print("gb: train better")
            train_score_gb += 1
            train_better_gb.append(1)
        else:
            test_score_gb += 1
            train_better_gb.append(0)
            print("gb: test better")
    # get lotte score; lotte_better is numeric, hence the ==1 comparison
    if np.all(pd.isnull(group_df["lotte_better"])):
        # BUG FIX: these two increments previously went to the *_gb counters.
        train_score_lo += 0.5
        test_score_lo += 0.5
        train_better_lo.append(0.5)
        print("lotte: equally good")
    else:
        if len(np.where(group_df["lotte_better"].values==1)[0])==0:
            raise RuntimeError("no valid value found")
        # BUG FIX: same robust index comparison as for GB above.
        if np.array_equal(np.where(group_df["lotte_better"].values==1)[0],
                          np.where(group_df["is_train"].values==1)[0]):
            print("lotte: train better")
            train_score_lo += 1
            train_better_lo.append(1)
        else:
            test_score_lo += 1
            train_better_lo.append(0)
            print("lotte: test better")
    for i, row in group_df.iterrows():
        if int(row["is_train"])==1:
            print(row["gb_better"], row["lotte_better"], row["is_train"])
# gb_scores = group_df["gb_better"]
# lotte_scores = group_df["lotte_better"]
# train_test = group_df["is_train"]
len(train_better_lo), len(train_better_gb)
# Inter-rater agreement between the two MDs' preferences.
better_arr = np.swapaxes(np.stack([train_better_lo, train_better_gb]), 1, 0)  # shape (n_videos, 2)
agree = np.sum(better_arr[:,0]==better_arr[:,1])
print("agreement (both exactly same)", agree/len(better_arr))
print("disagreement (one 1 one 0)", len(np.where(np.absolute(better_arr[:,0]-better_arr[:,1])==1)[0])/len(better_arr))
print("average score for train better:", np.mean(train_better_lo), np.mean(train_better_gb))
print("numbers unique",np.unique(train_better_lo, return_counts=True), np.unique(train_better_gb, return_counts=True))
```
#### Evaluate scores - Add label
```
# Derive the class label from the first 3 characters of the original filename
# (e.g. "cov", "pne", "reg" - used by the class-wise loop further below).
label = [val[:3].lower() for val in final_table["previous_filename"].values]
np.unique(label, return_counts=True)
np.mean(final_table[final_table["is_train"]==0]["gb_score"])
final_table["label"] = label
```
#### Get average score of Lotte and Gabriel together
```
# Average the two MDs' helpfulness scores on the held-out (test) videos.
# .copy() fixes pandas' SettingWithCopyWarning triggered by adding the
# "mean_scores" column to a slice of final_table below.
only_test = final_table[final_table["is_train"]==0].copy()
all_scores = only_test["gb_score"].values.tolist() + only_test["lotte_score"].values.tolist()
print("Mean score lotte and gabriel together (test):", np.mean(all_scores))
# other method: average per video scores first:
mean_scores = 0.5* (only_test["gb_score"].values + only_test["lotte_score"].values)
print("Mean score lotte and gabriel together (test) - other method:", np.mean(mean_scores))
print(np.vstack([only_test["gb_score"].values, only_test["lotte_score"].values]))
only_test["mean_scores"] = mean_scores.tolist()
only_test.groupby("label").agg({"mean_scores":"mean"})
```
#### Test whether test better train significant
```
from scipy.stats import ttest_ind, ttest_rel, wilcoxon, mannwhitneyu
# Test whether MD scores differ between train-model and test-model CAMs.
only_train = final_table[final_table["is_train"]==1]
all_train_scores = only_train["gb_score"].values.tolist() + only_train["lotte_score"].values.tolist()
only_test = final_table[final_table["is_train"]==0]
all_test_scores = only_test["gb_score"].values.tolist() + only_test["lotte_score"].values.tolist()
print("means", np.mean(all_train_scores), np.mean(all_test_scores))
print("Ttest ind:", ttest_ind(all_train_scores,all_test_scores, equal_var=False))
# NOTE(review): ttest_rel and wilcoxon are *paired* tests - they require equally
# long, pairwise-matched samples; the len() print below should confirm this.
print("ttest related:", ttest_rel(all_train_scores,all_test_scores))
print("Wilcoxon:", wilcoxon(all_train_scores,all_test_scores))
print("mannwhitneyu", mannwhitneyu(all_train_scores,all_test_scores))
# Ttest related
# Examples for use are scores of the same set of student in different exams,
# or repeated sampling from the same units. The test measures whether the average score
# differs significantly across samples (e.g. exams). If we observe a large p-value, for
# example greater than 0.05 or 0.1 then we cannot reject the null hypothesis of identical average scores
print(len(all_train_scores), len(all_test_scores))
plt.scatter(range(len(all_test_scores)), all_test_scores)
plt.scatter(range(len(all_train_scores)), all_train_scores)
```
#### Grouped for separate scores
```
# Count, per pattern, how often the CAM heatmap highlighted a pattern the MDs
# reported seeing in the video (free-text substring matching).
only_test = final_table[final_table["is_train"]==0]
grouped = only_test.groupby("label").agg({"lotte_score":"mean", "gb_score":"mean"})
grouped
only_test = only_test.fillna("none")  # so .str.contains works on empty cells
# "onsolida" matches both "Consolidation" and "consolidation".
gb_all_with_consolidations = only_test[only_test["gb_patterns"].str.contains("onsolida")]
print("number of videos with consolidations", len(gb_all_with_consolidations))
print("GB heatmap highlights consolidation", len(gb_all_with_consolidations[gb_all_with_consolidations["gb_heatmap_patterns"].str.contains("onsolida")]))
print("Lotte heatmap highlights consolidation", len(gb_all_with_consolidations[gb_all_with_consolidations["lotte_heatmap_patterns"].str.contains("onsolida")]))
# NOTE(review): the single-letter patterns "A"/"B" match *any* capital A/B in
# the free-text columns, not only "A-lines"/"B-lines" - confirm the comments
# contain no other capitalised words that would inflate these counts.
gb_all_with_alines = only_test[only_test["gb_patterns"].str.contains("A")]
print("number of videos with A lines", len(gb_all_with_alines))
print("GB heatmap highlights A lines", len(gb_all_with_alines[gb_all_with_alines["gb_heatmap_patterns"].str.contains("A")]))
print("Lotte heatmap highlights A lines", len(gb_all_with_alines[gb_all_with_alines["lotte_heatmap_patterns"].str.contains("A")]))
gb_all_with_blines = only_test[only_test["gb_patterns"].str.contains("B")]
print("number of videos with B lines", len(gb_all_with_blines))
print("GB heatmap highlights B lines", len(gb_all_with_blines[gb_all_with_blines["gb_heatmap_patterns"].str.contains("B")]))
print("Lotte heatmap highlights B lines", len(gb_all_with_blines[gb_all_with_blines["lotte_heatmap_patterns"].str.contains("B")]))
print("Note: Lotte usually writes that it catches ONE bline in the video, or beginning of bline")
class_wise = []
for pattern in ["onsol", "B", "A"]:
    print("--------", pattern)
    gb_all_with_pattern = only_test[only_test["gb_patterns"].str.contains(pattern)]
    for classe in ["cov", "pne", "reg"]:
        class_filtered = gb_all_with_pattern[gb_all_with_pattern["label"]==classe]
        print(classe, pattern, len(class_filtered))
        # gb_all_with_pattern = class_filtered[class_filtered["gb_patterns"].str.contains(pattern)]
        # average of the two MDs' "heatmap highlights it" counts
        number_found = 0.5*(len(class_filtered[class_filtered["gb_heatmap_patterns"].str.contains(pattern)])
                            + len(class_filtered[class_filtered["lotte_heatmap_patterns"].str.contains(pattern)]))
        if len(class_filtered)>0:
            print(classe, number_found/len(class_filtered))
# print(gb_all_with_pattern["label"])
# First version of the "ratio highlighted by CAM" bar plot, on explicit axes.
from matplotlib import rc
rc('text', usetex=False)
fig, ax = plt.subplots()
# BUG FIX: `width` (bar thickness) was previously first assigned only further
# below, *after* this barh call used it -> NameError on a fresh run.
width = 0.5
rects = ax.barh(["Consolidations \n (pneumonia)", "A-lines \n (healthy)", "Pleural line \n (healthy if regular)", "B-lines \n (COVID-19)"], [17/18, 8/13, 9/20, 3/12], width
                , color = ["palegreen","greenyellow","sandybrown", "indianred"])
ax.set_xlim(0,1)
# Add some text for labels, title and custom x-axis tick labels, etc.
# ax.set_ylabel('Scores')
# ax.set_title('Scores by group and gender')
# ax.set_yticks(["Consolidations \n (pneumonia)", "A-lines \n (healthy)", "Pleural line \n (healthy if regular)", "B-lines \n (COVID-19)"], fontsize=13)
ax.set_xlabel("Ratio of samples highlighted by CAM", fontsize=13)
ax.legend()
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    # NOTE(review): written for *vertical* bars; for the barh() rects used here,
    # get_height() returns the bar thickness, not the plotted value - confirm
    # the annotations show what is intended.
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{}'.format(height),  # relies on the cell-level `ax`
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')
autolabel(rects)
fig.tight_layout()
# Second, standalone version of the bar plot - this is the one saved to pdf.
plt.figure(figsize=(6,3))
width=0.5  # bar thickness passed positionally to barh below
plt.barh(["Consolidations \n (pneumonia)", "A-lines \n (healthy)", "Pleural line", "B-lines \n (COVID-19)"], [17/18, 8/13, 9/20, 3/12], width
         , color = ["palegreen","greenyellow","sandybrown", "indianred"])
plt.xlim(0,1)
plt.yticks(fontsize=13)
plt.xlabel("Ratio of samples highlighted by CAM", fontsize=13)
plt.tight_layout()
plt.savefig("barplot_cam.pdf")
# Same substring-based pattern/heatmap counting as above, once driven by
# Gabriel's pattern notes and once by Lotte's.
print("FROM GABRIELS PATTERNS:")
for pattern in ["onsolida", "A", "B", "ronchogram", "ffusion"]:
    print("-------------------")
    # videos where GB reported this pattern (case-tolerant substrings)
    gb_all_with_pattern = only_test[only_test["gb_patterns"].str.contains(pattern)]
    print("number of videos with ", pattern, len(gb_all_with_pattern))
    print("GB heatmap highlights ", pattern, len(gb_all_with_pattern[gb_all_with_pattern["gb_heatmap_patterns"].str.contains(pattern)]))
    print("Lotte heatmap highlights ", pattern, len(gb_all_with_pattern[gb_all_with_pattern["lotte_heatmap_patterns"].str.contains(pattern)]))
print("---------------")
print("Note: observed that both MDs agreed where consolidations are found")
print("Note: Lotte usually writes that it catches ONE bline in the video, or beginning of bline")
print("FROM LOTTES PATTERNS:")
for pattern in ["onsolida", "A", "B", "ffusion", "leura"]:
    print("-------------------")
    # videos where Lotte reported this pattern
    gb_all_with_pattern = only_test[only_test["lotte_patterns"].str.contains(pattern)]
    print("number of videos with ", pattern, len(gb_all_with_pattern))
    print("GB heatmap highlights ", pattern, len(gb_all_with_pattern[gb_all_with_pattern["gb_heatmap_patterns"].str.contains(pattern)]))
    print("Lotte heatmap highlights ", pattern, len(gb_all_with_pattern[gb_all_with_pattern["lotte_heatmap_patterns"].str.contains(pattern)]))
print("---------------")
print("Note: observed that both MDs agreed where consolidations are found")
print("Note: Lotte usually writes that it catches ONE bline in the video, or beginning of bline")
print("overall number of videos", len(only_test))
# How often a heatmap highlighted irrelevant tissue, per MD.
for name in ["gb", "lotte"]:
    print("---------- "+name+" --------------")
    for pattern in ["uscle", "fat", "skin"]:
        print(pattern, np.sum(only_test[name+"_heatmap_patterns"].str.contains(pattern)))
```
#### Notes:
GB: 1 time "Avoids the liver, I'm impressed", but several times "tricked by the liver"
## Backups
## Test gradcam
```
# Scratch cell for trying out GradCAM on single images.
normal_eval = Evaluator(ensemble=False, split=0) # , model_id="")
vid_in = vid_in_path + "Pneu_liftl_pneu_case3_clip5.mp4"
img = cv2.imread("../../data/my_found_data/Cov_efsumb1_2.png")
# overwrites the previous read - only the second image is actually used
img = cv2.imread("../../data/pocus_images/convex/Cov_blines_covidmanifestation_paper2.png")
# NOTE(review): uses `evaluator`, but this cell creates `normal_eval` - confirm
# which instance is intended (evaluator is presumably defined in an earlier cell).
img = evaluator.preprocess(img)
grad = GradCAM()
out_map = grad.explain(img[0], evaluator.models[0], 0, return_map=False, layer_name="block5_conv3", zeroing=0.6)
plt.imshow(out_map.astype(int))
out_cam = get_class_activation_map(evaluator.models[0], img, 1, heatmap_weight=0.1, zeroing=0.8)
```
### Check cross val
```
# Sanity-check the cross-validation folders: count files per fold/class and
# assert that no filename appears in more than one place.
check = "../../data/cross_validation"
file_list = []
for folder in os.listdir(check):
    if folder[0]==".":
        continue  # skip hidden entries
    for classe in os.listdir(os.path.join(check, folder)):
        if classe[0]==".": # or classe[0]=="u":
            continue
        uni = []       # base names (without extension) in this fold/class
        is_image = 0   # files with exactly one dot, i.e. plain image files
        for file in os.listdir(os.path.join(check, folder, classe)):
            if file[0]==".":
                continue
            if len(file.split("."))==2:
                is_image+=1
                file_list.append(file)
            uni.append(file.split(".")[0])
            # assert file[:3].lower()==classe[:3], "wrong label"+file[:3]+classe[:3]
        print(folder, classe, len(np.unique(uni)), len(uni), is_image)
# no duplicates across folds
assert len(file_list)==len(np.unique(file_list))
print(len(file_list))
```
## Copy from train and test folders, give new ideas, and construct mapping
```
# Collect test-CAM videos that also have a train-CAM counterpart; only those
# can be compared pairwise in the blinded MD evaluation below.
testcam = "vid_outputs_cam_test"
files_to_process = []  # entries are "<split_subdir>/<filename>"
for subdir in os.listdir(testcam):
    if subdir[0]=="." or subdir=="not taken" :
        continue  # skip hidden entries and the explicitly excluded folder
    for f in os.listdir(os.path.join(testcam, subdir)):
        if f[0]==".":
            continue
        if not os.path.exists(os.path.join("vid_outputs_cam", f)):
            print("does not exist in train", subdir, f)
            # if not "RUQ" in f:
            #     todo.append(f.split(".")[0])
        else:
            files_to_process.append(os.path.join(subdir, f))
# print(todo)
# code to copy files to randomized thing
# Blinded setup: each source video is copied twice (train-model CAM and
# test-model CAM) under a random id, with a random 0/1 "model" suffix so the
# reviewing MDs cannot tell which is which; the mapping is saved to csv.
import shutil
drop_cams_dir = "vids_to_check"
test_cam_dir = "vid_outputs_cam_test"
train_cam_dir = "vid_outputs_cam"
# create directory
if not os.path.exists(drop_cams_dir):
    os.makedirs(drop_cams_dir)
# give random ids
ids = np.random.permutation(len(files_to_process))
# define dataframe columns
new_fname = []
old_fname = []
is_train = []
fold = []
for i, f_name_path in enumerate(files_to_process):
    split_name, f_name = tuple(f_name_path.split(os.sep))
    split = int(split_name[-1])  # fold number from e.g. "split3"
    # randomly add to model2
    out_f_name = "video_"+str(ids[i])+"_model_"
    # appended twice on purpose: one df row for the train copy, one for the test copy
    old_fname.append(f_name)
    old_fname.append(f_name)
    # coin flip: does the train-model copy get suffix "1"?
    rand_folder_train = np.random.rand()<0.5
    print("train gets 1?", rand_folder_train)
    # copy train data
    train_outfname = out_f_name + str(int(rand_folder_train)) + ".mpeg"
    train_to_path = os.path.join(drop_cams_dir, train_outfname)
    cp_from_path = os.path.join(train_cam_dir, f_name)
    # append for df
    is_train.append(1)
    fold.append(split)
    new_fname.append(train_outfname)
    print("TRAIN:", cp_from_path, train_to_path)
    shutil.copy(cp_from_path, train_to_path)
    # copy test (gets the opposite suffix)
    test_outfname = out_f_name + str(int(not rand_folder_train)) + ".mpeg"
    test_to_path = os.path.join(drop_cams_dir, test_outfname)
    cp_from_path = os.path.join(test_cam_dir, split_name, f_name)
    # append for df
    fold.append(split)
    is_train.append(0)
    new_fname.append(test_outfname)
    print("TEST:", cp_from_path, test_to_path)
    shutil.copy(cp_from_path, test_to_path)
df = pd.DataFrame()
df["previous_filename"] = old_fname
df["new_filename"] = new_fname
df["is_train"] = is_train
df["fold"] = fold
df.head(30)
df.to_csv(drop_cams_dir+"/mapping.csv")
# Interactive inspection of the regular test videos: show the cropped region
# for every frame (the actual saving code is commented out below).
iclus_dir = "test_data_regular/pat1"
# out_path = "iclus_videos_processed"
FRAMERATE = 3    # target sampling rate (frames/second) for the save code
MAX_FRAMES = 30  # cap on saved frames per video
for fn in os.listdir(iclus_dir):
    if fn[0]==".":
        continue  # skip hidden entries
    cap = cv2.VideoCapture(os.path.join(iclus_dir, fn))
    n_frames = cap.get(7)   # OpenCV property 7 = CAP_PROP_FRAME_COUNT
    frameRate = cap.get(5)  # OpenCV property 5 = CAP_PROP_FPS
    nr_selected = 0
    every_x_image = int(frameRate / FRAMERATE)  # keep every x-th frame
    # NOTE: nr_selected is only incremented inside the commented-out save
    # block, so this loop currently shows *every* frame until the video ends.
    while cap.isOpened() and nr_selected < MAX_FRAMES:
        ret, frame = cap.read()
        if (ret != True):
            break
        print(cap.get(1), cap.get(2), cap.get(3), cap.get(4), cap.get(5), cap.get(6), cap.get(7))
        h, w, _ = frame.shape
        # print(h,w)
        plt.imshow(frame[30:400, 80:450])  # crop away the scanner UI borders
        plt.show()
        # SAVE
        # if ((frameId+1) % every_x_image == 0):
        #     # storing the frames in a new folder named test_1
        #     filename = out_path + fn + "_frame%d.jpg" % frameId
        #     cv2.imwrite(filename, frame)
        #     nr_selected += 1
        #     print(frameId, nr_selected)
    cap.release()
import shutil
# Copy the segmented cross-validation data into a new folder tree, stripping
# the ".gif" extension from gif files and keeping npz files unchanged.
check = "../../data/cross_validation_segmented"
out = "../../data/cross_validation_segmented_new"
for folder in os.listdir(check):
    if folder[0]==".":
        continue  # skip hidden entries
    # exist_ok makes the cell safe to re-run
    os.makedirs(os.path.join(out, folder), exist_ok=True)
    for classe in os.listdir(os.path.join(check, folder)):
        # BUG FIX: skip hidden entries *before* creating their directory
        # (previously a directory was created even for e.g. ".DS_Store").
        if classe[0]==".": # or classe[0]=="u":
            continue
        os.makedirs(os.path.join(out, folder, classe), exist_ok=True)
        for f in os.listdir(os.path.join(check, folder, classe)):
            if f[-3:]=="gif":
                # drop the ".gif" suffix in the copy
                shutil.copy(os.path.join(check, folder, classe, f), os.path.join(out, folder, classe, f[:-4]))
            elif f[-3:] =="npz":
                shutil.copy(os.path.join(check, folder, classe, f), os.path.join(out, folder, classe, f))
```
### Cut Lotte's videos
```
# Preprocessed regular videos (patN-prefixed names produced by the cutting
# cell below) to be added to the POCUS dataset with a "Reg_" prefix.
file_list = ["pat1Image_132943.mpeg",
             "pat1Image_133043.mpeg",
             "pat1Image_133138.mpeg",
             "pat1Image_133232.mpeg",
             "pat1Image_133327.mpeg",
             "pat1Image_133410.mpeg",
             "pat2Image_133824.mpeg",
             "pat2Image_133952.mpeg",
             "pat2Image_134138.mpeg",
             "pat2Image_134240.mpeg",
             "pat2Image_134348.mpeg",
             "pat2Image_134441.mpeg",
             "pat3Image_134711.mpeg",
             "pat3Image_134811.mpeg",
             "pat3Image_134904.mpeg",
             "pat3Image_135026.mpeg",
             "pat3Image_135128.mpeg",
             "pat3Image_135215.mpeg",
             "pat4Image_135904.mpeg",
             "pat4Image_140024.mpeg",
             "pat4Image_140238.mpeg",
             "pat4Image_140434.mpeg",
             "pat4Image_140606.mpeg",
             "pat4Image_140705.mpeg"]
copy_path = "../../data/pocus_videos/convex/"
for f in file_list:
    video_path = "reg_propro/"+f
    # cap = cv2.VideoCapture(video_path)
    # print(cap.get(7))
    # cap.release()
    print("Reg_"+f)
    shutil.copy(video_path, copy_path+"Reg_"+f)
# Crop each regular video to the ultrasound region and re-encode it as mpeg2.
out_dir = "reg_propro/pat4"
in_dir = "test_data_regular/pat4"
for vid in os.listdir(in_dir):
    if vid[0]==".":
        continue  # skip hidden entries
    video_path = os.path.join(in_dir, vid)
    cap = cv2.VideoCapture(video_path)
    images = []
    counter = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if (ret != True):
            break
        if counter<1:
            # first frame: only displayed for visual inspection, and skipped
            # from the output (the `continue` below runs before the append)
            plt.imshow(frame[30:400, 80:450]) # ICLUS: [70:570, 470:970]) # [25:350, 100:425]) # LOTTE:[30:400, 80:450]
            plt.show()
            counter += 1
            continue
        counter += 1
        img_processed = frame[30:400, 80:450]  # crop away the scanner UI borders
        images.append(img_processed)
    cap.release()
    images = np.asarray(images)
    print(images.shape)
    # NOTE: no path separator here on purpose - with out_dir="reg_propro/pat4"
    # this yields files like "reg_propro/pat4<vid>.mpeg", matching the
    # patN-prefixed names in file_list above. Confirm this is intended.
    io.vwrite(out_dir+ vid.split(".")[0]+".mpeg", images, outputdict={"-vcodec":"mpeg2video"})
```
### Display logo on frames
```
# Scratch cell: grab the first frame of a sample video and resize the logo
# (presumably for overlaying the logo on frames - see the section title).
test_vid = "../../data/pocus_videos/convex/Pneu-Atlas-pneumonia2.gif"
cap = cv2.VideoCapture(test_vid)
ret, frame = cap.read()  # first frame only
cap.release()
plt.imshow(frame)
plt.show()
logo = plt.imread("Logo.png")
logo = cv2.resize(logo, (50,50), )
plt.imshow(logo)
```
| github_jupyter |
# Le Machine Learning, c'est pour tout le monde
## Le Machine Learning, kézako ?
Le Machine Learning, ou apprentissage automatique en français, est une façon de programmer les ordinateurs de manière à ce qu'ils exécutent une tâche souhaitée sans avoir programmé explicitement les instructions pour cette tâche.
En programmation classique, on a des données en entrée (input), une suite d'instruction qui vont s'appliquer sur ces données, et un résultat en sortie (output).
En Machine Learning, on a des données en entrée, qui vont être fournies à un programme qui va "apprendre" un modèle de façon à ce que le résultat en sortie corresponde à ce que l'on souhaite.
Dans un second temps, on peut donner de nouvelles données au modèle qui va donc produire des résultats nouveaux. Si ces résultats correspondent à ce que l'on attendait, alors on est content et on dit que le modèle a appris.

L'apprentissage, ça n'est pas une étape magique, c'est juste la solution d'un problème mathématique d'optimisation. En gros, on cherche à minimiser l'erreur dans un espace abstrait qui contient plein de modèles de Machine Learning.
Ca peut paraître un peu compliqué, mais il y a plein de mathématiciens qui ont réfléchi à la question depuis longtemps, et on a des algorithmes efficaces pour ça.

*Une illustration de la technique d'optimisation de descente de gradient, qui consiste à suivre la plus forte pente pour trouver le minimum (un peu comme une bille lâchée du haut d'une montagne qui va aller s'arrêter au fond de la vallée).*
**Pourquoi appelle-t-on ça un modèle de Machine Learning, et non pas un programme comme tout le monde ?**
D'abord, parce qu'on est un peu snob, et ensuite, parce que les modèles programmés peuvent aussi souvent s'exprimer en termes mathématiques, et on peut faire des preuves dessus, etc.
Un modèle de Machine Learning, c'est une sous-espèce de programme informatique, qui est codé de façon un peu différente, c'est tout.
## Si les machines peuvent apprendre, pourquoi s'embêter à programmer ?
Donc pour faire du Machine Learning, il faut un problème, une tâche à résoudre, et des exemples de comment on veut que la tâche soit réalisée.
Ce besoin de données (les exemples) explique pourquoi on ne se sert pas du Machine Learning partout. Il y a des tas de tâches pour lesquelles il est plus simple d'expliquer à l'ordinateur comment faire étape par étape, de lui donner les instructions, plutôt que de trouver tout un tas d'exemples du comportement qu'on veut apprendre. Le Machine Learning ne remplace donc absolument pas la programmation !
Par contre, il y a des tâches pour lesquelles c'est plus compliqué d'expliquer comment on veut que ça soit fait. Par exemple, décider si une chanson est triste ou au contraire joyeuse. On peut bien trouver des idées de pourquoi telle chanson est mélancolique ou telle autre nous met en pleine forme, les chansons tristes sont peut-être plus lentes, les chansons joyeuses ont souvent de la batterie, mais c'est compliqué d'expliquer à un ordinateur comment faire la différence. Par contre, on peut facilement classer les chansons dans deux catégories, triste et joyeuse, et donner cette classification ainsi que le spectre audio des chansons à un ordinateur, et lui demander d'apprendre de lui-même à reconnaître les chansons tristes ou joyeuses !
### Quelques instructions pour la suite
*Les cellules de codes s'exécutent en cliquant sur la touche play dans la barre en haut, ou bien avec majuscule + Entrée ou Ctrl + Entrée au clavier. La cellule actuellement sélectionnée a une barre bleue à droite.*
*Pour tous les morceaux de code à trou, il est possible de charger la solution en retirant le # de la ligne `# %load solutions/[nom_de_lexercice].py` et en exécutant la cellule.*
## Mon premier modèle de Machine Learning
Pour notre premier modèle, on ne va pas apprendre à distinguer les chansons tristes des autres, on va classer quelque chose de plus simple.
On a un ensemble de données (un dataset en anglais) qui, pour chaque configuration d'une petite expérience de physique, nous dit si la balance penche vers la droite ou vers la gauche.

On a quatre variables pour chaque expérience: la longueur du bras gauche (`dist_gauche`), la longueur du bras droit (`dist_droit`), la masse sur le plateau gauche (`masse_gauche`) et la masse sur le plateau droit (`masse_droit`). On veut que l'ordinateur apprenne le résultat de l'expérience (la balance penche à gauche ou à droite) à partir de ces quatre variables.
Evidemment, dans ce cas très simple, un peu de physique pourrait nous permettre de trouver la solution et de programmer explicitement les instructions. Mais supposons qu'on a oublié nos cours de physique. Que peut-on faire ?
On commence par charger les données, les exemples d'expérience pour lesquels on a la réponse. Ces données sont dans un fichier csv, un fichier texte où les valeurs sont séparées par des virgules (*comma-separated values* en anglais).
On utilise le module csv de la librairie standard pour stocker nos données dans une grande liste, où chaque élément représentera une expérience.
Le code pour charger les données est donné à titre indicatif, pas la peine de s'en inquiéter pour l'instant !
```
import csv

# Read the training examples: column 0 is the label ('L'/'R'), the remaining
# columns are the four integer experiment variables.
train_dataset = []
with open('data/apprentissage.csv') as csvfile:
    for row in csv.reader(csvfile):
        train_dataset.append({'label': row[0],
                              'variables': [int(value) for value in row[1:]]})
```
Une expérience (un point de donnée) est caractérisée par les valeurs des quatre variable dont on a parlé ci-dessus (`masse_gauche`, `dist_gauche`, `masse_droite`, `dist_droite`), et par son **label** (le résultat de l'expérience, est-ce que la balance penche à droite (**R**) ou à gauche (**L**).
On peut par exemple regarder le premier point de données du dataset.
```
train_dataset[0]
```
On voit que les variables `masse_gauche`, `dist_gauche`, `masse_droite`, `dist_droite` ont pour valeur respectivement 5, 4, 5 et 5, et que le résultat de l'expérience est que la balance penche à droite (label **R**).
Toutes les valeurs des variables ont été normalisées entre 1 et 5, c'est une pratique courante en Machine Learning pour simplifier l'apprentissage.
Maintenant que l'on a des données, comment va-t-on faire pour que l'ordinateur apprenne ?
On va utiliser l'algorithme des plus proches voisins.
Cet algorithme fonctionne sur le principe suivant: on va regarder quels sont les points qui sont proches du point qu'on veut évaluer en utilisant les variables qu'on a collectées. Concrètement, on cherche dans notre dataset d'apprentissage les expériences qui sont les plus similaires à l'expérience qu'on veut évaluer. Et on se dit que les choses vont se passer à peu près pareil pour notre expérience inconnue, et donc on va renvoyer le même label (on prédit la même classe).

Sur le schéma ci-dessus, le point vert est entouré de points rouges, on va donc lui assigner le label rouge.
Pour le point violet, c'est un peu plus compliqué: si on prend le point le plus proche, il serait bleu; mais si on fait la moyenne sur les 3 points les plus proches, il serait rouge.
En pratique, on fait souvent la moyenne sur quelques points (3 ou 5), afin de lisser les cas limites (comme celui du point violet).
Notre modèle de Machine Learning va être ici composé de 3 choses :
* le dataset d'apprentissage qu'on va utiliser pour trouver les voisins
* la valeur *k* du nombre de voisins qu'on va considérer pour faire nos évaluations
* la fonction qu'on va utiliser pour calculer la distance
Commençons par définir cette fonction de distance.
On va utiliser une distance dite *de Manhattan*:
$$ d(a, b) = |a_1 - b_1| + |a_2 - b_2| + ...$$
qui calcule la différence sur chaque variable, puis somme les valeurs absolues de ces différences.
On va donc parcourir toutes les variables des points de données (les expériences) *a* et *b* et ajouter la valeur absolue de la différence.
La fonction `zip(a, b)` va renvoyer *a_1, b_1*, puis *a_2, b_2*, etc.
C'est à vous de jouer, complétez le code ci-dessous !
(Un dernier indice, la fonction `math.fabs` vous donnera la valeur absolue.)
```
import math

def distance(a, b):
    """Manhattan (L1) distance between two equal-length lists of variables.

    Exercise: complete the marked line so that the absolute difference of each
    pair of values is accumulated (hint: math.fabs).
    """
    d = 0
    for i, j in zip(a, b):
        d += ... # to be completed (à compléter)
    return d
# %load solutions/distance.py
```
Pour tester si notre fonction fonctionne correctement, on va calculer la distance entre les deux premiers points du dataset. Vous devriez obtenir 11.
```
distance(train_dataset[0]['variables'], train_dataset[1]['variables'])
```
Pour le nombre de voisins que l'on va considérer, on va prendre *k = 3*.
```
k = 3
```
C'est parti, on peut à présent se lancer dans le coeur de l'algorithme.
Quand on reçoit un point à évaluer, on veut connaître les labels des *k* plus proches voisins. Pour cela, on va calculer la distance du nouveau point à tous les points du dataset d'apprentissage, et stocker à chaque fois la valeur et le label.
(Indice: on a déjà écrit une fonction pour calculer la distance; on veut prendre uniquement la partie 'variables' du `train_point` pour calculer la distance.)
```
def calculer_distances(dataset, eval_point):
    """Return, for every training point, its label and its distance to eval_point.

    Exercise: complete the marked line so that it calls distance() on the
    training point's 'variables' and eval_point.
    """
    distances = []
    for train_point in dataset:
        distances.append({'label': train_point['label'],
                          'distance': ... # to be completed (à compléter)
                          })
    return distances
# %load solutions/calculer_distances.py
```
Maintenant qu'on a toutes les distances, on peut trouver les *k* points les plus proches, en triant par distance. Python nous permet de faire ça facilement, mais les détails sont un peu techniques.
```
def trier_distances(distances):
    """Sort the distance records in ascending order of their 'distance' value."""
    def _distance_key(point):
        return point['distance']
    return sorted(distances, key=_distance_key)
```
On peut prendre les trois premiers éléments de la liste avec `maListe[:3]`, et compter le nombre de **R** et de **L** dans les labels.
```
def choisir_label(distances_triees, k):
    """Majority vote over the k nearest neighbours.

    Exercise: complete the marked line (the mirror of the 'R' case above).
    Note that ties resolve to 'L', since 'R' requires a strict majority.
    """
    plus_proches_voisins = distances_triees[:k]
    count_R = 0
    count_L = 0
    for voisin in plus_proches_voisins:
        if voisin['label'] == 'R':
            count_R += 1
        if voisin['label'] == 'L':
            ... # to be completed (à compléter)
    if count_R > count_L:
        return 'R'
    else:
        return 'L'
# %load solutions/choisir_label.py
```
A présent on combine toutes nos fonctions pour avoir l'algorithme final.
```
def algorithme_des_plus_proches_voisins(dataset, eval_point, k):
    """Full k-nearest-neighbours prediction: compute distances, sort, then vote."""
    return choisir_label(
        trier_distances(calculer_distances(dataset, eval_point)),
        k,
    )
```
Maintenant, on va charger de nouvelles données pour pouvoir évaluer notre algorithme.
```
import csv

# Load the held-out evaluation set; same format as the training csv, but the
# label column is stored under 'true_label' (the observed outcome).
eval_dataset = []
with open('data/evaluation.csv') as csvfile:
    csvreader = csv.reader(csvfile)
    for row in csvreader:
        eval_dataset.append({'true_label': row[0], 'variables': list(map(int, row[1:]))})
```
On peut regarder ce que ça donne pour notre premier point d'évaluation.
```
algorithme_des_plus_proches_voisins(train_dataset, eval_dataset[0]['variables'], k)
```
On prédit que la balance va pencher à gauche !
Qu'en est-il en réalité ?
```
eval_dataset[0]['true_label']
```
C'est bien ce qui a été observé ! Notre algorithme a appris, sans avoir besoin de lui expliquer les règles de la physique !
Voyons combien d'erreurs notre algorithme fait sur la totalité des points d'évaluation.
```
# Count how many evaluation points the k-NN classifier predicts incorrectly.
erreurs = sum(
    1
    for point in eval_dataset
    if algorithme_des_plus_proches_voisins(train_dataset, point['variables'], k)
    != point['true_label']
)
print("Nombre d'erreurs %d sur %d exemples" % (erreurs, len(eval_dataset)))
```
Notre algorithme a fait quelques erreurs, mais il a correctement prédit la majorité des situations.
Félicitations, vous avez programmé votre premier modèle de Machine Learning !
| github_jupyter |
# MMA 831 DOS1
This assignment requires R but is good Python practice.
## Some more visualizations using Python + Seaborn
```
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.set_cmap('Set2')
import seaborn as sns
colours = sns.color_palette('Set2')
plt.rcParams["figure.figsize"] = (15,15)
```
## Load Data
```
dat = pd.read_csv('adv_sales.csv')
```
## Summary statistics etc.
```
dat.shape
dat.info()
```
Looks like there's a leftover Store ID column, let's delete that, it's not important.
```
dat_noids = dat.drop('Unnamed: 0', axis = 1)
dat_noids.describe()
```
Looks like no outliers from here.
```
dat_noids.head()
```
## Check for missing values
```
dat_noids.isnull().sum()
```
No missing values.
## Visualizations
Here comes the fancy stuff!
### Histograms
```
# Histograms of every numeric column, 20 bins each.
dat_noids.hist(bins = 20, figsize = (20,20))
plt.show()
```
Looks to be mostly normal, except for price and satisfaction.
### Pair plots
Use `diag_kind = 'kde'` to show kernel density estimates in the diagonals.
`kind = reg` shows regression lines for each scatterplot. This increases run time by A LOT. Can also use a correlation matrix (next plot) to visualize pairwise linear relationships.
```
# Pairwise scatterplots with fitted regression lines (slow); KDEs on the diagonal.
sns.pairplot(dat_noids, palette = colours, kind = 'reg', diag_kind = 'kde')
plt.show()
```
### Correlation matrix
If wanted can draw a white mask to only show half of the plot (because each half is the same), ~~but I'm too lazy~~ I did it anyway but prefer the old one more :grin:
First we define a diverging colour palette:
```
diverging_palette = sns.diverging_palette(220, 10, as_cmap = True)
```
Plot it:
Make sure to include `vmin` and `vmax` or you will get a bunch of blue cells that tell you nothing...
```
# sns.heatmap(dat_noids.corr(), cmap = diverging_palette, square = True, annot = True)
# mask = np.zeros_like(dat_noids.corr(), dtype=np.bool)
# mask[np.triu_indices_from(mask)] = True
# Annotated correlation heatmap; vmin/vmax pin the colour scale to [-1, 1] so
# the diverging palette is centred on zero correlation.
sns.heatmap(dat_noids.corr(), cmap = diverging_palette, square = True, annot = True,
            vmin = -1, vmax = 1)
plt.show()
```
Aside from `store` and `billboard`, no strong correlation. `price` and `sat` have weak correlation, which is understandable.
### Plot scatterplots for advertising channels
```
# One scatterplot per advertising channel, sharing the sales (y) axis.
fig, ax = plt.subplots(1, 3, figsize = (15,5), sharey = True)
sns.scatterplot(x = 'store', y = 'sales', data = dat_noids, ax = ax[0])
sns.scatterplot(x = 'billboard', y = 'sales', data = dat_noids, ax = ax[1])
sns.scatterplot(x = 'printout', y = 'sales', data = dat_noids, ax = ax[2])
# NOTE(review): fig.show() can be a no-op outside interactive backends;
# plt.show() is the usual call - confirm this renders in your environment.
fig.show()
```
Alternatively put it all on the same graph.
(This took me an hour to figure out. Thanks Google and StackOverflow!)
```
# All three channels on one set of axes, coloured with Set2 palette hex codes.
fig, ax = plt.subplots(figsize = (10,10))
ax.grid(True, alpha = 0.5)
ax.scatter(x = 'store', y = 'sales', data = dat_noids, color = '#66c2a5')
ax.scatter(x = 'printout', y = 'sales', data = dat_noids, color = '#fc8d62')
ax.scatter(x = 'billboard', y = 'sales', data = dat_noids, color = '#8da0cb')
ax.set_title('Advertising vs. Sales')
ax.set_ylabel('Sales')
ax.set_xlabel('Advertising')
# legend entries must be listed in the same order as the scatter calls above
ax.legend(['Store', 'Printout', 'Billboard'], loc = 4, fontsize = 'large')
fig.show()
```
Alternatively, do a pairplot with only the three interesting features:
```
sns.pairplot(dat_noids[['sales', 'store', 'billboard', 'printout']], palette = colours)
plt.show()
```
## Split Train and Test
```
# TODO
```
## Linear Regression
```
# TODO
```
| github_jupyter |
# Collect Physicists Raw Data
The goal of this notebook is to collect demographic data on the list of [physicists notable for their achievements](../data/raw/physicists.txt). Wikipedia contains this semi-structured data in an *Infobox* on the top right side of the article for each physicist. However, similar data is available in a more machine readable, [JSON](https://www.json.org/) format from [DBpedia](https://wiki.dbpedia.org/about). We will need to send HTTP requests to DBpedia to get the JSON data. For an example, compare *Albert Einstein's* [Wikipedia infobox](https://en.wikipedia.org/wiki/Albert_Einstein) to his [DBPedia JSON](http://dbpedia.org/data/Albert_Einstein.json). It is important to realize, that although the data is similar, it is not identical.
The shortcomings of Wikipedia infoboxes and the advantages of DBpedia datasets are explained in section 4.3 of [DBpedia datasets](https://wiki.dbpedia.org/services-resources/datasets/dbpedia-datasets#h434-10). But basically the summary is that DBpedia data is much cleaner and better structured than Wikipedia Infoboxes as it is based on hand-generated mappings of Wikipedia infoboxes / templates to a [DBpedia ontology](https://wiki.dbpedia.org/services-resources/ontology). Consequently, we will be using DBpedia as the data source for this project.
However, DBpedia does have the disadvantage that its content is roughly 6-18 months behind updates applied to Wikipedia content. This is due to its data being generated from a [static dump of Wikipedia content](https://wiki.dbpedia.org/online-access/DBpediaLive) in a process that takes approximately 6 months. The fact that the data is not in sync with the latest Wikipedia content is not of great significance for this project as the data is edited infrequently. Also when edits are made, they tend to be only minor.
## Setting the Environment
A few initialization steps are needed to setup the environment:
- The locale needs to be set for all categories to the user’s default setting (typically specified in the LANG environment variable) to enable correct sorting of physicists names with accents.
- A bool constant `FETCH_JSON_DATA` needs to be set to decide whether to fetch the json data. Set to False so that the previously fetched data is used. In this case the results of the study are guaranteed to be reproducible. Set to True so that the latest data is fetched. In this case it is possible that the results of the study will change.
```
# Set the locale from the user's environment (LANG) so that accented
# physicist names sort correctly with locale.strxfrm later on.
import locale
locale.setlocale(locale.LC_ALL, '')

# False -> reuse previously fetched JSON (reproducible results);
# True  -> re-fetch the latest data from DBpedia (results may change).
FETCH_JSON_DATA = False
```
## Constructing the URLs
To make the HTTP requests, we will need a list of URLs representing the resources (i.e. the physicists). It's fairly easy to construct these URLs from the list of notable physicists. However, it's important to percent-encode ("quote") any non-ASCII unicode characters in a physicist's name, since such characters are not allowed directly in URLs. OK let's create the list now.
```
import gzip
import os
import shutil
from collections import OrderedDict
import jsonlines
import pandas as pd
from src.data.jsonl_utils import read_jsonl
from src.data.url_utils import DBPEDIA_DATA_URL
from src.data.url_utils import fetch_json_data
from src.data.url_utils import urls_progress_bar
def construct_urls(file='../data/raw/physicists.txt'):
    """Construct DBpedia JSON data URLs from the names listed in *file*.

    Args:
        file (str): Path to a text file containing one physicist name per
            line, with names written using spaces (e.g. "Albert Einstein").

    Returns:
        list(str): One DBpedia data URL per name, with spaces replaced by
        underscores (e.g. ".../data/Albert_Einstein.json").
    """
    # Use a distinct name for the file handle instead of rebinding the
    # `file` parameter (the original shadowed it inside the `with` block).
    with open(file, encoding='utf-8') as fh:
        names = [line.rstrip('\n') for line in fh]
    # DBpedia resource names use underscores where article titles have spaces.
    return [DBPEDIA_DATA_URL + name.replace(' ', '_') + '.json'
            for name in names]

urls_to_fetch = construct_urls()
assert(len(urls_to_fetch) == 1069)
```
## Fetching the Data
Now we have the list of URLs, it's time to make the HTTP requests to acquire the data. The code is asynchronous, which dramatically helps with performance. It is important to set the `max_workers` parameter sensibly in order to crawl responsibly and not hammer the site's server. Although the site seems to be rate limited, it's still good etiquette.
```
# Path where the raw JSON-lines output will be written.
jsonl_file = '../data/raw/physicists.jsonl'

if FETCH_JSON_DATA:
    # Asynchronous fetch; max_workers kept modest to crawl responsibly.
    json_data = fetch_json_data(urls_to_fetch, max_workers=20, timeout=30,
                                progress_bar=urls_progress_bar(len(urls_to_fetch)))
else:
    # Reuse the previously fetched (gzipped) data for reproducibility.
    json_data = read_jsonl('../data/raw/physicists.jsonl' + '.gz')
```
Let's sort the data alphabetically by URL, confirm that all the data was fetched and take a look at the first JSON response.
```
if FETCH_JSON_DATA:
    # Freshly fetched data is a mapping {url: json}; sort by URL using the
    # locale-aware collation set up earlier so accented names order correctly.
    json_data = OrderedDict(sorted(json_data.items(), key=lambda x: locale.strxfrm(x[0])))
    assert(len(json_data) == 1069)  # every request received a response
    print(list(json_data.keys())[0])
    print(list(json_data.values())[0])
else:
    # The persisted file already has the 11 empty responses removed.
    assert(len(json_data) == 1058)
    print(json_data[0])
```
It is clear that every request successfully received a response. However, we see that some responses came back empty from the server. Basically, although there are Wikipedia pages for these physicists, they do not have a corresponding page in DBpedia (or the page in DBpedia has a different name). Not to worry, there are only 11 and they are not so famous, so we will just exclude these "Z-listers" from the analysis.
```
if FETCH_JSON_DATA:
    # URLs whose responses came back empty: no matching DBpedia resource.
    urls_to_drop = [url for (url, data) in json_data.items() if not data]
    assert(len(urls_to_drop) == 11)
    display(urls_to_drop)

if FETCH_JSON_DATA:
    # Keep only the non-empty responses (1069 - 11 = 1058).
    json_data = [data for data in json_data.values() if data]
    assert(len(json_data) == 1058)
```
## Persisting the Data
Now that we have the list of JSON responses, we would like to persist them for later analysis. We will use [Json Lines](http://jsonlines.org/) as it seems like a convenient format for storing structured data that may be processed one record at a time.
```
if FETCH_JSON_DATA:
    # Persist one JSON object per line (JSON Lines format).
    with jsonlines.open(jsonl_file, 'w') as writer:
        writer.write_all(json_data)
```
Let's do a quick sanity check to make sure the file contains the expected number of records.
```
if FETCH_JSON_DATA:
    # Sanity check: the file should round-trip the same number of records.
    json_lines = read_jsonl(jsonl_file)
    assert(len(json_lines) == 1058)
```
Finally, let's compress the file to reduce its footprint.
```
if FETCH_JSON_DATA:
    # Gzip the JSON-lines file, then delete the uncompressed original.
    with open(jsonl_file, 'rb') as src, gzip.open(jsonl_file + '.gz', 'wb') as dest:
        shutil.copyfileobj(src, dest)
    os.remove(jsonl_file)
```
| github_jupyter |
# 1-1. AIとは何か?簡単なAIを設計してみよう
AIブームに伴って、様々なメディアでAIや機械学習、深層学習といった言葉が使われています。本章ではAIと機械学習(ML)、深層学習の違いを理解しましょう。
## 人工知能(AI)とは?
そもそも人工知能(AI)とは何でしょうか?

Wikipedia[1]によると、人工知能について以下のように書かれています。
人工知能(じんこうちのう、英: artificial intelligence、AI〈エーアイ〉)とは「言語の理解や推論、問題解決などの知的行動を人間に代わってコンピュータに行わせる技術」。
要は知的行動をコンピュータが行う技術のことですね。もう少し歴史を遡ってみると、過去のコンピュータは日本語で計算機、その言葉通り「計算」をするための機械でした。今で言うと「電卓」そのものですね。つまり、電卓で行う計算
(左の図)ではできない絵の認識や翻訳(右の図)などを知的な処理とし、その処理をコンピュータ、すなわち電卓などの計算機で行うんですね。
つまり、こんな感じでしょうか。↓

あれ?と思われるかもしれませんが、現実は皆さんの身の回りのコンピュータが認識や翻訳をやってますよね。コンピュータに脳は入っていません。つまり、脳が行っている「知的な処理」というのをコンピュータが得意な電卓で行う計算に置き換えて処理しているのです。
## 今のコンピュータでできること

そもそもコンピュータが得意な処理とは何でしょうか?それは電卓の例でもわかるように、数値の計算です。あと、数値の大小比較も得意です。つまり、数値にしてしまえばコンピュータで色々できそうですね。実際、写真の加工や音声の合成などはそれぞれデータを数値化することでコンピュータが処理できるようにしています。AIも数値を扱う問題に変換してしまえばよさそうですね。
# 簡単なAIを作ってみよう
## ミニトマトを出荷用に収穫するかどうか判定するAI

早速AIを作ってみてどんなものか体験してみましょう。ここではミニトマト農家になったつもりで、収穫するかどうかを自動で見分けてくれるAIを作ることにしましょう。
## コンピュータが処理できる数値の計算・比較処理に直す

コンピュータは数値の計算と比較が得意なので、数値に直しましょう。例えばトマトの赤みを数値化することは画像処理(後半の章でOpenCVというライブラリを説明します)で比較的簡単にできます。市場価格はスーパーとかでトマトのパックを買って1個当たりの値段を算出すればわりと正確な値段が出ると思います。ここではあくまで私が適当に付けた値段ですが。。
最後に、市場価格から収穫する/しないかどうかを決めます。これは予想した市場価格に対してあらかじめ決めておいた値との比較でできますね。
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# x: tomato redness score (1-5); y: assumed market price for that redness.
x = np.array([1,2,3,4,5])
y = np.array([4,12,28,52,80])
plt.plot(x,y, label="tomato", marker="o")
```
この関係をグラフに書いてみましょう。そうです、この関係を正しく推定できるとミニトマトの収穫を判断するAIができあがるのです!
さて、どうやって推定しましょうか。。
## 直線で近似してみる
学生時代(私は中学生)に習った直線の方程式で推定してみましょう。2次元グラフは任意の2点(x1,y1)-(x2,y2)間を直線で表現できます。
直線による表現→y=ax+b でしたから、先ほどのグラフの数値(x1,y1)=(1,4), (x2,y2)=(5,80)より
傾き: a=(y2-y1)/(x2-x1)=(80-4)/(5-1)=76/4=19
切片: b=y-ax=4-19×1=-15
従って y=19x-15 という直線式を得ました。これに間の値を代入してどの程度正しく予測できたかみてみましょう。
```
a = 19
b = -15
y_est1 = a * x + b
plt.plot(x,y, label="tomato", marker="o")
plt.plot(x,y_est1, label="estimation", marker="o")
```
うーん、ダメですね。。両端は上手く予想出来ていますが、その間の誤差が大きそうです。。
## 誤差を最小に抑える
トライアンドエラーで誤差が最も小さくなる直線、すなわち傾きと切片を求めるのは大変そうです。そこで、全ての点の二乗誤差の合計を最小にするようにしましょう。二乗を使うのは正負に影響を受けないようにするためです。まず、傾きaの計算式は
a= Sum of (x-xの平均)*(y-yの平均) / Sum of (x-xの平均)^2
です。計算してみましょう。
x_mean = (1+2+3+4+5)/5=3
a = 192/10=19.2 と計算できましたね。Jupyter NotebookはPythonプログラミングもできるので、試行錯誤しながらプログラミングするのにとても向いています。
切片bは
b=yの平均 - a×xの平均
です。計算するとb=-22.4です。
```
a = 19.2
b = -22.4
y_est2 = a * x + b
plt.plot(x,y, label="tomato", marker="o")
plt.plot(x,y_est2, label="estimation (mean of squared error(MSE))", marker="o")
```
おお、今度はデータの間をちょうど通る直線が引けましたね!なんとなく実際の市場価格を推定できてそうです。
## 他の近似式では?
するどい!何も直線式で近似する必要はないです。その通りです。2次式・多項式・さらに複雑な式、、色々ありすぎて困りますね。AIの設計は近似式をどうやって決めるかがポイントですがトライアンドエラーをするしかないのが現状です。それに、近似式のいろいろな値(今回は傾きと切片)を決めるのも大変そうですね。。
最近ではクラウド上の計算機を大量に使って力技で探しています。ただ、それでも探す範囲が広すぎるので全自動化はまだ難しいのが現状です。
## これまでのステップは機械学習そのものだった
実はこれまでの一連の作業は機械学習というAIの1分野の手法を使ったのでした。具体的には
色や市場価格を準備する→データを収集する
近似式を決める→モデルを設計する
切片や傾きを求める→学習を行う(モデルのパラメータを決める)
推定がどれくらい正確かを確認する→モデルの検定を行う
をやっていたのです。最近、機械学習・AIが紛れていましたが、なんとなく両者の関係がわかってきましたでしょうか?AIを設計する=機械学習で設計する、といっても過言でないくらい今日では機械学習が主流となっています。次回は機械学習をより詳しく説明します。
## 課題
AIを使ったアプリケーション・製品について一つ調査し、レポートを提出してください。
フォーマット:PDF形式(図、文章、参考にした文献(URL))
提出先:T2Scholar
締め切り: (講義中にアナウンスします)
## 参考文献
[1] https://ja.wikipedia.org/wiki/%E4%BA%BA%E5%B7%A5%E7%9F%A5%E8%83%BD
| github_jupyter |
# Pre-traitement
```
import pyspark
from pyspark.sql import SparkSession

# Create (or reuse) the SparkSession for this notebook.
spark = SparkSession \
    .builder \
    .appName('Transform') \
    .getOrCreate()
sc = spark.sparkContext
```
# CountVectorizer

```
from pyspark.ml.feature import CountVectorizer

# Input data: Each row is a bag of words with a ID.
df = spark.createDataFrame([
    (0, "secret prize a b c".split(" ")),
    (1, "a b prize c prize".split(" "))
], ["id", "words"])

# fit a CountVectorizerModel from the corpus.
# vocabSize caps the vocabulary; minDF=2.0 keeps only terms seen in >= 2 rows.
cv = CountVectorizer(inputCol="words", outputCol="features", vocabSize=5, minDF=2.0)
model = cv.fit(df)
result = model.transform(df)
result.show(truncate=False)
```
# FeatureHasher
```
from pyspark.ml.feature import FeatureHasher

dataset = spark.createDataFrame([
    (2.2, True, "1", "foo"),
    (3.3, False, "2", "bar"),
    (4.4, False, "3", "baz"),
    (5.5, False, "4", "foo")
], ["real", "bool", "stringNum", "string"])

# Hash mixed-type columns into a single fixed-size feature vector.
hasher = FeatureHasher(inputCols=["real", "bool", "stringNum", "string"],
                       outputCol="features")
featurized = hasher.transform(dataset)
featurized.show(truncate=False)
```
# Tokenizer
```
from pyspark.ml.feature import Tokenizer, RegexTokenizer
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType

sentenceDataFrame = spark.createDataFrame([
    (0, "Hi I heard about Spark"),
    (1, "I wish Java could use case classes"),
    (2, "Logistic,regression,models,are,neat")
], ["id", "sentence"])

# Whitespace tokenizer vs. a regex tokenizer splitting on non-word chars.
tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
regexTokenizer = RegexTokenizer(inputCol="sentence", outputCol="words", pattern="\\W")
# alternatively, pattern="\\w+", gaps(False)

# UDF counting the number of tokens produced per sentence.
countTokens = udf(lambda words: len(words), IntegerType())

tokenized = tokenizer.transform(sentenceDataFrame)
tokenized.select("sentence", "words")\
    .withColumn("tokens", countTokens(col("words"))).show(truncate=False)
```
# StopWordsRemover
```
from pyspark.ml.feature import StopWordsRemover

sentenceData = spark.createDataFrame([
    (0, ["I", "saw", "the", "red", "balloon"]),
    (1, ["Mary", "had", "a", "little", "lamb"])
], ["id", "raw"])

# Drop common English stop words from each token list.
remover = StopWordsRemover(inputCol="raw", outputCol="filtered")
remover.transform(sentenceData).show(truncate=False)
```
# NGram
```
from pyspark.ml.feature import NGram

wordDataFrame = spark.createDataFrame([
    (0, ["Hi", "I", "heard", "about", "Spark"]),
    (1, ["I", "wish", "Java", "could", "use", "case", "classes"]),
    (2, ["Logistic", "regression", "models", "are", "neat"])
], ["id", "words"])

# Build trigrams (n=3) from each token list.
ngram = NGram(n=3, inputCol="words", outputCol="ngrams")
ngramDataFrame = ngram.transform(wordDataFrame)
ngramDataFrame.select("ngrams").show(truncate=False)
```
# Binarizer
```
from pyspark.ml.feature import Binarizer

continuousDataFrame = spark.createDataFrame([
    (0, 0.1),
    (1, 0.8),
    (2, 0.2)
], ["id", "feature"])

# Map values > 0.5 to 1.0 and values <= 0.5 to 0.0.
binarizer = Binarizer(threshold=0.5, inputCol="feature", outputCol="binarized_feature")
binarizedDataFrame = binarizer.transform(continuousDataFrame)
print("Binarizer output with Threshold = %f" % binarizer.getThreshold())
binarizedDataFrame.show()
```
# Analyse Par Composante Principale : PCA
```
from pyspark.ml.feature import PCA
from pyspark.ml.linalg import Vectors

data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
        (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
        (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
df = spark.createDataFrame(data, ["features"])

# Project the 5-dimensional vectors onto their first 3 principal components.
pca = PCA(k=3, inputCol="features", outputCol="pcaFeatures")
model = pca.fit(df)
result = model.transform(df).select("pcaFeatures")
result.show(truncate=False)
```
# PolynomialExpansion
```
from pyspark.ml.feature import PolynomialExpansion
from pyspark.ml.linalg import Vectors

df = spark.createDataFrame([
    (Vectors.dense([2.0, 1.0]),),
    (Vectors.dense([0.0, 0.0]),),
    (Vectors.dense([3.0, -1.0]),)
], ["features"])

# Expand each feature vector into its polynomial terms up to degree 3.
polyExpansion = PolynomialExpansion(degree=3, inputCol="features", outputCol="polyFeatures")
polyDF = polyExpansion.transform(df)
polyDF.show(truncate=False)
```
# StringIndexer
```
from pyspark.ml.feature import StringIndexer

df = spark.createDataFrame(
    [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
    ["id", "category"])

# Encode string labels as numeric indices (most frequent label -> 0).
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
indexed = indexer.fit(df).transform(df)
indexed.show()
```
# IndexToString
```
from pyspark.ml.feature import IndexToString, StringIndexer

df = spark.createDataFrame(
    [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
    ["id", "category"])

# Forward mapping: string label -> numeric index.
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
model = indexer.fit(df)
indexed = model.transform(df)

print("Transformed string column '%s' to indexed column '%s'"
      % (indexer.getInputCol(), indexer.getOutputCol()))
indexed.show()

print("StringIndexer will store labels in output column metadata\n")

# Reverse mapping: numeric index -> original string, via column metadata.
converter = IndexToString(inputCol="categoryIndex", outputCol="originalCategory")
converted = converter.transform(indexed)

print("Transformed indexed column '%s' back to original string column '%s' using "
      "labels in metadata" % (converter.getInputCol(), converter.getOutputCol()))
converted.select("id", "categoryIndex", "originalCategory").show()
```
# OneHotEncoder
```
from pyspark.ml.feature import OneHotEncoder

# Two already-indexed categorical columns.
df = spark.createDataFrame([
    (0.0, 1.0),
    (1.0, 0.0),
    (2.0, 1.0),
    (0.0, 2.0),
    (0.0, 1.0),
    (2.0, 0.0)
], ["categoryIndex1", "categoryIndex2"])

# One-hot encode both index columns into sparse binary vectors.
encoder = OneHotEncoder(inputCols=["categoryIndex1", "categoryIndex2"],
                        outputCols=["categoryVec1", "categoryVec2"])
model = encoder.fit(df)
encoded = model.transform(df)
encoded.show()

from pyspark.ml.feature import IndexToString, StringIndexer

df = spark.createDataFrame(
    [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
    ["id", "category"])

# Index the string column first, then one-hot encode the resulting indices.
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
model = indexer.fit(df)
indexed = model.transform(df)

print("Transformed string column '%s' to indexed column '%s'"
      % (indexer.getInputCol(), indexer.getOutputCol()))
indexed.show()
indexed.collect()

df = indexed
encoder = OneHotEncoder(inputCols=["categoryIndex"],
                        outputCols=["OneHot"])
model = encoder.fit(df)
encoded = model.transform(df)
encoded.show()
encoded.take(3)
```
# VectorIndexer
```
from pyspark.ml.feature import VectorIndexer

data = spark.read.format("libsvm").load("/demo/spark/mllib/sample_libsvm_data.txt")

# Features with <= 10 distinct values are treated as categorical and indexed.
indexer = VectorIndexer(inputCol="features", outputCol="indexed", maxCategories=10)
indexerModel = indexer.fit(data)

categoricalFeatures = indexerModel.categoryMaps
print("Chose %d categorical features: %s" %
      (len(categoricalFeatures), ", ".join(str(k) for k in categoricalFeatures.keys())))

# Create new column "indexed" with categorical values transformed to indices
indexedData = indexerModel.transform(data)
indexedData.show()
```
# Normalizer
```
from pyspark.ml.feature import Normalizer
from pyspark.ml.linalg import Vectors

dataFrame = spark.createDataFrame([
    (0, Vectors.dense([1.0, 0.5, -1.0]),),
    (1, Vectors.dense([2.0, 1.0, 1.0]),),
    (2, Vectors.dense([4.0, 10.0, 2.0]),)
], ["id", "features"])

# Normalize each Vector using $L^1$ norm.
normalizer = Normalizer(inputCol="features", outputCol="normFeatures", p=1.0)
l1NormData = normalizer.transform(dataFrame)
print("Normalized using L^1 norm")
l1NormData.show()

# Normalize each Vector using $L^\infty$ norm.
# The p parameter can be overridden per-call via a param map.
lInfNormData = normalizer.transform(dataFrame, {normalizer.p: float("inf")})
print("Normalized using L^inf norm")
lInfNormData.show()
```
# Imputer
https://spark.apache.org/docs/latest/api/python/pyspark.ml.html#pyspark.ml.feature.Imputer
```
from pyspark.ml.feature import Imputer

df = spark.createDataFrame([
    (1.0, float("nan")),
    (2.0, float("nan")),
    (float("nan"), 3.0),
    (4.0, 4.0),
    (5.0, 5.0)
], ["a", "b"])

# Replace NaN values with the column mean (the default imputation strategy).
imputer = Imputer(inputCols=["a", "b"], outputCols=["out_a", "out_b"])
model = imputer.fit(df)
model.transform(df).show()
```
| github_jupyter |
## UBC Intro to Machine Learning
### APIs
Instructor: Socorro Dominguez
February 05, 2022
## Exercise to try in your local machine
## Motivation
For our ML class, we want to do a Classifier that differentiates images from dogs and cats.
## Problem
We need a dataset to do this. Our friends don't have enough cats and dogs.
Let's take free, open and legal data from the [Unsplash Image API](https://unsplash.com/developers).
## Caveats
Sometimes, raw data is unsuitable for machine learning algorithms. For instance, we may want:
- Only images that are landscape (i.e. width > height)
- All our images to be of the same resolution
---
## Step 1: Get cat and dog image URLs from the API
We will use the [`search/photos` GET method](https://unsplash.com/documentation#search-photos).
```
import requests
import config as cfg

# API variables
root_endpoint = 'https://api.unsplash.com/'
client_id = cfg.splash['key']  # Unsplash access key kept out of the notebook
# Wrapper function for making API calls and grabbing results
def search_photos(search_term):
    """Query the Unsplash `search/photos` endpoint and return image URLs.

    Args:
        search_term (str): Term to search for, e.g. 'dog' or 'cat'.

    Returns:
        list(str): 'small'-size URLs for the (up to 30) matching photos.

    Raises:
        RuntimeError: If the API responds with a non-200 status code.
    """
    api_method = 'search/photos'
    endpoint = root_endpoint + api_method
    response = requests.get(endpoint,
                            params={'query': search_term, 'per_page': 30, 'client_id': client_id})
    status_code, result = response.status_code, response.json()
    if status_code != 200:
        # Fail fast: the original only printed here and fell through, then
        # crashed with a KeyError because error payloads have no 'results'.
        raise RuntimeError(f'Bad status code: {status_code}')
    image_urls = [img['urls']['small'] for img in result['results']]
    return image_urls
# Fetch up to 30 image URLs per class for the dog/cat classifier dataset.
dog_urls = search_photos('dog')
cat_urls = search_photos('cat')
cat_urls
```
---
## Step 2: Download the images from the URLs
(Step 2a: Google [how to download an image from a URL in Python](https://stackoverflow.com/a/40944159))
We'll just define the function to download an image for now. Later on, we'll use it on images one at a time (but after doing some processing).
```
from PIL import Image

def download_image(url):
    """Download the image at *url* and return it as a PIL Image."""
    # stream=True exposes the raw response so PIL can decode it directly.
    image = Image.open(requests.get(url, stream=True).raw)
    return image

test_img = download_image(cat_urls[0])
test_img.show()
```
---
## Step 3: Download and save images that meet our requirements
We'll need to know how to work with the [PIL Image data type](https://pillow.readthedocs.io/en/stable/reference/Image.html), which is what our `download_image(url)` function returns. Namely, we need to be able to a) get it's resolution and b) resize it.
```
import os
def is_landscape(image):
    """Return True when *image* is strictly wider than it is tall."""
    return image.height < image.width
def save_category_images(urls, category_name, resolution=(256, 256)):
    """Download the images at *urls*, keep the landscape ones, resize them
    to *resolution*, and save them under saved_images/<category_name>/."""
    save_folder = f'saved_images/{category_name}'
    # NOTE(review): os.mkdir assumes the parent 'saved_images/' directory
    # already exists; os.makedirs would be safer — confirm before relying on it.
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    for i, url in enumerate(urls):
        image = download_image(url)
        # Keep only landscape images so all saved samples share orientation.
        if is_landscape(image):
            image = image.resize(resolution)
            filename = f'{i:05d}.jpg'  # zero-padded index, e.g. 00003.jpg
            image.save(os.path.join(save_folder, filename))

save_category_images(dog_urls, 'dogs')
save_category_images(cat_urls, 'cats')
```
| github_jupyter |
# contents from [sqlalchemy ORM tutorial](http://docs.sqlalchemy.org/en/latest/orm/tutorial.html)
---
# Version check
```
import sqlalchemy
# Confirm the installed SQLAlchemy version this tutorial was written against.
sqlalchemy.__version__
```
# Connecting
+ create_engien() 함수 파라미터, database url 형식은 [여기](http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls)에서 확인
```
from sqlalchemy import create_engine
# In-memory SQLite database; echo=True logs every generated SQL statement.
engine = create_engine('sqlite:///:memory:', echo=True)
```
# Declare mapping
+ `__tablename__`, `primary_key` 는 필수
```
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
Base = declarative_base()
print Base
class UserA(Base):
__tablename__ = 'users_a'
id = Column(Integer, primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
def __repr__(self):
return "<UserA(name='{0}', fullname='{1}', password='{3}')>".format(self.name, self.fullname, self.password)
```
# Create a schema
```
UserA.__table__
```
+ Table 객체는 많은 MetaData 객체로 이루어지는데, 신경 쓸 필요없다.
Table(
'users', MetaData(bind=None),
Column('id', Integer(), table=<users>, primary_key=True, nullable=False),
Column('name', String(), table=<users>),
Column('fullname', String(), table=<users>),
Column('password', String(), table=<users>),
schema=None)
+ `Base.metadata.create_all( engine )` 을 호출해서 실제로 `users` 테이블을 생성한다.
+ VARCHAR 필드에 length 가 없는데, SQLite, Postgresql 에서는 유효하지만 다른 DB 에선 그렇지 않을 수 있으며 `String(250)` 형태로 써주면 VARCHAR 에 length 를 지정할 수 있음
```
class User(Base):
    """Demo table `users` with length-limited VARCHAR columns."""
    __tablename__ = 'users'
    # Overwrite the table definition if `users` already exists in metadata.
    __table_args__ = {'extend_existing':True}

    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    fullname = Column(String(255))
    password = Column(String(255))

    def __repr__(self):
        return "<User(id={3}, name='{0}', fullname='{1}', password='{2}')>".format(self.name, self.fullname, self.password, self.id)

# Actually create the `users` table in the database.
Base.metadata.create_all(engine)

# To inspect the `users` table definition:
User.__table__
```
# Create an instance of the Mapped Class
```
# Instantiate a mapped object; the primary key is unset until flushed.
ed_user = User(name="ed", fullname="Ed jones", password="EdsPasswordz")
print ed_user
print ed_user.id # sqlalchemy sets default value
```
# Creating a session
`create_engine()` 을 통해서 `engine` 인스턴스가 아직 만들어지지 않은 경우
Session = sessionmaker()
...
# engine instance 를 만든 후
...
...
# 나중에 engine 과 연결시켜 session 인스턴스를 생성할 수 도 있다.
session = Session.configure(bind=engine)
...
```
from sqlalchemy.orm import sessionmaker

# Bind a Session factory to the engine, then create a working session.
Session = sessionmaker(bind=engine)
session = Session() # create the session instance
```
# Adding new objects
+ session.add() 를 호출하는 시점에 `insert` 가 이루어지지 않고, 필요한 시점을 sqlalchemy 가 캐치해서 `insert` 한다. (select 를 한다든가...)
```
# add() only registers the object; the INSERT is deferred until a flush
# (triggered here by the query below) or an explicit commit.
ed_user = User(name='ed', fullname='ed jones', password='edspasswordz')
session.add(ed_user)
our_user = session.query(User).filter_by(name='ed').first()
print our_user, our_user.id
```
+ 여러 `User` 객체를 insert
```
# Register several objects for insertion at once.
session.add_all([
    User(name='somma', fullname='yonghwan, noh', password='passwordzzzzzz'),
    User(name='somma1', fullname='yonghwan, noh1', password='passwordzzzzzz1'),
    User(name='somma2', fullname='yonghwan, noh2', password='passwordzzzzzz2'),
    User(name='somma3', fullname='yonghwan, noh3', password='passwordzzzzzz3'),
    User(name='somma4', fullname='yonghwan, noh4', password='passwordzzzzzz4'),
    User(name='somma5', fullname='yonghwan, noh5', password='passwordzzzzzz5'),
    User(name='somma6', fullname='yonghwan, noh6', password='passwordzzzzzz6'),
])
```
+ ed_user 객체의 값을 변경하면 알아서 감지해서 처리해준다.
```
print ed_user
# Attribute changes are tracked automatically via the identity map.
ed_user.password = 'changed passwordzz'
print ed_user
session.dirty # objects with pending modifications
session.new # objects pending insertion
session.commit() # flush pending changes and commit to the database
ed_user.id
```
# Rolling Back
session 은 transaction 안에서 동작하기 때문에 rollback 할 수 있다.
```
# modify ed_user's name
print ed_user.name
ed_user.name = 'not ed jones'

# add erroneous user, `fake_user`
fake_user = User(name="fakeuser", fullname="invalid", password="abcde")
session.add(fake_user)

# query (this flushes the pending changes, so both rows are visible)
session.query(User).filter(User.name.in_(['not ed jones', 'fakeuser'])).all()

# rollback: undoes the name change and expunges fake_user from the session
session.rollback()
print session.query(User).filter(User.name.in_(['not ed jones', 'fakeuser'])).all()
print ed_user.name
```
# Querying
## basic query, order_by, label
```
# Iterate full entities ordered by primary key.
for instance in session.query(User).order_by(User.id):
    print instance.name, instance.fullname

# Query individual columns; each row unpacks as a tuple.
for name, fullname in session.query(User.name, User.fullname):
    print name, fullname

# Mixing an entity and a column: the row exposes both by name.
for row in session.query(User, User.name).all():
    print row.User, row.name

# label() aliases a column, accessible as an attribute on the row.
for row in session.query(User.name.label('name_label')).all():
    print row.name_label
```
## alias, limit, offset
```
# alias: give the entity an explicit name usable on the result rows
from sqlalchemy.orm import aliased
user_alias = aliased(User, name='user_alias')
for row in session.query(user_alias, user_alias.name).all():
    print row.user_alias

# limit/offset via Python slicing (translated to LIMIT/OFFSET in SQL)
for u in session.query(User).order_by(User.id)[1:3]:
    print u
```
## filtering results
`session.query()` 의 결과는 `KeyedTuple` 객체, `for name, in ...` 이렇게 쓰면 name 에는 tuple[0] 이 할당되고, `for name in ...` 이렇게 쓰면 name 은 KeyedTuple 객체이므로 출력하려면 `name[0], name[1]` 이런식으로 사용하면 됨
```
for name in session.query(User.name).filter_by(fullname='ed jones'):
    print name[0], type(name)  # each row is a KeyedTuple

# A trailing comma unpacks the 1-tuple row directly.
for name, in session.query(User.name).filter_by(fullname='ed jones'):
    print name

# filter() takes full expressions; filter_by() only keyword equality.
for name, in session.query(User.name).filter(User.fullname=='ed jones'):
    print name

# Chained filter() calls are ANDed together.
for name, in session.query(User.name).filter(User.name == 'ed').\
        filter(User.fullname == 'ed jones'):
    print name
```
## common filter operators
```
# Common filter operators demonstrated on the users table.

# is null / is not null
print session.query(User).filter(User.name != None).first()
print session.query(User).filter(User.name.is_(None)).first()
print session.query(User).filter(User.name.isnot(None)).first()

# not in
print session.query(User).filter(~User.name.in_(['ed', 'somma'])).first()

# in
print session.query(User).filter(User.name.in_(['ed', 'somma'])).first()

# like
print session.query(User).filter(User.name.like('%somma%')).first()

# not equals
print session.query(User).filter(User.name != 'ed').first()

# equals
print session.query(User).filter(User.name == 'ed').first()

# and
from sqlalchemy import and_
print session.query(User).filter(and_(User.name == 'ed', User.fullname == 'ed jones')).first()

# send multiple expression to .filter()
print session.query(User).filter(User.name == 'ed', User.fullname == 'ed jones').first()

# or chain multiple filter()/filter_by() calls
print session.query(User).filter(User.name == 'ed').filter(User.fullname == 'ed jones').first()

# or
from sqlalchemy import or_
print session.query(User).filter(or_(User.name == 'ed', User.fullname == 'ed jones' )).first()
```
# Returning Lists and Scalars
```
# all() returns every matching row as a list
query = session.query(User).filter(User.name.like('somm%')).order_by(User.id)
for row in query.all():
    print row

# first() returns the first result or None
print query.first()

# one() raises unless exactly one row matches
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound

# if no result
try:
    query = session.query(User).filter(User.name == 'no_name').order_by(User.id)
    users = query.one()
except NoResultFound as e:
    print 'exception = {0}'.format(e.message)

# if multiple results
try:
    query = session.query(User).filter(User.name.like('somma%')).order_by(User.id)
    users = query.one()
    print users
except MultipleResultsFound as e:
    print 'exception = {0}'.format(e.message)

# scalar()
# return first element of first result or None if no result present.
# if multiple result returned, `MultipleResultsFound` exception raised.
try:
    query = session.query(User).filter(User.name.like('somma%')).order_by(User.id)
    users = query.scalar()
except MultipleResultsFound as e:
    print 'exception = {0}'.format(e.message)
```
# Using Literal SQL
```
from sqlalchemy import text

# text() embeds literal SQL fragments into a query.
for user in session.query(User).filter(text('id < 3')).order_by(text('id')).all():
    print user.name

# Bind parameters with :name placeholders supplied via params().
for user in session.query(User).filter(text('id < :id and name = :name')).params(id = 3, name = 'somma').all():
    print user.name

# from_statement() runs a fully literal SQL statement.
for users in session.query(User).from_statement(text('select * from users where name=:name')).params(name='ed').all():
    print users

# Selecting ad-hoc columns (including computed ones) by string name.
for id, name, third_ret in session.query('id', 'name', 'the_number_12')\
        .from_statement(text('select id, name, 12 as the_number_12 from users where name like :name'))\
        .params(name='somma%').all():
    print '{0}, {1}, {2}'.format(id, name, third_ret)
```
# Counting
```
# count() wraps the query in a SELECT COUNT subquery.
print session.query(User).count()
print session.query(User).filter(User.name.like('somma%')).count()
```
## count() more nicer way!
```
from sqlalchemy import func

# func.count() with group_by gives per-name counts.
session.query(func.count(User.name), User.name).group_by(User.name).all()

# select count(*) from users
session.query(func.count('*')).select_from(User).scalar()

# `select_from()` can be removed if we express the count via User's primary key.
session.query(func.count(User.id)).scalar()
```
---
# Building a Relationship
```
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref

class Address(Base):
    """`addresses` table: many addresses per user via a foreign key."""
    __tablename__ = 'addresses'
    #__table_args__ = {'extend_existing':True}  # uncomment to overwrite an existing table

    id = Column(Integer, primary_key = True)
    email_address = Column(String, nullable = False)
    user_id = Column(Integer, ForeignKey('users.id'))

    # Bidirectional relationship: Address.user and User.addresses (backref).
    user = relationship('User', backref = backref('addresses', order_by = id))

    def __repr__(self):
        return '<Address (email_address = {0})>'.format(self.email_address)

# create table
Base.metadata.create_all(engine)
```
---
# Working with Related Objects
| github_jupyter |
```
#12/29/20
#runnign synthetic benchmark graphs for synthetic OR datasets generated
#making benchmark images
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, CuDNNLSTM, CuDNNGRU, BatchNormalization, LocallyConnected2D, Permute, TimeDistributed, Bidirectional
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
from keras.utils.generic_utils import Progbar
from keras.layers.merge import _Merge
import keras.losses
from keras.datasets import mnist
from functools import partial
from collections import defaultdict
import tensorflow as tf
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import tensorflow as tf
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import os
import pickle
import numpy as np
import isolearn.io as isoio
import isolearn.keras as isol
import pandas as pd
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
from sequence_logo_helper import dna_letter_at, plot_dna_logo
from keras.backend.tensorflow_backend import set_session
def contain_tf_gpu_mem_usage() :
    """Register a TF session with GPU memory growth enabled, so TensorFlow
    allocates GPU memory on demand instead of grabbing it all upfront."""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

contain_tf_gpu_mem_usage()
class EpochVariableCallback(Callback):
    """Keras callback that re-computes a backend variable at each epoch start.

    Args:
        my_variable: Keras backend variable to update.
        my_func: Callable (current_value, epoch) -> new value.
    """

    def __init__(self, my_variable, my_func):
        self.my_variable = my_variable
        self.my_func = my_func

    def on_epoch_begin(self, epoch, logs=None):
        # `logs` used a mutable default ({}) in the original; it is unused
        # here, so the conventional `None` default is behavior-identical
        # and avoids the shared-mutable-default anti-pattern.
        K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))
#ONLY RUN THIS CELL ONCE
from tensorflow.python.framework import ops

#Stochastic Binarized Neuron helper functions (Tensorflow)
#ST Estimator code adopted from https://r2rt.com/beyond-binary-ternary-and-one-hot-neurons.html
#See Github https://github.com/spitis/

def st_sampled_softmax(logits):
    """Sample a one-hot vector from softmax(logits) with a straight-through
    gradient (forward pass: hard sample; backward pass: soft gradient)."""
    with ops.name_scope("STSampledSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.squeeze(tf.multinomial(logits, 1), 1), onehot_dim, 1.0, 0.0)
        # Ceil's gradient is overridden to Identity (and Mul to STMul) so
        # gradients flow through the hard one-hot sample.
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)

def st_hardmax_softmax(logits):
    """Argmax one-hot of softmax(logits) with a straight-through gradient."""
    with ops.name_scope("STHardmaxSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.argmax(nt_probs, 1), onehot_dim, 1.0, 0.0)
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)

@ops.RegisterGradient("STMul")
def st_mul(op, grad):
    # Straight-through Mul: pass the incoming gradient unchanged to both inputs.
    return [grad, grad]

#Gumbel Distribution Sampler
def gumbel_softmax(logits, temperature=0.5) :
    """Draw a relaxed (Gumbel-softmax) sample from the categorical logits."""
    gumbel_dist = tf.contrib.distributions.RelaxedOneHotCategorical(temperature, logits=logits)
    batch_dim = logits.get_shape().as_list()[0]
    onehot_dim = logits.get_shape().as_list()[1]
    return gumbel_dist.sample()
#PWM Masking and Sampling helper functions
def mask_pwm(inputs):
    """Blend generator logits with a fixed template.

    inputs is a (pwm, template, mask) triple; positions where mask == 1
    keep the pwm values, all other positions take the template values.
    """
    pwm_vals, template, mask = inputs
    return template + pwm_vals * mask
def sample_pwm_st(pwm_logits) :
    """Sample hard one-hot sequences from PWM logits via the ST estimator."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]
    # Flatten to (batch * length, 4) so sampling happens per position.
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4))
    sampled_pwm = st_sampled_softmax(flat_pwm)
    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4))

def sample_pwm_gumbel(pwm_logits) :
    """Sample relaxed one-hot sequences from PWM logits via Gumbel-softmax."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4))
    sampled_pwm = gumbel_softmax(flat_pwm, temperature=0.5)
    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4))
#Generator helper functions
def initialize_sequence_templates(generator, sequence_templates, background_matrices) :
    """Load fixed sequence templates, masks and backgrounds into the
    generator's embedding layers (one row per template class) and freeze them."""
    embedding_templates = []
    embedding_masks = []
    embedding_backgrounds = []
    for k in range(len(sequence_templates)) :
        sequence_template = sequence_templates[k]
        onehot_template = iso.OneHotEncoder(seq_length=len(sequence_template))(sequence_template).reshape((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] not in ['N', 'X'] :
                # Fixed nucleotide: strong logit bias toward the template base.
                nt_ix = np.argmax(onehot_template[0, j, :])
                onehot_template[:, j, :] = -4.0
                onehot_template[:, j, nt_ix] = 10.0
            elif sequence_template[j] == 'X' :
                # 'X' positions get a uniform negative logit.
                onehot_template[:, j, :] = -1.0
        # Mask is 1 only at free ('N') positions; elsewhere the generator's
        # logits are zeroed out and the template logits dominate.
        onehot_mask = np.zeros((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] == 'N' :
                onehot_mask[:, j, :] = 1.0
        embedding_templates.append(onehot_template.reshape(1, -1))
        embedding_masks.append(onehot_mask.reshape(1, -1))
        embedding_backgrounds.append(background_matrices[k].reshape(1, -1))
    embedding_templates = np.concatenate(embedding_templates, axis=0)
    embedding_masks = np.concatenate(embedding_masks, axis=0)
    embedding_backgrounds = np.concatenate(embedding_backgrounds, axis=0)
    # Freeze the embedding layers: templates are constants, not trainable weights.
    generator.get_layer('template_dense').set_weights([embedding_templates])
    generator.get_layer('template_dense').trainable = False
    generator.get_layer('mask_dense').set_weights([embedding_masks])
    generator.get_layer('mask_dense').trainable = False
    generator.get_layer('background_dense').set_weights([embedding_backgrounds])
    generator.get_layer('background_dense').trainable = False
#Generator construction function
def build_sampler(batch_size, seq_length, n_classes=1, n_samples=1, sample_mode='st'):
    """Construct a PWM sampler closure over template/mask/background embeddings.

    Parameters
    ----------
    batch_size : int — sequences per batch (used to un-flatten the sample axis).
    seq_length : int — sequence length in nucleotides.
    n_classes : int — number of template classes stored in the embeddings.
    n_samples : int — one-hot samples drawn per PWM.
    sample_mode : 'st' (straight-through) or 'gumbel' (Gumbel-softmax).

    Returns
    -------
    Callable mapping (class_input, raw_logits) ->
    (pwm_logits, pwm, sampled_pwm, onehot_mask, sampled_mask).

    Raises
    ------
    ValueError — if sample_mode is not 'st' or 'gumbel'.
    """
    # Reshape flat embedding rows back to (1, seq_length, 4).
    reshape_layer = Reshape((1, seq_length, 4))
    # Per-class background, template and mask matrices (initialized elsewhere
    # by initialize_sequence_templates).
    onehot_background_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='background_dense')
    onehot_template_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='template_dense')
    onehot_mask_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='ones', name='mask_dense')
    # Templating / masking and background-addition layers.
    masking_layer = Lambda(mask_pwm, output_shape = (1, seq_length, 4), name='masking_layer')
    background_layer = Lambda(lambda x: x[0] + x[1], name='background_layer')
    # Nucleotide-wise softmax turns logits into a PWM.
    pwm_layer = Softmax(axis=-1, name='pwm')
    # Select the sampling function.
    if sample_mode == 'st':
        sample_func = sample_pwm_st
    elif sample_mode == 'gumbel':
        sample_func = sample_pwm_gumbel
    else:
        # BUGFIX(review): previously an unknown mode left sample_func = None and
        # failed later with an opaque error inside the Lambda layer below.
        raise ValueError("sample_mode must be 'st' or 'gumbel', got " + repr(sample_mode))
    upsampling_layer = Lambda(lambda x: K.tile(x, [n_samples, 1, 1, 1]), name='upsampling_layer')
    sampling_layer = Lambda(sample_func, name='pwm_sampler')
    # Fold the n_samples copies back into a per-example sample axis:
    # (n_samples*batch, 1, L, 4) -> (batch, n_samples, 1, L, 4).
    permute_layer = Lambda(lambda x: K.permute_dimensions(K.reshape(x, (n_samples, batch_size, 1, seq_length, 4)), (1, 0, 2, 3, 4)), name='permute_layer')
    def _sampler_func(class_input, raw_logits):
        # Look up per-class template, mask and background.
        onehot_background = reshape_layer(onehot_background_dense(class_input))
        onehot_template = reshape_layer(onehot_template_dense(class_input))
        onehot_mask = reshape_layer(onehot_mask_dense(class_input))
        # Add the background logits, then apply template and mask.
        pwm_logits = masking_layer([background_layer([raw_logits, onehot_background]), onehot_template, onehot_mask])
        # Nucleotide-wise softmax -> PWM.
        pwm = pwm_layer(pwm_logits)
        # Tile each PWM n_samples times, draw samples, then create sample axis.
        pwm_logits_upsampled = upsampling_layer(pwm_logits)
        sampled_pwm = sampling_layer(pwm_logits_upsampled)
        sampled_pwm = permute_layer(sampled_pwm)
        sampled_mask = permute_layer(upsampling_layer(onehot_mask))
        return pwm_logits, pwm, sampled_pwm, onehot_mask, sampled_mask
    return _sampler_func
#for formulation 2 graphing
def returnXMeanLogits(e_train):
    """Return (log-odds, probabilities) of the mean per-position nucleotide
    distribution of the training set, with Laplace smoothing.

    Used to display PWM differences for the "formulation 2" networks.
    """
    encoded = one_hot_encode(e_train, seq_len=50)
    encoded = np.reshape(encoded, (encoded.shape[0], 1, encoded.shape[1], encoded.shape[2]))
    pseudo_count = 1.0
    # Average one-hot counts over examples, smoothed by one pseudo-count per base.
    x_mean = (np.sum(encoded, axis=(0, 1)) + pseudo_count) / (encoded.shape[0] + 4. * pseudo_count)
    # Element-wise log-odds of the background frequencies.
    x_mean_logits = np.log(x_mean / (1. - x_mean))
    return x_mean_logits, x_mean
#loading testing dataset
from optimusFunctions import *
import pandas as pd
# --- Load the synthetic test dataset and the reference training set ---
csv_to_open = "optimus5_synthetic_random_insert_if_uorf_2_start_1_stop_variable_loc_512.csv"
dataset_name = csv_to_open.replace(".csv", "")
print (dataset_name)
data_df = pd.read_csv("./" + csv_to_open) #open from scores folder
#loaded test set which is sorted by number of start/stop signals
seq_e_test = one_hot_encode(data_df, seq_len=50)
benchmarkSet_seqs = seq_e_test
# Insert a singleton channel axis: (N, 1, 50, 4).
x_test = np.reshape(benchmarkSet_seqs, (benchmarkSet_seqs.shape[0], 1, benchmarkSet_seqs.shape[1], benchmarkSet_seqs.shape[2]))
print (x_test.shape)
e_train = pd.read_csv("bottom5KIFuAUGTop5KIFuAUG.csv")
print ("training: ", e_train.shape[0], " testing: ", x_test.shape[0])
seq_e_train = one_hot_encode(e_train,seq_len=50)
x_mean_logits, x_mean = returnXMeanLogits(e_train)
# NOTE(review): seq_e_train is recomputed here (and again inside
# returnXMeanLogits) — redundant but harmless.
seq_e_train = one_hot_encode(e_train,seq_len=50)
x_train = seq_e_train
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1], x_train.shape[2]))
#background
#for formulation 2 graphing
def returnXMeanLogits(e_train):
    # NOTE(review): exact duplicate of the returnXMeanLogits defined earlier in
    # this script (notebook cell re-run); it harmlessly rebinds the same name.
    # Returns (log-odds, probabilities) of the mean per-position nucleotide
    # distribution, used for displaying PWM differences of the version-2 networks.
    seq_e_train = one_hot_encode(e_train,seq_len=50)
    x_train = seq_e_train
    x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1], x_train.shape[2]))
    pseudo_count = 1.0
    # Laplace-smoothed mean nucleotide frequency per position.
    x_mean = (np.sum(x_train, axis=(0, 1)) + pseudo_count) / (x_train.shape[0] + 4. * pseudo_count)
    x_mean_logits = np.log(x_mean / (1. - x_mean))
    return x_mean_logits, x_mean
# --- Recompute background statistics and assemble the scrambler pipeline ---
e_train = pd.read_csv("bottom5KIFuAUGTop5KIFuAUG.csv")
print ("training: ", e_train.shape[0], " testing: ", x_test.shape[0])
#one hot encode with optimus encoders
seq_e_train = one_hot_encode(e_train,seq_len=50)
x_mean_logits, x_mean = returnXMeanLogits(e_train)
x_train = seq_e_train
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1], x_train.shape[2]))
#Define sequence template for optimus: all 50 positions free ('N')
sequence_template = 'N'*50
sequence_mask = np.array([1 if sequence_template[j] == 'N' else 0 for j in range(len(sequence_template))])
#Visualize background sequence distribution
save_figs = True
plot_dna_logo(np.copy(x_mean), sequence_template=sequence_template, figsize=(14, 0.65), logo_height=1.0, plot_start=0, plot_end=50)
#Calculate mean training set conservation (bits per free position: 2 - entropy)
entropy = np.sum(x_mean * -np.log(x_mean), axis=-1) / np.log(2.0)
conservation = 2.0 - entropy
x_mean_conservation = np.sum(conservation) / np.sum(sequence_mask)
print("Mean conservation (bits) = " + str(x_mean_conservation))
#Calculate mean training set kl-divergence against background
# Clip to avoid log(0) on exact one-hot entries.
x_train_clipped = np.clip(np.copy(x_train[:, 0, :, :]), 1e-8, 1. - 1e-8)
kl_divs = np.sum(x_train_clipped * np.log(x_train_clipped / np.tile(np.expand_dims(x_mean, axis=0), (x_train_clipped.shape[0], 1, 1))), axis=-1) / np.log(2.0)
x_mean_kl_divs = np.sum(kl_divs * sequence_mask, axis=-1) / np.sum(sequence_mask)
x_mean_kl_div = np.mean(x_mean_kl_divs)
print("Mean KL Div against background (bits) = " + str(x_mean_kl_div))
#Sampler configuration
batch_size = 32
seq_length = 50
n_samples = 128  # one-hot samples drawn per input PWM
sample_mode = 'st'  # straight-through sampling
#sample_mode = 'gumbel'
#Load sampler
sampler = build_sampler(batch_size, seq_length, n_classes=1, n_samples=n_samples, sample_mode=sample_mode)
#Load Predictor (frozen; inference only)
predictor_path = 'optimusRetrainedMain.hdf5'
predictor = load_model(predictor_path)
predictor.trainable = False
predictor.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error')
#Build scrambler model: logits -> (pwm_logits, pwm, samples, mask, sampled_mask)
dummy_class = Input(shape=(1,), name='dummy_class')
input_logits = Input(shape=(1, seq_length, 4), name='input_logits')
pwm_logits, pwm, sampled_pwm, pwm_mask, sampled_mask = sampler(dummy_class, input_logits)
scrambler_model = Model([input_logits, dummy_class], [pwm_logits, pwm, sampled_pwm, pwm_mask, sampled_mask])
#Initialize Sequence Templates and Masks
initialize_sequence_templates(scrambler_model, [sequence_template], [x_mean_logits])
scrambler_model.trainable = False
scrambler_model.compile(
    optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
    loss='mean_squared_error'
)
#open all score and reshape as needed
file_names = [
    "l2x_" + dataset_name + "_importance_scores_test.npy",
    "invase_" + dataset_name + "_conv_importance_scores_test.npy",
    "l2x_" + dataset_name + "_full_data_importance_scores_test.npy",
    "invase_" + dataset_name + "_conv_full_data_importance_scores_test.npy",
]
#deepexplain_optimus_utr_OR_logic_synth_1_start_2_stops_method_integrated_gradients_importance_scores_test.npy
model_names =[
    "l2x",
    "invase",
    "l2x_full_data",
    "invase_full_data",
]
model_importance_scores_test = [np.load("./" + file_name) for file_name in file_names]
for scores in model_importance_scores_test:
    print (scores.shape)
# Collapse any per-nucleotide channel into one importance value per position.
for model_i in range(len(model_names)) :
    if model_importance_scores_test[model_i].shape[-1] > 1 :
        model_importance_scores_test[model_i] = np.sum(model_importance_scores_test[model_i], axis=-1, keepdims=True)
for scores in model_importance_scores_test:
    print (scores.shape)
#reshape for mse script -> if not (3008, 1, 50, 1) make it that shape
idealShape = model_importance_scores_test[0].shape
print (idealShape)
for model_i in range(len(model_names)) :
    if model_importance_scores_test[model_i].shape != idealShape:
        model_importance_scores_test[model_i] = np.expand_dims(model_importance_scores_test[model_i], 1)
for scores in model_importance_scores_test:
    print (scores.shape)
on_state_logit_val = 50.  # logit magnitude used to "turn on" selected positions
print (x_test.shape)
dummy_test = np.zeros((x_test.shape[0], 1))  # single template class index 0
x_test_logits = 2. * x_test - 1.  # map one-hot {0,1} to logits {-1,+1}
print (x_test_logits.shape)
print (dummy_test.shape)
x_test_squeezed = np.squeeze(x_test)
y_pred_ref = predictor.predict([x_test_squeezed], batch_size=32, verbose=True)[0]
_, _, _, pwm_mask, sampled_mask = scrambler_model.predict([x_test_logits, dummy_test], batch_size=batch_size)
# Pre-create one result column per (model, quantile) pair.
feature_quantiles = [0.76, 0.82, 0.88]
for name in model_names:
    for quantile in feature_quantiles:
        totalName = name + "_" + str(quantile).replace(".","_") + "_quantile_MSE"
        data_df[totalName] = None
print (data_df.columns)
feature_quantiles = [0.76, 0.82, 0.88]
#batch_size = 128
from sklearn import metrics
model_mses = []
# For each attribution model and quantile: keep only the top-quantile positions,
# resample sequences through the scrambler, and compare predictor outputs on
# the samples against the prediction on the original sequence (per-example MSE).
for model_i in range(len(model_names)) :
    print("Benchmarking model '" + str(model_names[model_i]) + "'...")
    feature_quantile_mses = []
    for feature_quantile_i, feature_quantile in enumerate(feature_quantiles) :
        print("Feature quantile = " + str(feature_quantile))
        if len(model_importance_scores_test[model_i].shape) >= 5 :
            # Scores already stored per-quantile along the leading axis.
            importance_scores_test = np.abs(model_importance_scores_test[model_i][feature_quantile_i, ...])
        else :
            importance_scores_test = np.abs(model_importance_scores_test[model_i])
        # Trim to a whole number of batches.
        n_to_test = importance_scores_test.shape[0] // batch_size * batch_size
        importance_scores_test = importance_scores_test[:n_to_test]
        # Zero scores at positions the template fixes (mask == 0).
        importance_scores_test *= np.expand_dims(np.max(pwm_mask[:n_to_test], axis=-1), axis=-1)
        quantile_vals = np.quantile(importance_scores_test, axis=(1, 2, 3), q=feature_quantile, keepdims=True)
        quantile_vals = np.tile(quantile_vals, (1, importance_scores_test.shape[1], importance_scores_test.shape[2], importance_scores_test.shape[3]))
        # Keep only positions whose score exceeds the per-example quantile.
        top_logits_test = np.zeros(importance_scores_test.shape)
        top_logits_test[importance_scores_test > quantile_vals] = on_state_logit_val
        top_logits_test = np.tile(top_logits_test, (1, 1, 1, 4)) * x_test_logits[:n_to_test]
        _, _, samples_test, _, _ = scrambler_model.predict([top_logits_test, dummy_test[:n_to_test]], batch_size=batch_size)
        print (samples_test.shape)
        msesPerPoint = []
        for data_ix in range(samples_test.shape[0]) :
            #for each sample, look at kl divergence for the 128 size batch generated
            #for MSE, just track the pred vs original pred
            if data_ix % 1000 == 0 :
                print("Processing example " + str(data_ix) + "...")
            #from optimus R^2, MSE, Pearson R script
            justPred = np.expand_dims(np.expand_dims(x_test[data_ix, 0, :, :], axis=0), axis=-1)
            justPredReshape = np.reshape(justPred, (1,50,4))
            expanded = np.expand_dims(samples_test[data_ix, :, 0, :, :], axis=-1) #batch size is 128
            expandedReshape = np.reshape(expanded, (n_samples, 50,4))
            # Reference prediction on the original sequence vs predictions on all samples.
            y_test_hat_ref = predictor.predict(x=justPredReshape, batch_size=1)[0][0]
            y_test_hat = predictor.predict(x=[expandedReshape], batch_size=32)
            pwmGenerated = y_test_hat.tolist()
            tempOriginals = [y_test_hat_ref]*y_test_hat.shape[0]
            asArrayOrig = np.array(tempOriginals)
            asArrayGen = np.array(pwmGenerated)
            squeezed = np.squeeze(asArrayGen)
            mse = metrics.mean_squared_error(asArrayOrig, squeezed)
            #msesPerPoint.append(mse)
            totalName = model_names[model_i] + "_" + str(feature_quantile).replace(".","_") + "_quantile_MSE"
            data_df.at[data_ix, totalName] = mse
            msesPerPoint.append(mse)
        msesPerPoint = np.array(msesPerPoint)
        feature_quantile_mses.append(msesPerPoint)
    model_mses.append(feature_quantile_mses)
#Store benchmark results as tables
save_figs = False
mse_table = np.zeros((len(model_mses), len(model_mses[0])))
for i, model_name in enumerate(model_names) :
    for j, feature_quantile in enumerate(feature_quantiles) :
        mse_table[i, j] = np.mean(model_mses[i][j])
#Plot and store mse table
f = plt.figure(figsize = (4, 6))
cells = np.round(mse_table, 3).tolist()
print("--- MSEs ---")
# Fixed-width text dump of the table (model rows x quantile columns).
max_len = np.max([len(model_name.upper().replace("\n", " ")) for model_name in model_names])
print(("-" * max_len) + " " + " ".join([(str(feature_quantile) + "0")[:4] for feature_quantile in feature_quantiles]))
for i in range(len(cells)) :
    curr_len = len([model_name.upper().replace("\n", " ") for model_name in model_names][i])
    row_str = [model_name.upper().replace("\n", " ") for model_name in model_names][i] + (" " * (max_len - curr_len))
    for j in range(len(cells[i])) :
        # Pad/truncate each cell to exactly 4 characters.
        cells[i][j] = (str(cells[i][j]) + "00000")[:4]
        row_str += " " + cells[i][j]
    print(row_str)
print("")
table = plt.table(cellText=cells, rowLabels=[model_name.upper().replace("\n", " ") for model_name in model_names], colLabels=feature_quantiles, loc='center')
ax = plt.gca()
#f.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
plt.tight_layout()
if save_figs :
    plt.savefig(dataset_name + "_l2x_and_invase_full_data" + "_mse_table.png", dpi=300, transparent=True)
    plt.savefig(dataset_name + "_l2x_and_invase_full_data" + "_mse_table.eps")
plt.show()
```
| github_jupyter |
# Signal Autoencoder
```
import numpy as np
import scipy as sp
import scipy.stats
import itertools
import logging
import matplotlib.pyplot as plt
import pandas as pd
import torch.utils.data as utils
import math
import time
import tqdm
import torch
import torch.optim as optim
import torch.nn.functional as F
from argparse import ArgumentParser
from torch.distributions import MultivariateNormal
import torch.nn as nn
import torch.nn.init as init
import sys
sys.path.append("../new_flows")
from flows import RealNVP, Planar, MAF
from models import NormalizingFlowModel
####MAF
class VAE_NF(nn.Module):
    """VAE whose approximate posterior is refined by a chain of K MAF flows.

    Encoder: 16 -> 50 -> 48 -> 2*D (mu, log_var); decoder mirrors back to 16.
    The 16-dim input matches the 16 engineered jet features used below.
    """

    def __init__(self, K, D):
        super().__init__()
        self.dim = D  # latent dimensionality
        self.K = K    # number of flow steps
        self.encoder = nn.Sequential(
            nn.Linear(16, 50),
            # BUGFIX(review): nn.LeakyReLU(True) passed True as negative_slope
            # (== 1.0, i.e. the identity function); the intended argument is
            # inplace=True (slope stays at the 0.01 default).
            nn.LeakyReLU(inplace=True),
            nn.Linear(50, 48),
            nn.LeakyReLU(inplace=True),
            nn.Linear(48, D * 2)
        )
        self.decoder = nn.Sequential(
            nn.Linear(D, 48),
            nn.LeakyReLU(inplace=True),
            nn.Linear(48, 50),
            nn.LeakyReLU(inplace=True),
            nn.Linear(50, 16)
        )
        # BUGFIX(review): previously a single MAF instance was repeated K times,
        # so all K flow steps shared one set of parameters; build K independent
        # flows instead.
        flows_init = [MAF(dim=D) for _ in range(K)]
        prior = MultivariateNormal(torch.zeros(D).cuda(), torch.eye(D).cuda())
        self.flows = NormalizingFlowModel(prior, flows_init)

    def forward(self, x):
        """Return (reconstruction, KL term) for a batch x of shape (B, 16)."""
        # Run Encoder and get NF params
        enc = self.encoder(x)
        mu = enc[:, :self.dim]
        log_var = enc[:, self.dim: self.dim * 2]
        # Re-parametrize: z = mu + sigma * eps
        sigma = (log_var * .5).exp()
        z = mu + sigma * torch.randn_like(sigma)
        kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
        # Construct a more expressive posterior with the NF; subtract the mean
        # sum-log-abs-det-Jacobian from the (per-example) KL term.
        z_k, _, sum_ladj = self.flows(z)
        kl_div = kl_div / x.size(0) - sum_ladj.mean()  # mean over batch
        # Run Decoder
        x_prime = self.decoder(z_k)
        return x_prime, kl_div
# --- Load dijet events and engineer DDT-corrected n-subjettiness features ---
prong_2 = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/delphes_output_5000_850_450.h5")
#prong_3 = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5")
columns = prong_2.columns
columns
dt = prong_2.values
# Basic sanity cuts: positive masses, pTs and softdrop mass ratios.
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) &(dt[:,2]>0) & (dt[:,16]>0) & (dt[:,32]>0)
dt = dt[correct]
# Normalize columns 13-18 / 29-34 by the respective jet-mass columns (3 / 19).
for i in range(13,19):
    dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
    dt[:,i] = dt[:,i]/(dt[:,19])
# Keep only events whose normalized ratios are physical (in [0, 1]).
correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
dt = dt[correct]
#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
#Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#idx = dt[:,-1]
#bkg_idx = np.where(idx==0)[0]
#signal_idx = np.where((idx==1) & (dt[:,3]>300))[0]
#signal_idx = np.where((idx==1)) [0]
#dt = dt[signal_idx]
# Keep BSM-like events (large jet-1 softdrop mass ratio).
bsmlike = np.where(dt[:,16]>0.9)[0]
dt = dt[bsmlike]
dt.shape
# Softdrop masses of the two jets (mass * mass-ratio columns).
j1sdb = dt[:,3]*dt[:,16]
j2sdb = dt[:,19]*dt[:,32]
pt = dt[:,1]
m = j1sdb[:]
m2 = j2sdb[:]
tau21 = dt[:,4]
tau32 = dt[:,5]
tau43 = dt[:,6]
tau54 = dt[:,7]
tau65 = dt[:,8]
massratio = dt[:,16]
# DDT correction: shift tau ratios by a linear function of rho' = log(m^2 / pt).
rho = np.log((m*m)/(pt*pt))
rhoprime = np.log((m*m)/(pt*1))
tau21prime = tau21 + rhoprime * 0.088
tau32prime = tau32 + rhoprime * 0.025
tau43prime = tau43 + rhoprime * 0.01
tau54prime = tau54 + rhoprime * 0.001
j2pt = dt[:,2]
#m = j1sdb[mrange]
j2m = j2sdb[:]
j2tau21 = dt[:,20]
j2tau32 = dt[:,21]
j2tau43 = dt[:,22]
j2tau54 = dt[:,23]
j2tau65 = dt[:,24]
j2massratio = dt[:,32]
j2rho = np.log((j2m*j2m)/(j2pt*j2pt))
j2rhoprime = np.log((j2m*j2m)/(j2pt*1))
j2tau21prime = j2tau21 + j2rhoprime * 0.086
j2tau32prime = j2tau32 + j2rhoprime * 0.025
j2tau43prime = j2tau43 + j2rhoprime * 0.01
j2tau54prime = j2tau54 + j2rhoprime * 0.001
# Write the DDT-corrected ratios back in place of the raw ones.
dt[:,4] = tau21prime
dt[:,5] = tau32prime
dt[:,6] = tau43prime
dt[:,7] = tau54prime
dt[:,20] = j2tau21prime
dt[:,21] = j2tau32prime
dt[:,22] = j2tau43prime
dt[:,23] = j2tau54prime
columns[19]
# Replace jet-2 mass column with the mass difference m1 - m2.
m1minusm2 = dt[:,3] - dt[:,19]
dt[:,19] = m1minusm2
# Final 16-feature matrix fed to the autoencoder.
Y = dt[:,[3,4,5,6,7,8,11,12,19,20,21,22,23,24,27,28]]
Y.shape
#if nprong == 3:
# dt = prong_3.values
# correct = (dt[:,3]>20) &(dt[:,19]>20)
# dt = dt[correct]
# for i in range(13,19):
# dt[:,i] = dt[:,i]/dt[:,3]
# for i in range(29,35):
# dt[:,i] = dt[:,i]/(dt[:,19])
# correct = (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
# dt = dt[correct]
# Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]]
# #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
# idx = dt[:,-1]
# bkg_idx = np.where(idx==0)[0]
# signal_idx = np.where((idx==1) & (dt[:,3]>400))[0]
# #signal_idx = np.where((idx==1)) [0]
# Y = Y[signal_idx]
bins = np.linspace(0,1,100)
bins.shape
column = 5
#print(f_rnd.columns[column])
plt.hist(dt[:,16],bins,alpha=0.5,color='b');
#plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
#plt.axvline(np.mean(Y[:,column]))
Y.shape
# Standardize each of the 16 features; keep mean/std to reuse on other datasets.
sig_mean = []
sig_std = []
for i in range(16):
    mean = np.mean(Y[:,i])
    std = np.std(Y[:,i])
    sig_mean.append(mean)
    sig_std.append(std)
    Y[:,i] = (Y[:,i]-mean)/std
sig_mean
sig_std
total_sig = torch.tensor(Y)
total_sig.shape
bins = np.linspace(-3,3,100)
bins.shape
column = 5
#print(f_rnd.columns[column])
plt.hist(Y[:,1],bins,alpha=0.5,color='b');
#plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
#plt.axvline(np.mean(Y[:,column]))
# Training hyper-parameters.
N_EPOCHS = 30
PRINT_INTERVAL = 2000
NUM_WORKERS = 4
LR = 1e-6  # NOTE(review): unused — the optimizer below hard-codes lr=1e-6
#N_FLOWS = 6
#Z_DIM = 8
N_FLOWS = 10
Z_DIM = 6
n_steps = 0
sigmodel = VAE_NF(N_FLOWS, Z_DIM).cuda()
print(sigmodel)
bs = 800
# Same tensor serves as train and "validation" set (no held-out split).
sig_train_iterator = utils.DataLoader(total_sig, batch_size=bs, shuffle=True)
sig_test_iterator = utils.DataLoader(total_sig, batch_size=bs)
sigoptimizer = optim.Adam(sigmodel.parameters(), lr=1e-6)
beta = 1  # weight of the KL term in the loss
def sigtrain():
    """Run one epoch of training over sig_train_iterator, updating sigmodel in place.

    Loss = sum-MSE reconstruction / batch_size + beta * KL (from the flow VAE).
    Uses the module-level sigmodel / sigoptimizer / beta / PRINT_INTERVAL and
    increments the global step counter n_steps once per batch.
    """
    global n_steps
    train_loss = []
    sigmodel.train()
    # Hoisted out of the batch loop; reduction='sum' replaces the deprecated
    # size_average=False (identical behavior: sum over all elements).
    mseloss = nn.MSELoss(reduction='sum')
    for batch_idx, x in enumerate(sig_train_iterator):
        start_time = time.time()
        x = x.float().cuda()
        x_tilde, kl_div = sigmodel(x)
        # Per-example reconstruction loss (sum over features, mean over batch).
        loss_recons = mseloss(x_tilde, x) / x.size(0)
        loss = loss_recons + beta * kl_div
        sigoptimizer.zero_grad()
        loss.backward()
        sigoptimizer.step()
        train_loss.append([loss_recons.item(), kl_div.item()])
        if (batch_idx + 1) % PRINT_INTERVAL == 0:
            print('\tIter [{}/{} ({:.0f}%)]\tLoss: {} Time: {:5.3f} ms/batch'.format(
                batch_idx * len(x), 50000,
                PRINT_INTERVAL * batch_idx / 50000,
                np.asarray(train_loss)[-PRINT_INTERVAL:].mean(0),
                1000 * (time.time() - start_time)
            ))
        n_steps += 1
def sigevaluate(split='valid'):
    """Evaluate sigmodel on sig_test_iterator and return the mean total loss.

    Loss per batch = sum-MSE reconstruction / batch_size + beta * KL.
    `split` is only used as a label in the printed summary.
    """
    global n_steps
    start_time = time.time()
    val_loss = []
    sigmodel.eval()
    # reduction='sum' replaces the deprecated size_average=False (same behavior).
    mseloss = nn.MSELoss(reduction='sum')
    with torch.no_grad():
        for batch_idx, x in enumerate(sig_test_iterator):
            x = x.float().cuda()
            x_tilde, kl_div = sigmodel(x)
            loss_recons = mseloss(x_tilde, x) / x.size(0)
            loss = loss_recons + beta * kl_div
            val_loss.append(loss.item())
    print('\nEvaluation Completed ({})!\tLoss: {:5.4f} Time: {:5.3f} s'.format(
        split,
        np.asarray(val_loss).mean(0),
        time.time() - start_time
    ))
    return np.asarray(val_loss).mean(0)
# Configuration tag used to build weight/output filenames for this AE setup.
ae_def = {
    "type":"sig",
    "trainon":"BB2refined",
    "features":"tauDDTwithm1andm1minusm2",
    "architecture":"MAF",
    "selection":"turnoncutandj1sdbcut0p9",
    "trainloss":"MSELoss",
    "beta":"beta1",
    "zdimnflow":"z6f10",
}
ae_def
N_EPOCHS = 10
BEST_LOSS = 99
LAST_SAVED = -1
PATIENCE_COUNT = 0
PATIENCE_LIMIT = 5  # NOTE(review): unused — the loop below hard-codes a limit of 10
# Early-stopping training loop: save whenever the evaluation loss improves.
for epoch in range(1, 1000):
    print("Epoch {}:".format(epoch))
    sigtrain()
    cur_loss = sigevaluate()
    if cur_loss <= BEST_LOSS:
        PATIENCE_COUNT = 0
        BEST_LOSS = cur_loss
        LAST_SAVED = epoch
        print("Saving model!")
        torch.save(sigmodel.state_dict(),f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5")
    else:
        PATIENCE_COUNT += 1
        print("Not saving model! Last saved: {}".format(LAST_SAVED))
        if PATIENCE_COUNT > 10:
            print("Patience Limit Reached")
            break
# Reload the best checkpoint and inspect reconstructions on the training set.
sigmodel.load_state_dict(torch.load(f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5"))
sigout = sigmodel(torch.tensor(Y).float().cuda())[0]
sigout = sigout.data.cpu().numpy()
bins = np.linspace(-3,3,100)
bins.shape
column = 3
#print(f_rnd.columns[column]
plt.hist(Y[:,column],bins,alpha=0.5,color='b');
plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
plt.axvline(np.mean(Y[:,column]))
# Parallel lists consumed together by zip() in the evaluation loop below:
# one entry per dataset to score.
inputlist = [
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5',
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB2.h5',
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB3.h5',
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_background.h5',
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',
    '/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5'
]
ae_def
# Output paths for the WAIC score (loss + variance) per dataset.
outputlist_waic = [
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb1.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb2.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb3.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_purebkg.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_rndbkg.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_2prong.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_3prong.npy",
]
# Output paths for the plain reconstruction loss per dataset.
outputlist_justloss = [
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb1.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb2.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb3.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy",
    f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy",
]
# Whether each file carries a signal/background bit in its last column.
exist_signalflag = [
    False,
    False,
    False,
    False,
    True,
    True,
    True,
]
# When the bit exists: keep signal rows (True) or background rows (False).
# NOTE(review): conventional_tau_rnd.h5 appears twice on purpose — once
# filtered to background ('rndbkg') and once to 2-prong signal.
is_signal = [
    False,
    False,
    False,
    False,
    False,
    True,
    True
]
# Extra pT cut to apply for the signal samples ('2prong'/'3prong'), else None.
nprong = [
    None,
    None,
    None,
    None,
    None,
    '2prong',
    '3prong'
]
# Score every dataset with the trained signal AE, applying the same feature
# engineering as for the training set, and save per-event WAIC
# (loss + variance) and plain reconstruction-loss arrays.
for in_file, out_file_waic, out_file_justloss, sigbit_flag, is_sig, n_prong in zip(inputlist,outputlist_waic,outputlist_justloss,exist_signalflag,is_signal, nprong):
    f_bb = pd.read_hdf(in_file)
    dt = f_bb.values
    # Same sanity cuts and normalizations as for the training sample.
    correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) &(dt[:,2]>0) & (dt[:,16]>0) & (dt[:,32]>0)
    dt = dt[correct]
    for i in range(13,19):
        dt[:,i] = dt[:,i]/dt[:,3]
    for i in range(29,35):
        dt[:,i] = dt[:,i]/(dt[:,19])
    correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
    dt = dt[correct]
    # Additional selection cuts: jet masses and dijet-mass turn-on.
    correct = (dt[:,3]>100)
    dt = dt[correct]
    correct = (dt[:,19]>20)
    dt = dt[correct]
    correct = (dt[:,0]>=2800)
    dt = dt[correct]
    bsmlike = np.where(dt[:,16]>0.9)[0]
    dt = dt[bsmlike]
    # Same DDT feature engineering as for the training set.
    j1sdb = dt[:,3]*dt[:,16]
    j2sdb = dt[:,19]*dt[:,32]
    pt = dt[:,1]
    m = j1sdb[:]
    m2 = j2sdb[:]
    tau21 = dt[:,4]
    tau32 = dt[:,5]
    tau43 = dt[:,6]
    tau54 = dt[:,7]
    tau65 = dt[:,8]
    massratio = dt[:,16]
    rho = np.log((m*m)/(pt*pt))
    rhoprime = np.log((m*m)/(pt*1))
    tau21prime = tau21 + rhoprime * 0.088
    tau32prime = tau32 + rhoprime * 0.025
    tau43prime = tau43 + rhoprime * 0.01
    tau54prime = tau54 + rhoprime * 0.001
    j2pt = dt[:,2]
    #m = j1sdb[mrange]
    j2m = j2sdb[:]
    j2tau21 = dt[:,20]
    j2tau32 = dt[:,21]
    j2tau43 = dt[:,22]
    j2tau54 = dt[:,23]
    j2tau65 = dt[:,24]
    j2massratio = dt[:,32]
    j2rho = np.log((j2m*j2m)/(j2pt*j2pt))
    j2rhoprime = np.log((j2m*j2m)/(j2pt*1))
    j2tau21prime = j2tau21 + j2rhoprime * 0.086
    j2tau32prime = j2tau32 + j2rhoprime * 0.025
    j2tau43prime = j2tau43 + j2rhoprime * 0.01
    j2tau54prime = j2tau54 + j2rhoprime * 0.001
    dt[:,4] = tau21prime
    dt[:,5] = tau32prime
    dt[:,6] = tau43prime
    dt[:,7] = tau54prime
    dt[:,20] = j2tau21prime
    dt[:,21] = j2tau32prime
    dt[:,22] = j2tau43prime
    dt[:,23] = j2tau54prime
    # Split on the signal bit when present.
    if sigbit_flag:
        idx = dt[:,-1]
        sigidx = (idx == 1)
        bkgidx = (idx == 0)
        if is_sig:
            dt = dt[sigidx]
        else:
            dt = dt[bkgidx]
    # Extra jet-1 mass cuts for the signal samples.
    if n_prong == '2prong':
        correct = dt[:,3] > 300
        dt = dt[correct]
    if n_prong == '3prong':
        correct = dt[:,3] > 400
        dt = dt[correct]
    m1minusm2 = dt[:,3] - dt[:,19]
    dt[:,19] = m1minusm2
    Y = dt[:,[3,4,5,6,7,8,11,12,19,20,21,22,23,24,27,28]]
    #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
    #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
    #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
    print(Y.shape)
    # Standardize with the TRAINING-set statistics.
    for i in range(16):
        Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]
    total_bb_test = torch.tensor(Y)
    #huberloss = nn.SmoothL1Loss(reduction='none')
    # NOTE(review): the two forward passes below sample different latent noise,
    # so the mean loss and its variance come from different reconstructions.
    sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
    bbvar = torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
    waic = sigae_bbloss + bbvar
    #sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()
    print(waic[0:10])
    plt.hist(waic,bins=np.linspace(0,10,1001),density=True);
    plt.xlim([0,2])
    np.save(out_file_waic,waic)
    np.save(out_file_justloss,sigae_bbloss)
# Reload the saved per-event losses and compare distributions across samples.
loss_prong3 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy")
loss_prong2 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy")
loss_purebkg = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy")
loss_rndbkg = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy")
plt.hist(loss_purebkg,bins=np.linspace(0,4,100),density=False,alpha=0.3,label='Pure Bkg');
#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');
plt.hist(loss_prong2,bins=np.linspace(0,4,100),density=False,alpha=0.3,label='2prong (rnd)sig');
plt.hist(loss_prong3,bins=np.linspace(0,4,100),density=False,alpha=0.3,label='3prong (rnd)sig');
#plt.yscale('log')
plt.xlabel('Loss (SigAE trained on 2prong sig)')
plt.legend(loc='upper right')
#plt.savefig('sigae_trained_on_2prongsig.png')
ae_def
len(loss_prong2)
outputlist_waic
outputlist_justloss
sigae_bbloss
ae_def
sigae_bbloss
# sigae_bbloss here is whatever the last loop iteration left behind (3prong).
plt.hist(sigae_bbloss,bins=np.linspace(0,10,1001));
np.save('../data_strings/sigae_2prong_loss_bb3.npy',sigae_bbloss)
X_bkg = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
# NOTE(review): bkg_idx is never defined in this script (only in a
# commented-out block above) — this line raises NameError as written.
X_bkg = X_bkg[bkg_idx]
# NOTE(review): sig_mean/sig_std were computed for the 16-feature layout;
# indexing them for this 12-column selection mixes up feature statistics —
# verify before trusting these normalized values.
for i in range(12):
    X_bkg[:,i] = (X_bkg[:,i]-sig_mean[i])/sig_std[i]
total_bkg_test = torch.tensor(X_bkg)
sigae_bkgloss = torch.mean((sigmodel(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_sigloss = torch.mean((sigmodel(total_sig.float().cuda())[0]- total_sig.float().cuda())**2,dim=1).data.cpu().numpy()
f_3prong = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5")
f_bb1 = pd.read_hdf('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5')
dt_bb1 = f_bb1.values
X_bb1 = dt_bb1[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
X_bb1.shape
sig_mean
sig_std
for i in range(12):
    X_bb1[:,i] = (X_bb1[:,i]-sig_mean[i])/sig_std[i]
plt.hist(X_bb1[:,0],bins = np.linspace(-2,2,10))
# NOTE(review): removed a leftover scratch line:
#   (torch.tensor(dt[i * chunk_size:(i + 1) * chunk_size]) for i in range )
# It iterated the bare `range` type (and referenced an undefined chunk_size),
# raising TypeError as soon as the generator expression was created.
def get_loss(dt):
    # NOTE(review): this definition is immediately shadowed by the
    # generator-based get_loss below; it is dead code kept for reference.
    """Per-example reconstruction MSE of sigmodel over dt, in fixed-size chunks."""
    chunk_size=5000
    total_size=1000000
    i = 0
    i_max = total_size // chunk_size
    print(i_max)
    # Lazily yields one chunk of rows at a time as a tensor.
    gen = (torch.tensor(dt[i*chunk_size: (i + 1) * chunk_size]) for i in range(i_max))
    with torch.no_grad():
        loss = [
            n
            for total_in_selection in gen
            for n in torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy()
        ]
    return loss
def get_loss(dt):
    """Per-example reconstruction MSE of sigmodel over dt, evaluated in chunks.

    Parameters
    ----------
    dt : array-like of shape (N, n_features), already standardized.

    Returns
    -------
    list of float — one mean-squared reconstruction error per row of dt.
    """
    def _chunks(data, chunk_size=5000):
        # Yield ceil(len(data)/chunk_size) chunks. BUGFIX(review): the chunk
        # count was previously hard-coded from total_size=1000000, which
        # produced empty tensors (or truncated the data) whenever
        # len(data) != 1000000; derive it from the actual length instead.
        n_chunks = (len(data) + chunk_size - 1) // chunk_size
        for i in range(n_chunks):
            yield torch.tensor(data[i * chunk_size:(i + 1) * chunk_size])
    loss = []
    with torch.no_grad():
        for batch in _chunks(dt):
            batch = batch.float().cuda()
            recon = sigmodel(batch)[0]
            loss.extend(torch.mean((recon - batch) ** 2, dim=1).data.cpu().numpy())
    return loss
bb1_loss_sig = get_loss(X_bb1)
# NOTE(review): np.float is deprecated/removed in newer NumPy; plain float works.
bb1_loss_sig = np.array(bb1_loss_sig,dtype=np.float)
print(bb1_loss_sig)
plt.hist(bb1_loss_sig,bins=np.linspace(0,100,1001));
np.save('../data_strings/sigaeloss_bb1.npy',bb1_loss_sig)
dt_3prong = f_3prong.values
Z = dt_3prong[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
Z.shape
# Standardize the 12 selected columns with the signal-training statistics
# (same caveat as above: the stats were computed for a 16-feature layout).
for i in range(12):
    Z[:,i] = (Z[:,i]-sig_mean[i])/sig_std[i]
total_3prong = torch.tensor(Z)
# NOTE(review): `model` (a background AE) is not defined in this script — the
# two bkgae_* lines below raise NameError unless it exists in the session.
bkgae_bkgloss = torch.mean((model(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()
bkgae_3prongloss = torch.mean((model(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_3prongloss = torch.mean((sigmodel(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_3prongloss.shape
bins = np.linspace(0,10,1001)
# NOTE(review): signal_idx is only defined in a commented-out block above —
# the weights argument below raises NameError as written.
plt.hist(sigae_sigloss,bins,weights = np.ones(len(signal_idx))*10,alpha=0.4,color='r',label='2 prong signal');
plt.hist(sigae_3prongloss,bins,weights = np.ones(100000)*10,alpha=0.5,color='g',label='3 prong signal');
plt.hist(sigae_bkgloss,bins,alpha=0.4,color='b',label='background');
#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.legend(loc='upper right')
plt.xlabel('Signal AE Loss',fontsize=15)
def get_tpr_fpr(sigloss, bkgloss, aetype='sig'):
    """Scan a loss threshold and return (tpr, fpr) ROC curves.

    For a signal autoencoder ('sig') a *low* loss tags signal, so events
    below the cut are accepted; for a background autoencoder ('bkg') a
    *high* loss tags signal, so events above the cut are accepted.

    Args:
        sigloss: 1-D array of per-event losses for true signal.
        bkgloss: 1-D array of per-event losses for true background.
        aetype: 'sig' or 'bkg'; selects the cut direction.

    Returns:
        (tpr, fpr): two lists of length 1001, one entry per cut in
        np.linspace(0, 50, 1001).

    Raises:
        ValueError: if aetype is invalid (the original silently returned
        two empty lists, hiding typos at the call site).
    """
    if aetype not in ('sig', 'bkg'):
        raise ValueError(f"aetype must be 'sig' or 'bkg', got {aetype!r}")
    sigloss = np.asarray(sigloss)
    bkgloss = np.asarray(bkgloss)
    bins = np.linspace(0, 50, 1001)
    tpr = []
    fpr = []
    for cut in bins:
        if aetype == 'sig':
            # Efficiency of a "loss < cut" selection.
            tpr.append(np.count_nonzero(sigloss < cut) / len(sigloss))
            fpr.append(np.count_nonzero(bkgloss < cut) / len(bkgloss))
        else:
            # Efficiency of a "loss > cut" selection.
            tpr.append(np.count_nonzero(sigloss > cut) / len(sigloss))
            fpr.append(np.count_nonzero(bkgloss > cut) / len(bkgloss))
    return tpr, fpr
def get_precision_recall(sigloss, bkgloss, aetype='bkg'):
    """Scan a loss threshold and return (precision, recall) curves.

    Args:
        sigloss: 1-D array of per-event losses for true signal.
        bkgloss: 1-D array of per-event losses for true background.
        aetype: 'sig' selects "loss < cut", 'bkg' selects "loss > cut".

    Returns:
        (precision, tpr): lists of length 1001, one entry per cut in
        np.linspace(0, 100, 1001). When a cut selects no events at all,
        precision is defined as 0.0 (sklearn's convention); the original
        raised ZeroDivisionError there (e.g. cut=0 with aetype='sig').

    Raises:
        ValueError: if aetype is invalid (the original silently returned
        empty curves).
    """
    if aetype not in ('sig', 'bkg'):
        raise ValueError(f"aetype must be 'sig' or 'bkg', got {aetype!r}")
    sigloss = np.asarray(sigloss)
    bkgloss = np.asarray(bkgloss)
    bins = np.linspace(0, 100, 1001)
    tpr = []
    precision = []
    for cut in bins:
        if aetype == 'sig':
            n_sig = np.count_nonzero(sigloss < cut)
            n_bkg = np.count_nonzero(bkgloss < cut)
        else:
            n_sig = np.count_nonzero(sigloss > cut)
            n_bkg = np.count_nonzero(bkgloss > cut)
        tpr.append(n_sig / len(sigloss))
        selected = n_sig + n_bkg
        # Guard the 0/0 case: no events selected -> precision 0.0.
        precision.append(n_sig / selected if selected else 0.0)
    return precision, tpr
# ROC curves for the signal autoencoder ("loss < cut" selection).
tpr_2prong, fpr_2prong = get_tpr_fpr(sigae_sigloss,sigae_bkgloss,'sig')
# NOTE(review): the 3-prong curve is computed but never plotted below —
# confirm whether it should appear on this figure.
tpr_3prong, fpr_3prong = get_tpr_fpr(sigae_3prongloss,sigae_bkgloss,'sig')
plt.plot(fpr_2prong,tpr_2prong,label='signal AE')
#plt.plot(VAE_bkg_fpr,VAE_bkg_tpr,label='Bkg VAE-Vanilla')
# bkg_fpr4/bkg_tpr4 come from an earlier cell (planar-flow VAE results).
plt.plot(bkg_fpr4,bkg_tpr4,label='Bkg NFlowVAE-Planar')
plt.xlabel(r'$1-\epsilon_{bkg}$',fontsize=15)
plt.ylabel(r'$\epsilon_{sig}$',fontsize=15)
#plt.semilogy()
#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.legend(loc='lower right')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.savefig('ROC_Curve_sigae.png')
# Precision/recall for the background-AE selection ("loss > cut"),
# then persist every curve for later comparison plots.
precision,recall = get_precision_recall(loss_sig,loss_bkg,aetype='bkg')
np.save('NFLOWVAE_PlanarNEW_22var_sigloss.npy',loss_sig)
np.save('NFLOWVAE_PlanarNEW_22var_bkgloss.npy',loss_bkg)
np.save('NFLOWVAE_PlanarNEW_precision.npy',precision)
np.save('NFLOWVAE_PlanarNEW_recall.npy',recall)
np.save('NFLOWVAE_PlanarNEW_bkgAE_fpr.npy',bkg_fpr)
np.save('NFLOWVAE_PlanarNEW_bkgAE_tpr.npy',bkg_tpr)
# NOTE(review): loss_sig/loss_bkg are saved twice — under the "...22var_*"
# names above and the shorter names below; confirm both are needed.
np.save('NFLOWVAE_PlanarNEW_sigloss.npy',loss_sig)
np.save('NFLOWVAE_PlanarNEW_bkgloss.npy',loss_bkg)
plt.plot(recall,precision)
# Grid-scan the planar-flow VAE over flow count and latent dimension,
# training each configuration with early stopping and saving its losses.
flows = [1, 2, 3, 4, 5, 6]
zdim = [1, 2, 3, 4, 5]
for N_flows in flows:
    for Z_DIM in zdim:
        # BUG FIX: the original constructed VAE_NF(N_FLOWS, ...) and saved
        # checkpoints with f"...f{N_FLOWS}..." — an uppercase name that is
        # NOT the loop variable, so every grid point would have used the
        # same stale (or undefined) flow count. Use the loop variable.
        model = VAE_NF(N_flows, Z_DIM).cuda()
        optimizer = optim.Adam(model.parameters(), lr=LR)
        BEST_LOSS = 99999
        LAST_SAVED = -1
        PATIENCE_COUNT = 0
        # The original declared PATIENCE_LIMIT = 5 but compared against the
        # literal 3; keep the effective behavior and use the constant.
        PATIENCE_LIMIT = 3
        for epoch in range(1, N_EPOCHS):
            print("Epoch {}:".format(epoch))
            train()
            cur_loss = evaluate()
            if cur_loss <= BEST_LOSS:
                PATIENCE_COUNT = 0
                BEST_LOSS = cur_loss
                LAST_SAVED = epoch
                print("Saving model!")
                if mode == 'ROC':
                    torch.save(model.state_dict(), f"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_RND_22var_z{Z_DIM}_f{N_flows}.h5")
                else:
                    torch.save(model.state_dict(), f"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_PureBkg_22var_z{Z_DIM}_f{N_flows}.h5")
            else:
                PATIENCE_COUNT += 1
                print("Not saving model! Last saved: {}".format(LAST_SAVED))
                if PATIENCE_COUNT > PATIENCE_LIMIT:
                    print("Patience Limit Reached")
                    break
        # Evaluate this configuration and store its per-event losses.
        loss_bkg = get_loss(dt_PureBkg[bkg_idx])
        loss_sig = get_loss(dt_PureBkg[signal_idx])
        np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_sigloss.npy', loss_sig)
        np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_bkgloss.npy', loss_bkg)
```
| github_jupyter |
Adapted from [https://github.com/PacktPublishing/Bioinformatics-with-Python-Cookbook-Second-Edition](https://github.com/PacktPublishing/Bioinformatics-with-Python-Cookbook-Second-Edition), Chapter 2.
```
conda config --add channels bioconda
conda install tabix pyvcf
```
You can also check the functions available in `scikit-allel` [here](http://alimanfoo.github.io/2017/06/14/read-vcf.html)
Example of VCF file. Nice explanation by Colleen Saunders can be found [here](https://training.h3abionet.org/IBT_2017/wp-content/uploads/2017/06/Module5_Session4_part3.mp4):
```
##fileformat=VCFv4.3
##reference=file:///seq/references/1000GenomesPilot-NCBI36.fasta
##contig=<ID=20,length=62435964,assembly=B36,md5=f126cdf8a6e0c7f379d618ff66beb2da,species="Homo sapiens",taxonomy=x>
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##INFO=<ID=AF,Number=A,Type=Float,Description="Allele Frequency">
##INFO=<ID=DB,Number=0,Type=Flag,Description="dbSNP membership, build 129">
##FILTER=<ID=q10,Description="Quality below 10">
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read Depth">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NA00001 NA00002 NA00003
20 14370 rs6054257 G A 29 PASS DP=14;AF=0.5;DB GT:DP 0/0:1 0/1:8 1/1:5
20 17330 . T A 3 q10 DP=11;AF=0.017 GT:DP 0/0:3 0/1:5 0/0:41
20 1110696 rs6040355 A G,T 67 PASS DP=10;AF=0.333,0.667;DB GT:DP 0/2:6 1/2:0 2/2:4
20 1230237 . T . 47 PASS DP=13 GT:DP 0/0:7 0/0:4 ./.:.
```
# Getting the necessary data
You just need to do this only once
```
!rm -f data/genotypes.vcf.gz 2>/dev/null
!tabix -fh ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20130502/supporting/vcf_with_sample_level_annotation/ALL.chr22.phase3_shapeit2_mvncall_integrated_v5_extra_anno.20130502.genotypes.vcf.gz 22:1-17000000|bgzip -c > data/genotypes.vcf.gz
!tabix -p vcf data/genotypes.vcf.gz
from collections import defaultdict
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import vcf
```
Variant record level
* AC: total number of alternative allelels in called genotypes
* AF: estimated allele frequency
* NS: number of samples with data
* AN: total number of alleles in called genotypes
* DP: total read depth
* (...)
Sample level:
* GT: genotype
* DP: per sample read depth
```
v = vcf.Reader(filename='data/genotypes.vcf.gz')
print('Variant Level information')
infos = v.infos
for info in infos:
print(info)
print('Sample Level information')
fmts = v.formats
for fmt in fmts:
print(fmt)
```
Let us inspect a single VCF record
```
v = vcf.Reader(filename='data/genotypes.vcf.gz')
rec = next(v)
print('=====\nCHROM, POS, ID, REF, ALT, QUAL, FILTER' )
print(rec.CHROM, rec.POS, rec.ID, rec.REF, rec.ALT, rec.QUAL, rec.FILTER)
print('=====\nVariant-level info')
print(rec.INFO)
print(rec.FORMAT)
print('=====\nSAMPLE ID\'s')
samples = rec.samples
print(len(samples))
sample = samples[0]
print(sample.called, sample.gt_alleles, sample.is_het, sample.is_variant, sample.phased)
print(int(sample['DP']))
```
Let us check the type of each variant and the number of biallelic SNPs
```
f = vcf.Reader(filename='data/genotypes.vcf.gz')
my_type = defaultdict(int)
num_alts = defaultdict(int)
for rec in f:
my_type[rec.var_type, rec.var_subtype] += 1
if rec.is_snp:
num_alts[len(rec.ALT)] += 1
print(my_type)
print(num_alts)
```

```
f = vcf.Reader(filename='data/genotypes.vcf.gz')
sample_dp = defaultdict(int)
for rec in f:
if not rec.is_snp or len(rec.ALT) != 1:
continue
for sample in rec.samples:
dp = sample['DP']
if dp is None:
dp = 0
dp = int(dp)
sample_dp[dp] += 1
dps = list(sample_dp.keys())
dps.sort()
dp_dist = [sample_dp[x] for x in dps]
fig, ax = plt.subplots(figsize=(16, 9))
ax.plot(dp_dist[:50], 'r')
ax.axvline(dp_dist.index(max(dp_dist)))
```
| github_jupyter |
# Trabalhando com Arquivos
Tabela Modos de arquivo

# Métodos de uma lista usando biblioteca rich import inspect
```
from rich import inspect
a = open('arquivo1.txt', 'wt+')
inspect(a, methods=True)
```
# Criando Arquivo w(write) e x
# .close()
```
# cria arquivo ou abre apagando os dados de qualquer arquivo existente
a = open('arquivo1.txt', 'wt+') # w(write text) + (se não existir o arquivo crie) (t decodifica os caracteres Unicode-é default não precisa colocar)
a.close()
# cria arquivo, mas falha se o mesmo ja existir
a = open('arquivo1.txt', 'x')
a.close()
```
# .writefile()
Criar arquivo pelo Jupyter Notebook
```
%%writefile teste.txt
Olá este arquivo foi gerado pelo próprio Jupyter Notebook.
Podemos gerar quantas linhas quisermos e o Jupyter gera o arquivo final.
la...
la....
```
# Abrindo/Lendo arquivos r(read)
```
#Abre arquivo como leitura
a = open('arquivo1.txt', 'r' ,encoding="utf-8") # rt(read text) (,encoding="utf-8")
a.close()
#Abre arquivo como escrita e não apaga o anterior
a = open('arquivo1.txt', 'a') # at(append text)
a.close()
```
# .read() e encoding="utf-8"
O método read() somente funciona se abrir o arquivo como leitura ('r')
Ao abrir o arquivo com uma codificação diferente da que ele foi escrito, alguns caracteres podem apresentar erros, ou, em alguns sistemas operacionais, como no Mac OS, pode ser lançada uma exceção
Tipos de encoding:
https://docs.python.org/3/library/codecs.html#standard-encodings
```
arq4 = open("teste.txt", 'r',encoding="utf-8")
print(arq4.read())
a = open('arquivo1.txt', 'rt',encoding="utf-8") # (,encoding="utf-8") mostra as acentuações
print(a.read())
a = open('arquivo1.txt', 'rt')
print(a.read())
a.close()
a = open('arquivo1.txt', 'rt',encoding="utf-8")
print(a.read(3)) # Lendo os três primeiros caracteres
a.close()
```
# .read() e encoding="latin_1"
O método read() somente funciona se abrir o arquivo como leitura ('r')
Ao abrir o arquivo com uma codificação diferente da que ele foi escrito, alguns caracteres podem apresentar erros, ou, em alguns sistemas operacionais, como no Mac OS, pode ser lançada uma exceção
Tipos de encoding:
https://docs.python.org/3/library/codecs.html#standard-encodings
```
a = open('contatos.csv', encoding='latin_1')
print(a.read())
a.close()
```
# .readlines()
readlines ler linha por linha e coloca em uma lista
```
a = open("teste.txt", 'r',encoding="utf-8")
print(a.read())
a.seek(0)
print(a.readlines())
a.close()
```
# readline() Vs readlines()
readline() - ler somente uma linha
readlines() - coloca todas as linhas em um lista
```
# Testando readline()
from sys import getsizeof
with open('contatos.csv', 'r', encoding='latin_1') as a:
conteudo = a.readline()
print(conteudo)
print(f'conteudo = {getsizeof(conteudo)} bytes')
# Testando readlines()
from sys import getsizeof
with open('contatos.csv', 'r', encoding='latin_1') as a:
conteudo = a.readlines()
print(conteudo)
print(f'conteudo = {getsizeof(conteudo)} bytes')
```
# .seek()
```
arq4 = open("teste.txt", 'r',encoding="utf-8")
print(f'Lendo o arquivo a primeira vez:\n\n{arq4.read()}')
print(f'Tentando ler novamente e não conseguimos\n\n{arq4.read()}') # como ja lemos o arquivo ate o final temos que retornar com seek()
arq4.seek(0)
print(f'Após o uso do seek conseguimos ler novamente!\n\n{arq4.read()}')
```
# .split()
separar os caracteres
```
# separando caracteres por linhas
f = open('salarios.csv', 'r')
data = f.read()
rows = data.split('\n') # '\n' é um espaço separamos por espaço
print(rows) # cada '' é uma linha
f.close()
# separando caracteres por colunas
f = open('salarios.csv', 'r')
data = f.read()
rows = data.split('\n') # '\n' é um espaço separamos por espaço
dados = []
for row in rows:
split_row = row.split(',') # agora dentro de '' vamos separar por "," pois o arquivo é um csv
dados.append(split_row)
print(dados)
f.close()
```
# .tell()
Contar o número de caracteres
```
a = open('arquivo1.txt', 'r', encoding="utf-8")
a.read() # Se não ler o mesmo o .tell() não funciona
print(a.tell())
a.close()
```
# .flush()
Uma característica de quando a gente está trabalhando com escrita de arquivo no Python. A gente precisa fechar o arquivo para indicar que a gente não está mais trabalhando com ele. Somente após fechar é que as edições serão salvas, mas e se não podermos fechar o arquivo?
Com método **flush**, os dados vão ser escritos, porém o arquivo vai continuar aberto.
```
arquivos_contatos = open('contatos.csv', mode='a', encoding='latin_1')
novo_contato = '11,Livio,livio@livio.com.br\n'
arquivos_contatos.write(novo_contato)
arquivos_contatos.flush()
arquivos_contatos.close()
```
# <font color=#FF0000>**with open**</font>
```
with open('arquivo1.txt', mode='r', encoding="utf-8") as a:
conteudo = a.read()
print(conteudo)
```
# <font color=#FF0000>**with open - newline=''**</font>
Ao final de cada linha em um arquivo temos uma instrução de quebra de linha '\n' que significa que o texto irá para proxima linha. Este caractere é oculto, mas conseguimos ve-lo colocando **readlines()**.
Quando não usamos o newline='' o caractere é igual á '\n' (padrão Linux/unix/python) ao utilizar o newline='' o caractere muda para '\r\n' (padrão Microsoft)
# <font color=#FF0000>**diferença entre \n e \r\n e newline=''**</font>
O **\n** significa "new line" ou "line-feed", ou seja, **"nova linha"**.
O **\r** significa "carriage return", ou seja **"retorno de linha"**.
Quando a tabela ASCII foi padronizada, o \n recebeu o código 10 e \r recebeu o código 13.
_A ideia originalmente, quando as tabelas de codificação de caracteres como sequências de bits foram concebidas, é que o \n fosse interpretado como o comando para fazer o cursor se mover para baixo, e o \r o comando para ele se mover de volta até o começo da linha._
> Essa distinção era importante para as máquinas de escrever digitais que precederam os computadores, para telégrafos digitais, para teletipos e para a programação das primeiras impressoras que surgiram. De fato, isso é surpreendentemente mais antigo do que se pensa, já aparecendo no ano de 1901 junto com algumas das primeiras dessas tabelas de codificação de caracteres.
Assim sendo, em **um texto para que uma quebra-de-linha fosse inserida, fazia-se necessário utilizar-se \r\n**. Primeiro o cursor deveria se mover até o começo da linha e depois para baixo. **E foi esse o padrão de quebra-de-linha adotado muito mais tarde pela Microsoft.**
Já o Multics (e posteriormente o Unix) seguiram um caminho diferente, e decidiram implementar o **\n** como quebra-de-linha, o que já incluía um retorno ao começo da linha. Afinal de contas, não tem lá muito sentido ter uma coisa sem ter a outra junto, e **ao utilizá-los como sendo uma coisa só, garante-se que nunca serão separados**. Isso também tem a vantagem de economizar espaço ao usar um só byte para codificar a quebra-de-linha ao invés de dois, e naqueles anos aonde a memória era pequena e o processamento de baixo poder, cada byte economizado contava bastante.
Outras empresas, como a Apple e a Commodore, também seguiram um caminho semelhante ao do Unix, mas ao invés de adotarem o \n para quebras-de-linha, adotaram o \r.
Outras empresas menores adotaram outros códigos para a quebra-de-linha. Por exemplo, o QNX adotou o caractere 30 da tabela ASCII. A Atari adotou o 155. A Acorn e o RISC OS adotaram o \n\r ao invés de \r\n. A Sinclair adotou o 118.
**_Em resumo: Linux utiliza \n que representa \r(retorno ao primeiro caractere da linha) e \n(nova linha). A Apple utiliza \r que representa \r(retorno ao primeiro caractere da linha) e \n(nova linha). Já a Microsoft utiliza o padrão como \r\n. Ao usar o redline='' representamos a quebra de linha como \r\n se ocultarmos o mesmo a quebra de linha será \n._**
```
# Sem newline='' - caractere de fim de linha = '\n' padrão UNIX/Python
with open('arquivo1.txt', mode='r', encoding="utf-8") as a:
print(a.readlines())
# veja que ao final de cada linha temos o '\n'. É usado para indicar o fim de uma linha de texto.
# Com newline='' - caractere de fim de linha = '\r\n' padrão Microsoft
with open('arquivo1.txt', mode='r', encoding="utf-8", newline='') as a:
print(a.readlines())
# veja que ao final de cada linha temos o '\r\n' (padrão Microsoft), preservado por newline=''.
```
# Escrevendo no arquivos a(append)
```
with open('arquivo1.txt', 'a', encoding="utf-8") as a:
a.write('\nEditando arquivo!!!') #\n é um enter, se iniciarmos com ele daremos um enter e apos isso escreveremos.
# a.read() se usar este comando ira dar erro, lembre-se que .read() somente se abrir o arquivo como leitura ('r')
with open('arquivo1.txt', 'r', encoding="utf-8") as a:
print(a.read())
```
# Trabalhando em modo b(binário) (imagens)
```
# criando uma copia da imagem python-logo.png
with open("python-logo.png", "rb") as imagem:
data = imagem.read()
with open("python-logo2.png", "wb") as imagem2:
imagem2.write(data)
```
# Lendo arquivos linha a linha e protegendo uso de memoria
```
from sys import getsizeof
with open('contatos.csv', 'r', encoding='latin_1') as a:
for numero, linha in enumerate(a):
print(f'Imprimindo linha {numero} | {getsizeof(linha)}-bytes\n {linha}', end='')
```
# Erros comuns ao tentar abrir um arquivo.
1. **FileNotFoundError** - Não encontrar o arquivo no local especificado.
1. **PermissionError** - Não tem permissão de escrita/criação no diretorio.
## try + finally
```
# Tratando erros com try:
try:
arquivo = open('contatos.csv', mode='a+', encoding='latin_1')
# Em mode='a' o arquivo abre na ultima linha, colocamos seek(0) para retornar a 1ª linha
# assim o readlines funcionar.
arquivo.seek(0)
conteudo = arquivo.readlines()
print(conteudo)
# finally será executando sempre, é comum colocarmos este tratamento para fechar o arquivo,
# apos o uso. Assim liberando o mesmo para outras pessoas.
finally:
arquivo.close()
```
## simulando FileNotFoundError
* Modificando o nome do arquivo para um arquivo que não existe.
* Abrindo em mode='r', pois em w e a se não existir o arquivo o Python cria
```
try:
arquivo = open('arquivo_nao_existe.csv', mode='r', encoding='latin_1')
arquivo.seek(0)
conteudo = arquivo.readlines()
print(conteudo)
finally:
arquivo.close()
```
### Solução com except FileNotFoundError:
**Agora nosso script não quebra caso não encontre o arquivo**
```
try:
arquivo = open('arquivo_nao_existe.csv', mode='r', encoding='latin_1')
arquivo.seek(0)
conteudo = arquivo.readlines()
print(conteudo)
except FileNotFoundError:
print('Arquivo não encontrado')
except PermissionError:
print('Sem permissão de escrita')
finally:
arquivo.close()
```
### Substituindo finally por with
* with fecha automaticamente um arquivo
* usando Lists Comprehensions simples para imprimir linha a linha
> Utilizamos o comando with para gerenciar o contexto de utilização do arquivo. Além de arquivos, podemos utilizar o with para gerenciar processos que precisam de uma pré e pós condição de execução; por exemplo: abrir e fechar o arquivo, realizar conexão com o banco de dados, sockets, entre outros.
> O objeto que está sendo manipulado pelo with precisa implementar dois métodos mágicos: \_\_enter__() e \_\_exit__().
> O método \_\_enter__() é executado logo no início da chamada da função e retorna uma representação do objeto que está sendo executada no contexto (ou context guard). Ao final, o método \_\_exit__() é invocado, e o contexto da execução, finalizado.
```
try:
with open('contatos.csv', mode='r', encoding='latin_1') as arquivo:
[print(linha, end='') for linha in arquivo]
except FileNotFoundError:
print('Arquivo não encontrado')
except PermissionError:
print('Sem permissão de escrita')
```
# De csv p/ Python
* converter um arquivo csv para um objeto list no python
* usando modulo csv
* criando uma função
## Criando uma class contatos
```
class Contato():
    """Simple value object holding one contact record (id, name, e-mail)."""
    def __init__(self, id: int, nome: str, email: str):
        # Attribute names mirror the CSV columns: id, nome (name), email.
        self.id = id
        self.nome = nome
        self.email = email
## Criando uma função csv para list python
```
import csv
def csv_para_contatos(caminho: str, encoding: str = 'Latin_1'):
    """Read a CSV of contacts and return them as a list of Contato.

    Args:
        caminho: path to the CSV file (columns: id, nome, email).
        encoding: text encoding of the file (default Latin-1).

    Returns:
        list of Contato. BUG FIX: on read errors the original fell
        through its except branches and implicitly returned None, which
        crashed the callers below that iterate the result; now an empty
        list is returned instead.
    """
    contatos: list = []
    try:
        with open(caminho, encoding=encoding) as a:
            leitor = csv.reader(a)
            for linha in leitor:
                id, nome, email = linha  # unpack one CSV row
                contatos.append(Contato(int(id), nome, email))
    except FileNotFoundError:
        print('Arquivo não encontrado')
    except PermissionError:
        print('Sem permissão de escrita')
    return contatos
```
## Testando com arquivo contatos.csv
```
contatos = csv_para_contatos('contatos.csv')
lista = [print(f'{contato.id} - {contato.nome} - {contato.email}') for contato in contatos]
```
# De objeto Python para json
* converter um objeto python para um arquivo json
* usando modulo json
* criando uma função
## Criando uma função objeto python para json
```
import json
# write (escrita)
def contatos_para_json(contatos, caminho: str):
    """Serialize a list of Contato objects to a JSON file at `caminho`."""
    try:
        with open(caminho, mode='w') as a:
            # `default` tells json.dump how to encode the non-primitive
            # Contato objects it encounters.
            json.dump(contatos, a, default=__contato_para_json)
    except FileNotFoundError:
        print('Arquivo não encontrado')
    except PermissionError:
        print('Sem permissão de escrita')
def __contato_para_json(contato):
    """Fallback encoder: expose a Contato's attributes as a plain dict."""
    return contato.__dict__
# leitura
def json_para_contatos(caminho: str):
contatos = []
try:
with open(caminho, mode='r') as a:
contatos_json = json.load(a)
# Contato(contato['id'], contato['nome'], contato['email']) = Contato(**contato)
# assim estariamos desempacotando
[contatos.append(Contato(contato['id'], contato['nome'], contato['email']))
for contato in contatos_json]
return contatos
except FileNotFoundError:
print('Arquivo não encontrado')
except PermissionError:
print('Sem permissão de escrita')
```
## Testando de objeto python para json
```
# transformando csv em objeto python
contatos = csv_para_contatos('contatos.csv')
# transformando objeto python em json
contatos_para_json(contatos, 'contatos.json')
# transformando json em objeto python
contatos = json_para_contatos('contatos.json')
lista = [print(f'{contato.id} - {contato.nome} - {contato.email}') for contato in contatos]
import json
# json.dump = usado para gravar dados de objeto python em arquivo json
# json.dumps = usado para transformar objetos python em objetos str json
# json.load = usado para ler um arquivo json e transforma-lo em objto python
# Codificando hierarquias básicas de objetos Python:
lista = ['foo', {'bar': ('baz', None, 1.0, 2)}]
json_dump = json.dumps(lista)
print(f'{json_dump = }')
dicionario = {"c": 0, "b": 0, "a": 0}
json_dump = json.dumps(dicionario, sort_keys=True)
print(f'{json_dump = }')
# Codificação compacta:
lista = [1, 2, 3, {'4': 5, '6': 7}]
print(json.dumps(lista, separators=(',', ':')))
print(json.dumps(lista))
# Impressão bonita:
dicionario = {'4': 5, '6': 7}
print(json.dumps(dicionario, sort_keys=True, indent=4))
# Decodificando JSON:
texto = '["foo", {"bar":["baz", null, 1.0, 2]}]'
print(json.loads(texto))
import json
developer_Dict = {
"name": "Jane Doe",
"salary": 9000,
"skills": ["Python", "Machine Learning", "Web Development"],
"email": "jane.doe@pynative.com"
}
print(type(developer_Dict))
developer_str = json.dumps(developer_Dict)
print(developer_Dict)
print(type(developer_str))
import json
sampleDict = {
"colorList": ["Red", "Green", "Blue"],
"carTuple": ("BMW", "Audi", "range rover"),
"sampleString": "pynative.com",
"sampleInteger": 457,
"sampleFloat": 225.48,
"booleantrue": True,
"booleanfalse": False,
"nonevalue": None
}
print("Converting Python primitive types into JSON")
resultJSON = json.dumps(sampleDict)
print("Done converting Python primitive types into JSON")
print(resultJSON)
```
# <font color=#FF0000>**json**</font>
Geralmente, anexar dados a um arquivo JSON não é uma ideia muito boa porque, para cada pequena atualização, você deve ler e analisar todo o objeto de arquivo. Se o seu arquivo JSON tiver n entradas, a complexidade do tempo de execução de apenas atualizá-lo é O(n).
**_Uma abordagem melhor seria armazenar os dados como um arquivo CSV que pode ser lido linha por linha que simplifica a análise e atualização significativamente, apenas acrescentando uma única linha ao arquivo que tem complexidade de tempo de execução constante._**
# Sintaxe de <font color=#FF0000>**json.dump()**</font> e <font color=#FF0000>**json.dumps()**</font>
>json.dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, default=None, sort_keys=False, **kw)
**É usado para gravar um objeto Python em um arquivo como dados formatados em JSON.**
>json.dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, default=None, sort_keys=False, **kw)
**É usado para escrever um objeto Python em uma String JSON.**
* **obj** nada mais é que um objeto serializável Python que você deseja converter em um formato JSON.
* A **fp** é um ponteiro de arquivo usado para gravar dados formatados em JSON em um arquivo. O módulo json Python sempre produz objetos de string, não objetos de bytes, portanto, fp.write()deve suportar a entrada de string.
* Se **skipkeysfor** verdadeiro (padrão: False), então as chaves de dict que não são de um tipo básico, (str, int, float, bool, None) serão ignoradas em vez de aumentar a TypeError. Por exemplo, se uma de suas chaves de dicionário for um objeto Python personalizado, essa chave será omitida durante a conversão do dicionário em JSON.
* Se **ensure_ascii** for verdadeiro (o padrão), a saída terá a garantia de ter todos os caracteres não ASCII de entrada com escape. Se ensure_asciifor falso, esses caracteres serão reproduzidos no estado em que se encontram.
* **allow_nan** é True por padrão, então seus equivalentes JavaScript (NaN, Infinity, -Infinity) serão usados. Se for False, será um ValueError para serializar valores flutuantes fora do intervalo (nan, inf, -inf).
* Um **indent** argumento é usado para imprimir JSON para torná-lo mais legível. O padrão é (', ', ': '). Para obter a representação JSON mais compacta, você deve usar (',', ':') para eliminar os espaços em branco.
* Se **sort_keys** for verdadeiro (padrão: Falso), a saída dos dicionários será classificada por chave
# <font color=#FF0000>**json.load()**</font> - Lendo um arquivo json formatado e transformando em dict
```json
{
"permissões": {
"1": {"nome": "Desenvolvedor", "descrição": "Tem acesso full ao sistema"},
"2": {"nome": "Administrador Master", "descrição": "Tem acesso full as funcionalidades do sistema e não pode ser apagado"},
"3": {"nome": "Administrador", "descrição": "Tem acesso full as funcionalidades do sistema e pode ser apagado"},
"4": {"nome": "Escrita", "descrição": "Tem acesso para inserção de dados no sistema e pode se bloquear telas do mesmo"},
"5": {"nome": "Leitura", "descrição": "Tem acesso para leitura de dados no sistema e pode se bloquear telas do mesmo"}
},
"bloqueio_tela": {
"2": {"tela_bloqueadas": []},
"3": {"tela_bloqueadas": []},
"4": {"tela_bloqueadas": []},
"5": {"tela_bloqueadas": []}
},
"telas": {},
"menu_config": {
"0": [{"icon_left": "account", "texto": "_users", "icon_right": "chevron-right", "status_icon_right": "True", "func_icon_right": "config_user", "cor": "False"},
{"icon_left": "tools", "texto": "_project", "icon_right": "chevron-right", "status_icon_right": "True", "func_icon_right": "config_project", "cor": "False"}]
}
}
```
Abrindo arquivo json de varios niveis e transformando o mesmo em objeto dict em python. Por fim manipulando o dict.
```
import json
with open('config_app.json', mode='r', encoding='utf-8') as a:
json_obj = json.load(a)
print(f'Type = {type(json_obj)}')
for key, data in json_obj.items():
print(f'\n{key} - {data}')
print('-'*100)
print(f'\n Imprimindo nivel 2:\n{json_obj["permissões"]["5"]}')
print(f'\n Imprimindo nivel 3:\n{json_obj["permissões"]["5"]["nome"]}')
```
# <font color=#FF0000>**json.load()**</font> - Trabalhando com json.load()
transformando obj json em dict **_(json.loads)_** python e obj json **_(json.dumps)_**
```
import json
json_string = '{"first_name": "Guido", "last_name":"Rossum"}'
print(f'{json_string = }')
print(f'{type(json_string) = }')
# A mesma pode ser analisado assim:
parsed_json = json.loads(json_string)
print(f'\n{parsed_json = }')
print(f'{type(parsed_json) = }')
# e agora pode ser usado como um dicionário normal:
print(f'\n{parsed_json["first_name"] = }')
# convertendo novamente para json
json_obj = json.dumps(parsed_json)
print(f'\n{json_obj = }')
print(f'{type(json_obj) = }')
```
# <font color=#FF0000>**json.dumps()**</font> para converter tipos primitivos Python em equivalentes JSON
Existem vários cenários em que você precisa usar dados JSON serializados em seu programa. Se você precisar desses dados JSON serializados em seu aplicativo de processamento adicional, poderá convertê-los em um **str objeto Python** nativo em vez de gravá-los em um arquivo.
Por exemplo, você recebe uma solicitação HTTP para enviar detalhes do desenvolvedor. você buscou dados de desenvolvedor da tabela de banco de dados e os armazenou em um dicionário Python ou qualquer objeto Python, agora você precisa enviar esses dados de volta para o aplicativo solicitado, então você precisa converter o objeto de dicionário Python em uma string formatada em JSON para enviar como um resposta na string JSON. Para fazer isso, você precisa usar json.dumps().
O json.dumps() retorna a representação de string JSON do Python dict.
## converter o dicionário Python em uma string formatada em JSON
```
import json
def SendJsonResponse(resultDict):
print("Convert Python dictionary into JSON formatted String")
developer_str = json.dumps(resultDict)
print(developer_str)
# sample developer dict
dicionario = {
"name": "Jane Doe",
"salary": 9000,
"skills": ["Python", "Machine Learning", "Web Development"],
"email": "jane.doe@pynative.com"
}
print(f'Type dicionario = {type(dicionario)}')
print(f'{dicionario = }')
string_json = json.dumps(dicionario)
print(f'\nType string_json= {type(string_json)}')
print(f'{string_json = }')
```
# <font color=#FF0000>**json.dumps()**</font> - Mapeamento entre entidades JSON e Python durante a codificação
Para codificar objetos Python no módulo JSON equivalente a JSON, usa-se a seguinte tabela de conversão. A json.dump() e json.dumps() executa o método as traduções quando codificam.
Agora vamos ver como converter todos os tipos primitivos Python, tais como dict, list, set, tuple, str, números em JSON dados formatados. Consulte a tabela a seguir para saber o mapeamento entre os tipos de dados JSON e Python.
Python | Json
:---: | :---:
dict | object
list, tuple | array
str | string
int, float, int & float-derived Enums | number
True | true
False | false
None | null
```
import json
dicionario = {
"colorList": ["Red", "Green", "Blue"],
"carTuple": ("BMW", "Audi", "range rover"),
"sampleString": "pynative.com",
"sampleInteger": 457,
"sampleFloat": 225.48,
"booleantrue": True,
"booleanfalse": False,
"nonevalue": None
}
print(f'Type dicionario = {type(dicionario)}')
print(f'{dicionario = }')
string_json = json.dumps(dicionario)
print(f'\nType string_json= {type(string_json)}')
print(f'{string_json = }')
from json import dumps
#! dict para obj json
carros_dict = {'marca': 'Toyota', 'modelo': 'Corolla', 'cor': 'chumbo'}
print(carros_dict)
print(type(carros_dict))
# transformando em objeto json
carros_json = dumps(carros_dict)
print(f'\n{carros_json}')
print(type(carros_json))
from json import dumps
#! tuple() to array json []
carros_tuple = ('Toyota', 'VW', 'Honda', 'BMW')
print(carros_tuple)
print(type(carros_tuple))
# transformando em objeto json
carros_json = dumps(carros_tuple)
print(f'\n{carros_json}')
print(type(carros_json))
from json import dumps
#! list[] to array json[]
carros_list = ['Toyota', 'VW', 'Honda', 'BMW']
print(carros_list)
print(type(carros_list))
# transformando em objeto json
carros_json = dumps(carros_list)
print(f'\n{carros_json}')
print(type(carros_json))
```
# <font color=#FF0000>**json.dump()**</font> - Para codificar e gravar dados JSON em um arquivo
Para gravar a resposta JSON em um arquivo: Na maioria das vezes, ao executar uma solicitação GET, você recebe uma resposta no formato JSON e pode armazenar a resposta JSON em um arquivo para uso futuro ou para uso de um sistema subjacente.
Por exemplo, você tem dados em uma lista ou dicionário ou qualquer objeto Python e deseja codificá-los e armazená-los em um arquivo na forma de JSON.
Vamos converter o dicionário Python em um formato JSON e gravá-lo em um arquivo, sendo:
1. **SEM FORMATAÇÃO NO ARQUIVO JSON**. (file_json_sem_formatar.json)
```json
{"bloqueio_tela": {"5": {"tela_bloqueadas": []}, "3": {"tela_bloqueadas": []}, "1": {"tela_bloqueadas": []}, "2": {"tela_bloqueadas": []}}}
```
2. **RECUADOS E FORMATADOS**. (file_json_formatado.json)
* indent=4 --> _4 espaços de indentação_
* separators=(', ', ': ') --> _formato com espaço apos "," e apos ":"_
* sort_keys=True --> _as chavas são gravadas em ordem crescente_
```json
{
"bloqueio_tela": {
"1": {
"tela_bloqueadas": []
},
"2": {
"tela_bloqueadas": []
},
"3": {
"tela_bloqueadas": []
},
"5": {
"tela_bloqueadas": []
}
}
}
```
3. **CODIFICAÇÃO COMPACTA PARA ECONOMIZAR ESPAÇO**. (file_json_compacto.json)
* separators=(',', ':') --> _eliminando os espaços e formatação_
```json
{"bloqueio_tela":{"1":{"tela_bloqueadas":[]},"2":{"tela_bloqueadas":[]},"3":{"tela_bloqueadas":[]},"5":{"tela_bloqueadas":[]}}}
```
```
import json
dicionario = {"bloqueio_tela": {"5": {"tela_bloqueadas": []}, "3": {"tela_bloqueadas": []}, "1": {"tela_bloqueadas": []}, "2": {"tela_bloqueadas": []}}}
# write the dict to a file with no formatting at all:
with open('file_json_sem_formatar.json', mode='w', encoding='utf-8') as write_file:
    json.dump(dicionario, write_file)
# write a formatted file: 4-space indent, space after "," and ":", keys sorted ascending:
with open('file_json_formatado.json', mode='w', encoding='utf-8') as write_file:
    json.dump(dicionario, write_file, indent=4, separators=(', ', ': '), sort_keys=True)
# write a compact file: no whitespace around "," and ":" to save space:
with open('file_json_compacto.json', mode='w', encoding='utf-8') as write_file:
    json.dump(dicionario, write_file, separators=(',', ':'), sort_keys=True)
# separators can also replace the key/value delimiter itself:
print(json.dumps(dicionario, separators=(',', '='), sort_keys=True))
```
# <font color=#FF0000>**json.dump()**</font> - Pule os tipos não básicos ao gravar JSON em um arquivo usando o parâmetro skipkeys
O módulo json integrado do Python só pode lidar com tipos primitivos Python que tenham um equivalente JSON direto (por exemplo, dicionário, listas, strings, ints, None, etc.).
Se o dicionário Python contiver um objeto Python personalizado como uma das chaves e se tentarmos convertê-lo em um formato JSON, você obterá um TypeError, isto é <font color=#FF0000>**_Object of type "Your Class" is not JSON serializable_**</font>,.
Se este objeto personalizado não for necessário em dados JSON, você pode ignorá-lo usando um **_skipkeys=true_** argumento do json.dump() método.
Se **_skipkeys=true_** for True, então as dict chaves que não são de um tipo básico (str, int, float, bool, None) serão ignoradas em vez de gerar um TypeError.
```json
{"salario": 9000, "skills": ["Python", "Machine Learning", "Web Development"], "email": "jane.doe@pynative.com"}
```
Obs.: Sem o DadosPessoais: usuario
Artigo para transformar tipos não basicos em json:
<https://pynative.com/make-python-class-json-serializable/>
```
import json
class DadosPessoais():
    """Plain Python object — not JSON-serializable, used as a non-basic dict key."""
    def __init__(self, name: str, age: int):
        self.name = name
        self.age = age
    def showInfo(self):
        print("Nome é " + self.name, "Idade é ", self.age)
# instantiate an object
usuario = DadosPessoais("João", 36)
dicionario = {
    DadosPessoais: usuario,
    "salario": 9000,
    "skills": ["Python", "Machine Learning", "Web Development"],
    "email": "jane.doe@pynative.com"
}
# skipkeys=True silently drops keys that are not basic types
# (here the DadosPessoais class key) instead of raising TypeError
with open("file_json_sem_tipos_nao_basicos.json", mode='w', encoding='utf-8') as write_file:
    json.dump(dicionario, write_file, skipkeys=True)
```
# <font color=#FF0000>**json.dumps()**</font> - Lidar com caracteres não ASCII de dados JSON ao gravá-los em um arquivo
O json.dump() método possui ensure_ascii parâmetro. O ensure_ascii é verdadeiro por padrão. A saída tem a garantia de ter todos os caracteres não ASCII de entrada com escape. Se ensure_ascii for falso, esses caracteres serão reproduzidos no estado em que se encontram. Se você deseja armazenar caracteres não ASCII, no estado em que se encontra, use o código a seguir.
Obs.: Se usar o **ensure_ascii=False** como parametro do json.dump o mesmo irá salvar palavras com acentuação no arquivo json. Uma boa pratica ao abrir estes arquivos é usar o encoding utf-8
~~~
# boa pratica se salvar um json com ensure_ascii=False é abrir o mesmo com encoding utf-8
with open(caminho, mode='r', encoding='utf-8') as read_file:
~~~
```
import json
# data containing non-ASCII (UTF-8) characters
unicode_data= {
    "string1": "明彦",
    "string2": u"\u00f8"}
print(f'{unicode_data = }')
# dumps with ensure_ascii=False: non-ASCII characters are kept as-is
encoded_unicode = json.dumps(unicode_data, ensure_ascii=False)
print(f'{encoded_unicode = }')
# dumps with ensure_ascii=True (the default): non-ASCII is \u-escaped
encoded_unicode = json.dumps(unicode_data, ensure_ascii=True)
print(f'{encoded_unicode = }')
# loads restores the original characters either way
print(json.loads(encoded_unicode))
```
# <font color=#FF0000>**CSV (Comma Separated Values)**</font> - Trabalhando com arquivo CSV
# <font color=#FF0000>**csv.reader**</font> - Leia CSV com delimitador de vírgula
csv.reader função no modo padrão para arquivos CSV com delimitador de vírgula.
csv.reader(file) ou csv.reader(file, delimiter=',') é a mesma coisa, uma vez que o delimiter default é = ','
```
import csv
with open('contatos.csv', mode='r', encoding='utf8', newline='') as file:
    # csv.reader(file) and csv.reader(file, delimiter=',') are equivalent: ',' is the default delimiter
    csv_reader = csv.reader(file)
    for row in csv_reader:
        print(row)
```
# <font color=#FF0000>**csv.reader - delimiter='\t'**</font> - Leia CSV com delimitador diferente
Por padrão, uma vírgula é usada como delimitador em um arquivo CSV. No entanto, alguns arquivos CSV podem usar outros delimitadores além de vírgulas. Os populares | e \t(tab).
```
import csv
with open('contatos_com_delimitador_tab.csv', mode='r', encoding='utf8', newline='') as file:
    # with the default ',' delimiter the tab-separated rows are NOT split correctly
    csv_reader = csv.reader(file)
    for row in csv_reader:
        print(row)
    print('-' * 70)
    # seek(0) rewinds the file so it can be read again
    file.seek(0)
    # now with delimiter='\t' (tab) the rows split correctly
    csv_reader = csv.reader(file, delimiter='\t')
    for row in csv_reader:
        print(row)
```
# <font color=#FF0000>**csv.reader - skipinitialspace=True**</font> - Leia arquivos CSV com espaços iniciais
Isso permite que o reader objeto saiba que as entradas possuem um espaço em branco inicial. Como resultado, os espaços iniciais que estavam presentes após um delimitador são removidos.
```
import csv
with open('contatos_com_espaços.csv', mode='r', encoding='utf8', newline='') as file:
    # default parsing keeps the leading space after each delimiter
    csv_reader = csv.reader(file)
    for row in csv_reader:
        print(row)
    print('-' * 70)
    # seek(0) rewinds the file so it can be read again
    file.seek(0)
    # skipinitialspace=True strips the whitespace that follows each delimiter
    csv_reader = csv.reader(file, skipinitialspace=True)
    for row in csv_reader:
        print(row)
```
# <font color=#FF0000>**csv.reader - quoting=csv.QUOTE_ALL, skipinitialspace=True**</font> - Ler arquivos CSV com aspas
Como você pode ver, passamos csv.QUOTE_ALL para o quoting parâmetro. É uma constante definida pelo csv módulo.
csv.QUOTE_ALL especifica o objeto leitor que todos os valores no arquivo CSV estão presentes entre aspas.
Existem 3 outras constantes predefinidas que você pode passar para o quoting parâmetro:
* csv.QUOTE_MINIMAL- Especifica o reader objeto que o arquivo CSV tem aspas em torno das entradas que contêm caracteres especiais, como delimitador , quotechar ou qualquer um dos caracteres no determinador de linha .
* csv.QUOTE_NONNUMERIC- Especifica o reader objeto que o arquivo CSV tem aspas em torno das entradas não numéricas.
* csv.QUOTE_NONE - Especifica o objeto leitor que nenhuma das entradas tem aspas ao redor.
```
import csv
with open('arquivo_csv_com_aspas.csv', mode='r', encoding='utf8', newline='') as file:
    # default parsing leaves the surrounding quotes in the values
    csv_reader = csv.reader(file)
    for row in csv_reader:
        print(row)
    print('-' * 70)
    # seek(0) rewinds the file so it can be read again
    file.seek(0)
    # QUOTE_ALL tells the reader every value is quoted; skipinitialspace drops the spaces
    csv_reader = csv.reader(file, quoting=csv.QUOTE_ALL, skipinitialspace=True)
    for row in csv_reader:
        print(row)
```
# <font color=#FF0000>**csv.reader - dialect='myDialect'**</font> - Ler arquivos CSV usando dialeto
Passamos vários parâmetros (delimiter, quoting e skipinitialspace) para a função csv.reader().
Essa prática é aceitável ao lidar com um ou dois arquivos. Mas isso tornará o código mais redundante e feio quando começarmos a trabalhar com vários arquivos CSV com formatos semelhantes. Como solução para isso, o csv módulo oferece dialect como parâmetro opcional.
Dialeto ajuda a agrupar muitos padrões de formatação específicas, como delimiter, skipinitialspace, quoting, escapecharem um único nome dialeto.
Ele pode então ser passado como um parâmetro para várias writer ou reader instâncias.
```
import csv
with open('arquivo_csv_uso_dialetos.csv', mode='r', encoding='utf8', newline='') as file:
    # default parsing does not understand the '|' delimiter or the quoting
    csv_reader = csv.reader(file)
    for row in csv_reader:
        print(row)
    print('-' * 70)
    # seek(0) rewinds the file so it can be read again
    file.seek(0)
    # register a named dialect bundling delimiter/whitespace/quoting settings
    csv.register_dialect('myDialect', delimiter='|', skipinitialspace=True, quoting=csv.QUOTE_ALL)
    # now read using dialect='myDialect'
    csv_reader = csv.reader(file, dialect='myDialect')
    for row in csv_reader:
        print(row)
"""A vantagem de usar dialect é que torna o programa mais modular. Observe que podemos reutilizar
'myDialect' para abrir outros arquivos sem ter que especificar novamente o formato CSV."""
```
# <font color=#FF0000>**csv.DictReader**</font>
Entradas da primeira linha são as chaves do dicionário. E as entradas nas outras linhas são os valores do dicionário.
```
import csv
with open('contatos.csv', mode='r', encoding='utf8', newline='') as file:
    csv_file = csv.DictReader(file)
    for row in csv_file:
        print(row)  # python >= 3.8; use print(dict(row)) on python < 3.8
# First-row entries become the dict keys; each following row supplies the values.
```
# <font color=#FF0000>**csv.writer writerow**</font> - Gravando linha por linha com writerow
A csv.writer()função retorna um writer objeto que converte os dados do usuário em uma string delimitada. Esta string pode ser usada posteriormente para gravar em arquivos CSV usando a writerow()função. Vamos dar um exemplo.
```
import csv
# write one row at a time with writerow
with open('arquivo_csv_writer.csv', mode='w', encoding='utf8', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(["SN", "Movie", "Protagonist"])
    writer.writerow([1, "Lord of the Rings", "Frodo Baggins"])
    writer.writerow([2, "Harry Potter", "Harry Potter"])
```
# <font color=#FF0000>**csv.writer writerows**</font> - Gravando várias linhas com writerows
```
import csv
# write several rows at once with writerows
lista = [["SN", "Movie", "Protagonist"], [1, "Lord of the Rings", "Frodo Baggins"], [2, "Harry Potter", "Harry Potter"]]
with open('arquivo_csv_writer_rows.csv', mode='w', encoding='utf8', newline='') as file:
    writer = csv.writer(file)
    writer.writerows(lista)
```
# <font color=#FF0000>**csv.writer - delimiter**</font> - Gravando em um arquivo CSV com delimitador
```
cod nome email
1 Joao joao@gmail.com
2 Amanda amanda@gmail.com
3 Arthur arthur@gmail.com
4 Matheus matheus@gmail.com
5 Gustavo gustavo@gmail.com
6 Renato renato@gmail.com
```
```
import csv
lista = [['cod', 'nome', 'email'], ['1', 'Joao', 'joao@gmail.com'], ['2', 'Amanda', 'amanda@gmail.com'],
         ['3', 'Arthur', 'arthur@gmail.com'], ['4', 'Matheus', 'matheus@gmail.com'], ['5', 'Gustavo', 'gustavo@gmail.com'],
         ['6', 'Renato', 'renato@gmail.com']]
# delimiter='\t' writes a tab-separated file
with open('contatos_com_delimitador_tab.csv', mode='w', encoding='utf8', newline='') as file:
    writer = csv.writer(file, delimiter='\t')
    writer.writerows(lista)
```
# <font color=#FF0000>**csv.writer - quoting=csv.QUOTE_NONNUMERIC**</font> - Gravando em um arquivo CSV com aspas
* _csv.QUOTE_NONNUMERIC_ Especifica o writer objeto que as aspas devem ser adicionadas às entradas **não numéricas**.
* _csv.QUOTE_ALL_ Especifica o writer objeto para gravar o arquivo CSV com aspas em torno de **todas as entradas**.
* _csv.QUOTE_MINIMAL_ Especifica o writer objeto para citar apenas os campos que contêm caracteres especiais (delimitador , quotechar ou quaisquer caracteres no determinador de linha)
* _csv.QUOTE_NONE_ Especifica ao writer que nenhuma das entradas deve ser citada (exige definir escapechar). Obs.: o valor padrão do writer é, na verdade, csv.QUOTE_MINIMAL.
```
"cod","nome","email"
1,"Joao","joao@gmail.com"
2,"Amanda","amanda@gmail.com"
3,"Arthur","arthur@gmail.com"
4,"Matheus","matheus@gmail.com"
5,"Gustavo","gustavo@gmail.com"
6,"Renato","renato@gmail.com"
```
```
import csv
lista = [["cod", "nome", "email"], [1, 'Joao', 'joao@gmail.com'], [2, 'Amanda', "amanda@gmail.com"],
         [3, 'Arthur', 'arthur@gmail.com'], [4, "Matheus", 'matheus@gmail.com'], [5, 'Gustavo', 'gustavo@gmail.com'],
         [6, "Renato", 'renato@gmail.com']]
# QUOTE_NONNUMERIC quotes the string entries but leaves the int codes unquoted
with open('arquivo_csv_com_aspas.csv', mode='w', encoding='utf8', newline='') as file:
    writer = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerows(lista)
```
# <font color=#FF0000>**csv.writer - quoting=csv.QUOTE_NONNUMERIC e quotechar='*'**</font> - Gravando arquivos CSV com caractere de citação personalizado
```
*cod*,*nome*,*email*
*1*,*Joao*,*joao@gmail.com*
*2*,*Amanda*,*amanda@gmail.com*
*3*,*Arthur*,*arthur@gmail.com*
*4*,*Matheus*,*matheus@gmail.com*
*5*,*Gustavo*,*gustavo@gmail.com*
*6*,*Renato*,*renato@gmail.com*
```
```
import csv
lista = [['cod', 'nome', 'email'], ['1', 'Joao', 'joao@gmail.com'], ['2', 'Amanda', 'amanda@gmail.com'],
         ['3', 'Arthur', 'arthur@gmail.com'], ['4', 'Matheus', 'matheus@gmail.com'], ['5', 'Gustavo', 'gustavo@gmail.com'],
         ['6', 'Renato', 'renato@gmail.com']]
# quotechar='*' uses '*' instead of '"' around the (all-string) entries
with open('arquivo_csv_com_quotechar.csv', mode='w', encoding='utf8', newline='') as file:
    writer = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC, quotechar='*')
    writer.writerows(lista)
```
# <font color=#FF0000>**csv.writer - dialect='myDialect'**</font> - Gravando arquivos CSV usando dialeto
A vantagem de usar dialect é que torna o programa mais modular. Observe que podemos reutilizar myDialect para gravar outros arquivos CSV sem ter que especificar novamente o formato CSV.
```
*cod*|*nome*|*email*
*1*|*Joao*|*joao@gmail.com*
*2*|*Amanda*|*amanda@gmail.com*
*3*|*Arthur*|*arthur@gmail.com*
*4*|*Matheus*|*matheus@gmail.com*
*5*|*Gustavo*|*gustavo@gmail.com*
*6*|*Renato*|*renato@gmail.com*
```
```
import csv
lista = [['cod', 'nome', 'email'], ['1', 'Joao', 'joao@gmail.com'], ['2', 'Amanda', 'amanda@gmail.com'],
         ['3', 'Arthur', 'arthur@gmail.com'], ['4', 'Matheus', 'matheus@gmail.com'], ['5', 'Gustavo', 'gustavo@gmail.com'],
         ['6', 'Renato', 'renato@gmail.com']]
# register the dialect once, then reuse it for any writer/reader
csv.register_dialect('myDialect', delimiter='|', quoting=csv.QUOTE_NONNUMERIC, quotechar='*')
with open('arquivo_csv_uso_dialetos.csv', mode='w', encoding='utf8', newline='') as file:
    writer = csv.writer(file, dialect='myDialect')
    writer.writerows(lista)
```
# <font color=#FF0000>**csv.DictWriter**</font> - Gravando arquivos CSV atraves de uma lista de dicionarios
```
cod,nome,email
1,Joao,joao@gmail.com
2,Amanda,amanda@gmail.com
3,Arthur,arthur@gmail.com
4,Matheus,matheus@gmail.com
5,Gustavo,gustavo@gmail.com
6,Renato,renato@gmail.com
```
```
import csv
lista = [{'cod': 1, 'nome': 'Joao', 'email': 'joao@gmail.com'}, {'cod': 2, 'nome': 'Amanda', 'email': 'amanda@gmail.com'},
         {'cod': 3, 'nome': 'Arthur', 'email': 'arthur@gmail.com'}, {'cod': 4, 'nome': 'Matheus', 'email': 'matheus@gmail.com'},
         {'cod': 5, 'nome': 'Gustavo', 'email': 'gustavo@gmail.com'}, {'cod': 6, 'nome': 'Renato', 'email': 'renato@gmail.com'}]
with open('arquivo_csv_dictWriter.csv', mode='w', encoding='utf8', newline='') as file:
    # fieldnames fixes the column order; writeheader emits the header row
    fieldnames = ['cod', 'nome', 'email']
    writer = csv.DictWriter(file, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(lista)
```
# <font color=#FF0000>**csv to Excel com openpyxl**</font> - Transformando arquivos CSV em Excel
```
import csv
from openpyxl import Workbook
import os
# convert a CSV file into an Excel workbook
wb = Workbook()
ws = wb.active  # the default worksheet
with open('salarios.csv') as f:
    reader = csv.reader(f, delimiter=',')
    for row in reader:
        ws.append(row)  # each CSV row becomes one worksheet row
wb.save('salarios.xlsx')
os.startfile('salarios.xlsx')  # Windows-only; see alternatives below
# os.system("start EXCEL.EXE salarios.xlsx")
# os.system("open -a 'path/Microsoft Excel.app' 'path/file.xlsx'")
```
| github_jupyter |
```
import autograd.numpy as np
import autograd.numpy.random as npr
npr.seed(0)
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
color_names = ["windows blue",
"red",
"amber",
"faded green",
"dusty purple",
"orange",
"clay",
"pink",
"greyish",
"mint",
"light cyan",
"steel blue",
"forest green",
"pastel purple",
"salmon",
"dark brown"]
colors = sns.xkcd_palette(color_names)
import ssm
from ssm.variational import SLDSMeanFieldVariationalPosterior, SLDSTriDiagVariationalPosterior
from ssm.util import random_rotation, find_permutation
```
## Ring attractor
https://www.sciencedirect.com/science/article/pii/S0896627318303258
```
###Ring attractor (multi-attractor) model
# Rate network of Neu cells on a ring; cosine-shaped recurrent weights create
# a continuous attractor that tracks a noisy head-direction input.
Neu = 100 #number of cells
taum = 0.01 #membrane time constant
k = 0.1 #gain
gm = 100 #conductance
Wa = -40/gm #average weight (uniform inhibition)
Wd = 33/gm #tuning-dependent weight amplitude
###synaptic weights
Wij = np.zeros((Neu,Neu)) #connectivity matrix
deg2rad = np.pi/180
ths = np.linspace(-90,90,Neu)*deg2rad #preferred tuning direction
for ii in range(Neu):
    for jj in range(Neu):
        # cosine tuning: similarly tuned cells excite each other on top of Wa
        Wij[ii,jj] = Wa + Wd/Neu*np.cos(ths[ii]-ths[jj])
        #np.exp( (np.cos(ths[ii]-ths[jj])-1)/lsyn**2 )
Wij = (Wij-np.mean(Wij))*1 ##?? zero-mean the weights -- TODO confirm this is intended
plt.plot(ths,Wij);
plt.xlabel('angle (rad)')
plt.ylabel('weight')
plt.figure()
plt.imshow(Wij)
plt.xlabel('i')
plt.ylabel('j')
###stimulus
T = 100 #sec
dt = 0.01 #10ms
time = np.arange(0,T,dt) #time series
b = 2 #baseline input
c = 0.5 #stimulus gain
Am = 0.1 #modulation depth
def the2h(ti,tstim):
    # Input to a cell tuned to angle ti when the stimulus is at tstim:
    # baseline plus a cosine bump centered on the stimulus direction.
    return b + c*(1-Am+Am*np.cos(ti-tstim))
    #b + c*Am*np.exp( (np.cos(ti-tstim)-1)/lstim**2 )
taun = 10 #noise correlation
noise = 10
# Ornstein-Uhlenbeck-like random walk of the head direction h
h = np.zeros(len(time))
for tt in range(0,len(time)-1):
    h[tt+1] = h[tt] + (ths[int(len(ths)/2)]-h[tt])*dt/taun + np.sqrt(taun*dt)*np.random.randn()*noise
#    if h[tt+1]>180:
#        h[tt+1] = h[tt+1]-180
#    if h[tt+1]<0:
#        h[tt+1] = 180+h[tt+1]
#h = h*deg2rad
# smoothed = 200
# temp = np.convolve(np.random.randn(len(time))*180,np.exp(-np.arange(1,smoothed,1)/smoothed),'same')
h = np.mod(h,180)*deg2rad - np.pi/2  # wrap into [-pi/2, pi/2)
plt.plot(time,h,'o')
plt.xlabel('time (s)')
plt.ylabel('head angle (rad)')
###neural dynamics
Vr = 0 #resting potential
V = np.zeros((Neu,len(time))) #neurons by time
V[:,0] = Vr + np.random.randn(Neu) #initialization
r = np.zeros((Neu,len(time)))
r[:,0] = gm*np.tanh(k*V[:,0]) #k*(np.max((V[:,0]-V0)))**nn
# Euler integration: leaky voltage driven by the input bump plus recurrence
for tt in range(0,len(time)-1):
    ht = np.array([the2h(hh,h[tt]) for hh in ths]) #input bump
    V[:,tt+1] = V[:,tt] + dt*(-V[:,tt] + ht + np.dot(Wij,r[:,tt]))/taum + np.sqrt(dt*taum)*np.random.randn(Neu)*1
    temp = V[:,tt+1].copy()
    temp[temp<0] = 0  # rectify before the rate nonlinearity
    r[:,tt+1] = gm*np.tanh(k*temp) #k*(np.max((V[:,tt+1]-V0)))**nn
extent = [0,T,ths[0],ths[-1]]
plt.imshow(r, aspect="auto",extent=extent)
#plt.plot(V.T);
plt.plot(r[:,2:].T);
###PCA test
# project the rates onto the top 3 PCs of the neuron-by-neuron covariance
X = r[:,2:].copy()
C = np.cov(X)
u,s,v = np.linalg.svd(C)
PCs = np.dot(u[:,:3].T,X)
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
ax.plot3D(PCs[1,:], PCs[2,:], PCs[0,:])
```
## SLDS fitting
```
# Subsample the simulated rates: 10 random neurons, every 10th time bin.
XX = np.array(X[np.random.choice(np.arange(0,Neu,1),10),:])
XX = XX[:,np.arange(0,len(time),10)]
#XX = X.copy()
plt.imshow(XX, aspect="auto")
XX.shape
# Set the parameters of the HMM
T = XX.shape[1]  # number of time bins
K = 3            # number of discrete states
D = 2            # number of latent dimensions
N = XX.shape[0]  # number of observed dimensions
# Make an SLDS with the true parameters
true_slds = ssm.SLDS(N, K, D, transitions="recurrent_only", emissions="gaussian_orthog")
for k in range(K):
    true_slds.dynamics.As[k] = .95 * random_rotation(D, theta=(k+1) * np.pi/20)
z, x, y = true_slds.sample(T)
# Mask off some data
# NOTE(review): the sampled y is immediately overwritten with the real data
# below; z and x from the synthetic model are kept only for the
# find_permutation alignment calls -- confirm that is intended.
y = XX.T.copy()
mask = npr.rand(T, N) < 0.9
y_masked = y * mask
print("Fitting SLDS with SVI")
# Create the model and initialize its parameters
slds = ssm.SLDS(N, K, D, emissions="gaussian_orthog")
slds.initialize(y_masked, masks=mask)
# Create a variational posterior (mean-field)
q_mf = SLDSMeanFieldVariationalPosterior(slds, y_masked, masks=mask)
q_mf_elbos = slds.fit(q_mf, y_masked, masks=mask, num_iters=1000, initialize=False)
# Get the posterior mean of the continuous states
q_mf_x = q_mf.mean[0]
# Find the permutation that matches the true and inferred states
slds.permute(find_permutation(z, slds.most_likely_states(q_mf_x, y)))
q_mf_z = slds.most_likely_states(q_mf_x, y)
# Smooth the data under the variational posterior
q_mf_y = slds.smooth(q_mf_x, y)
print("Fitting SLDS with SVI using structured variational posterior")
slds = ssm.SLDS(N, K, D, emissions="gaussian_orthog")
slds.initialize(y_masked, masks=mask)
q_struct = SLDSTriDiagVariationalPosterior(slds, y_masked, masks=mask)
q_struct_elbos = slds.fit(q_struct, y_masked, masks=mask, num_iters=1000, initialize=False)
# Get the posterior mean of the continuous states
q_struct_x = q_struct.mean[0]
# Find the permutation that matches the true and inferred states
slds.permute(find_permutation(z, slds.most_likely_states(q_struct_x, y)))
q_struct_z = slds.most_likely_states(q_struct_x, y)
# Smooth the data under the variational posterior
q_struct_y = slds.smooth(q_struct_x, y)
###try with switching! (recurrent SLDS on the unmasked data)
rslds = ssm.SLDS(N, K, D,
                 transitions="recurrent_only",
                 dynamics="diagonal_gaussian",
                 emissions="gaussian_orthog",
                 single_subspace=True)
rslds.initialize(y)
q = SLDSTriDiagVariationalPosterior(rslds, y)
elbos = rslds.fit(q, y, num_iters=1000, initialize=False)
xhat = q.mean[0]
# Find the permutation that matches the true and inferred states
rslds.permute(find_permutation(z, rslds.most_likely_states(xhat, y)))
zhat = rslds.most_likely_states(xhat, y)
plt.figure()
plt.plot(elbos)
plt.xlabel("Iteration")
plt.ylabel("ELBO")
# continuous latent trajectory of the rSLDS fit
plt.plot(xhat[:,0],xhat[:,1])
# Plot the ELBOs of the two SVI fits
plt.plot(q_mf_elbos, label="MF")
plt.plot(q_struct_elbos, label="LDS")
plt.xlabel("Iteration")
plt.ylabel("ELBO")
plt.legend()
###discrete state vs. head direction
plt.subplot(211)
plt.imshow(np.row_stack((q_struct_z, q_mf_z)), aspect="auto")
plt.yticks([0, 1], ["$z_{\\mathrm{struct}}$", "$z_{\\mathrm{mf}}$"])
plt.subplot(212)
plt.plot(h)
plt.xlim(0,len(h))
plt.xlabel('time')
plt.ylabel('angle (rad)')
# latent trajectories colored by inferred discrete state
for kk in range(K):
    pos = np.where(q_struct_z==kk)[0]
    plt.plot(q_struct_x[pos,0],q_struct_x[pos,1])
plt.plot(q_mf_x[:,0],q_mf_x[:,1])
# Plot the true and inferred states
# xlim = (0, 500)
# plt.figure(figsize=(8,4))
# plt.imshow(np.row_stack((z, q_struct_z, q_mf_z)), aspect="auto")
# plt.plot(xlim, [0.5, 0.5], '-k', lw=2)
# plt.yticks([0, 1, 2], ["$z_{\\mathrm{true}}$", "$z_{\\mathrm{struct}}$", "$z_{\\mathrm{mf}}$"])
# plt.xlim(xlim)
# Continuous states, offset per dimension for readability
plt.figure(figsize=(8,4))
plt.plot(x + 4 * np.arange(D), '-k')
for d in range(D):
    plt.plot(q_mf_x[:,d] + 4 * d, '-', color=colors[0], label="MF" if d==0 else None)
    plt.plot(q_struct_x[:,d] + 4 * d, '-', color=colors[1], label="Struct" if d==0 else None)
plt.ylabel("$x$")
#plt.xlim(xlim)
plt.legend()
# Plot the smoothed observations
plt.figure(figsize=(8,4))
for n in range(N):
    plt.plot(y[:, n] + 4 * n, '-k', label="True" if n == 0 else None)
    plt.plot(q_mf_y[:, n] + 4 * n, '--', color=colors[0], label="MF" if n == 0 else None)
    plt.plot(q_struct_y[:, n] + 4 * n, ':', color=colors[1], label="Struct" if n == 0 else None)
plt.legend()
plt.xlabel("time")
#plt.xlim(xlim)
```
| github_jupyter |
```
import numpy as np
import cv2
import matplotlib.pyplot as plt
from pathlib import Path
from seaborn import color_palette
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import models, transforms, utils
import copy
from utils import *
%matplotlib inline
```
# CONVERT IMAGE TO TENSOR
```
class ImageDataset(torch.utils.data.Dataset):
    """Dataset pairing one search image with a directory of templates.

    Each item bundles the (transformed) image, the raw image, one template
    tensor and its size, plus a per-template matching threshold read from an
    optional CSV (default threshold 0.7).
    """

    def __init__(self, template_dir_path, image_name, thresh_csv=None, transform=None):
        self.transform = transform
        if not self.transform:
            # ImageNet normalization expected by the pretrained VGG backbone.
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                )
            ])
        self.template_path = list(template_dir_path.iterdir())
        self.image_name = image_name
        # NOTE(review): cv2.imread returns BGR while the ImageNet stats above
        # are RGB -- confirm this channel-order mismatch is intended upstream.
        self.image_raw = cv2.imread(self.image_name)
        self.thresh_df = None
        if thresh_csv:
            self.thresh_df = pd.read_csv(thresh_csv)
        if self.transform:
            self.image = self.transform(self.image_raw).unsqueeze(0)

    def __len__(self):
        # BUG FIX: was len(self.template_names) -- that attribute never exists,
        # so len(dataset) raised AttributeError. The list is template_path.
        return len(self.template_path)

    def __getitem__(self, idx):
        template_path = str(self.template_path[idx])
        template = cv2.imread(template_path)
        if self.transform:
            template = self.transform(template)
        thresh = 0.7  # default when no per-template threshold is listed
        if self.thresh_df is not None:
            if self.thresh_df.path.isin([template_path]).sum() > 0:
                thresh = float(self.thresh_df[self.thresh_df.path == template_path].thresh)
        return {'image': self.image,
                'image_raw': self.image_raw,
                'image_name': self.image_name,
                'template': template.unsqueeze(0),
                'template_name': template_path,
                'template_h': template.size()[-2],
                'template_w': template.size()[-1],
                'thresh': thresh}
# Build the dataset: every template under template/ matched against one
# sample image, with per-template thresholds from thresh_template.csv.
template_dir = 'template/'
image_path = 'sample/sample1.jpg'
dataset = ImageDataset(Path(template_dir), image_path, thresh_csv='thresh_template.csv')
```
### EXTRACT FEATURE
```
class Featex():
    """Feature extractor over a truncated VGG backbone.

    Returns the channel-wise concatenation of a shallow (layer 2) and a deep
    (layer 16) feature map, captured via forward hooks and resized to a
    common spatial resolution.
    """

    def __init__(self, model, use_cuda):
        self.use_cuda = use_cuda
        self.feature1 = None
        self.feature2 = None
        # Work on a frozen private copy of the first 17 layers only.
        self.model = copy.deepcopy(model.eval())[:17]
        for p in self.model.parameters():
            p.requires_grad = False
        if self.use_cuda:
            self.model = self.model.cuda()
        # Hooks capture the intermediate activations during forward().
        self.model[2].register_forward_hook(self.save_feature1)
        self.model[16].register_forward_hook(self.save_feature2)

    def save_feature1(self, module, input, output):
        self.feature1 = output.detach()

    def save_feature2(self, module, input, output):
        self.feature2 = output.detach()

    def __call__(self, input, mode='big'):
        if self.use_cuda:
            input = input.cuda()
        _ = self.model(input)  # populates feature1/feature2 via the hooks
        if mode == 'big':
            # upsample the shallow map to the deep map's resolution
            target = (self.feature2.size()[2], self.feature2.size()[3])
            self.feature1 = F.interpolate(self.feature1, size=target,
                                          mode='bilinear', align_corners=True)
        else:
            # shrink the deep map to the shallow map's resolution
            target = (self.feature1.size()[2], self.feature1.size()[3])
            self.feature2 = F.interpolate(self.feature2, size=target,
                                          mode='bilinear', align_corners=True)
        return torch.cat((self.feature1, self.feature2), dim=1)
class MyNormLayer():
    """Jointly normalize two feature maps: per (batch, channel), subtract the
    mean and divide by the std computed over the union of both maps' pixels."""

    def __call__(self, x1, x2):
        bs, _, H, W = x1.size()
        _, _, h, w = x2.size()
        flat1 = x1.view(bs, -1, H * W)
        flat2 = x2.view(bs, -1, h * w)
        joint = torch.cat((flat1, flat2), dim=2)
        mu = torch.mean(joint, dim=2, keepdim=True)
        sigma = torch.std(joint, dim=2, keepdim=True)
        norm1 = ((flat1 - mu) / sigma).view(bs, -1, H, W)
        norm2 = ((flat2 - mu) / sigma).view(bs, -1, h, w)
        return [norm1, norm2]
class CreateModel():
    """QATM matching pipeline: extracts features for the image and each
    template, jointly normalizes them, and produces one QATM confidence map
    per template.

    Image features are cached keyed on ``image_name`` so that matching many
    templates against the same image runs the backbone only once.
    """

    def __init__(self, alpha, model, use_cuda):
        self.alpha = alpha
        self.featex = Featex(model, use_cuda)
        self.I_feat = None
        self.I_feat_name = None

    def __call__(self, template, image, image_name):
        T_feat = self.featex(template)
        # BUG FIX: was `is not`, an identity check -- two equal strings held
        # by different objects would needlessly recompute the image features.
        # Compare by value instead.
        if self.I_feat_name != image_name:
            self.I_feat = self.featex(image)
            self.I_feat_name = image_name
        conf_maps = None
        batchsize_T = T_feat.size()[0]
        for i in range(batchsize_T):
            T_feat_i = T_feat[i].unsqueeze(0)
            # Normalize image and template features over joint statistics.
            I_feat_norm, T_feat_i = MyNormLayer()(self.I_feat, T_feat_i)
            # Cosine similarity between every image and template location.
            dist = torch.einsum("xcab,xcde->xabde",
                                I_feat_norm / torch.norm(I_feat_norm, dim=1, keepdim=True),
                                T_feat_i / torch.norm(T_feat_i, dim=1, keepdim=True))
            conf_map = QATM(self.alpha)(dist)
            conf_maps = conf_map if conf_maps is None else torch.cat([conf_maps, conf_map], dim=0)
        return conf_maps
class QATM():
    """Quality-Aware Template Matching score layer: soft mutual-best-match
    confidence between reference (image) and query (template) locations."""

    def __init__(self, alpha):
        self.alpha = alpha  # softmax temperature

    def __call__(self, x):
        batch_size, ref_row, ref_col, qry_row, qry_col = x.size()
        x = x.view(batch_size, ref_row * ref_col, qry_row * qry_col)
        # Subtract per-axis maxima for a numerically stable softmax.
        xm_ref = x - torch.max(x, dim=1, keepdim=True)[0]
        xm_qry = x - torch.max(x, dim=2, keepdim=True)[0]
        # Geometric mean of "best match" likelihoods in both directions.
        confidence = torch.sqrt(F.softmax(self.alpha * xm_ref, dim=1)
                                * F.softmax(self.alpha * xm_qry, dim=2))
        conf_values, ind3 = torch.topk(confidence, 1)
        ind1, ind2 = torch.meshgrid(torch.arange(batch_size),
                                    torch.arange(ref_row * ref_col))
        ind1, ind2, ind3 = ind1.flatten(), ind2.flatten(), ind3.flatten()
        if x.is_cuda:
            ind1 = ind1.cuda()
            ind2 = ind2.cuda()
        # Pick, for each reference location, its best query confidence.
        values = confidence[ind1, ind2, ind3]
        return torch.reshape(values, [batch_size, ref_row, ref_col, 1])

    def compute_output_shape(self, input_shape):
        bs, H, W, _, _ = input_shape
        return (bs, H, W, 1)
```
# NMS AND PLOT
## SINGLE
```
def nms(score, w_ini, h_ini, thresh=0.7):
    """Greedy non-maximum suppression on a single score map.

    Candidate boxes of size (w_ini, h_ini) are centered on every pixel whose
    score exceeds thresh * score.max(); any box with IoU > 0.5 against an
    already-kept, higher-scoring box is discarded.

    Returns an (n, 2, 2) int array of [[x1, y1], [x2, y2]] corners.
    """
    ys, xs = np.array(np.where(score > thresh * score.max()))
    left = xs - w_ini // 2
    right = left + w_ini
    top = ys - h_ini // 2
    bottom = top + h_ini
    areas = (right - left + 1) * (bottom - top + 1)
    # Process candidates from highest to lowest score.
    order = score[ys, xs].argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        # Intersection of the best box with all remaining candidates.
        ix1 = np.maximum(left[best], left[order[1:]])
        iy1 = np.maximum(top[best], top[order[1:]])
        ix2 = np.minimum(right[best], right[order[1:]])
        iy2 = np.minimum(bottom[best], bottom[order[1:]])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (areas[best] + areas[order[1:]] - inter)
        order = order[np.where(iou <= 0.5)[0] + 1]
    return np.array([[left[keep], top[keep]], [right[keep], bottom[keep]]]).transpose(2, 0, 1)
def plot_result(image_raw, boxes, show=False, save_name=None, color=(255, 0, 0)):
    """Draw detection boxes on a copy of the image; optionally display it
    and/or save it (saved via OpenCV, hence the channel reversal)."""
    canvas = image_raw.copy()
    for corners in boxes:
        canvas = cv2.rectangle(canvas, tuple(corners[0]), tuple(corners[1]), color, 3)
    if show:
        plt.imshow(canvas)
    if save_name:
        cv2.imwrite(save_name, canvas[:, :, ::-1])
    return canvas
```
## MULTI
```
def nms_multi(scores, w_array, h_array, thresh_list):
    """Non-maximum suppression across score maps from multiple templates.

    Parameters
    ----------
    scores : (T, H, W) array, one score map per template
    w_array, h_array : per-template box width / height
    thresh_list : per-template candidate threshold (fraction of map max)

    Returns ``(boxes, keep_index)``: kept [[x1, y1], [x2, y2]] corners and
    the template index each kept box came from.
    """
    indices = np.arange(scores.shape[0])
    maxes = np.max(scores.reshape(scores.shape[0], -1), axis=1)
    # Omit templates whose best response is far below the global best.
    scores_omit = scores[maxes > 0.1 * maxes.max()]
    indices_omit = indices[maxes > 0.1 * maxes.max()]
    # Extract candidate pixels from each remaining score map.
    dots = None
    dots_indices = None  # FIX: was the misspelled, dead `dos_indices`
    for index, score in zip(indices_omit, scores_omit):
        dot = np.array(np.where(score > thresh_list[index] * score.max()))
        if dots is None:
            dots = dot
            dots_indices = np.ones(dot.shape[-1]) * index
        else:
            dots = np.concatenate([dots, dot], axis=1)
            dots_indices = np.concatenate([dots_indices, np.ones(dot.shape[-1]) * index], axis=0)
    # FIX: np.int was removed in NumPy 1.24; use the builtin int.
    dots_indices = dots_indices.astype(int)
    x1 = dots[1] - w_array[dots_indices] // 2
    x2 = x1 + w_array[dots_indices]
    y1 = dots[0] - h_array[dots_indices] // 2
    y2 = y1 + h_array[dots_indices]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    scores = scores[dots_indices, dots[0], dots[1]]
    order = scores.argsort()[::-1]
    dots_indices = dots_indices[order]
    keep = []
    keep_index = []
    while order.size > 0:
        i = order[0]
        index = dots_indices[0]
        keep.append(i)
        keep_index.append(index)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Stricter overlap cutoff than single-template nms (0.05 vs 0.5).
        inds = np.where(ovr <= 0.05)[0]
        order = order[inds + 1]
        dots_indices = dots_indices[inds + 1]
    boxes = np.array([[x1[keep], y1[keep]], [x2[keep], y2[keep]]]).transpose(2, 0, 1)
    return boxes, np.array(keep_index)
def plot_result_multi(image_raw, boxes, indices, show=False, save_name=None, color_list=None):
    """Draw boxes from several templates, one distinct color per template index."""
    canvas = image_raw.copy()
    if color_list is None:
        # One color per template id, scaled from seaborn's 0-1 floats to 0-255.
        palette = color_palette("hls", indices.max() + 1)
        color_list = [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in palette]
    for pos in range(len(indices)):
        canvas = plot_result(canvas, boxes[pos][None, :, :].copy(), color=color_list[indices[pos]])
    if show:
        plt.imshow(canvas)
    if save_name:
        cv2.imwrite(save_name, canvas[:, :, ::-1])
    return canvas
# RUNNING
def run_one_sample(model, template, image, image_name):
    """Score one template batch against one image.

    Returns per-template score maps resized to the image resolution, with a
    geometric average taken over the template window (done in log space via
    compute_score, then mapped back with exp).
    """
    conf = model(template, image, image_name)
    if conf.is_cuda:
        conf = conf.cpu()
    conf = np.log(conf.numpy())  # log domain for geometric averaging
    h = template.size()[-2]
    w = template.size()[-1]
    scores = []
    for gray in conf[:, :, :, 0]:
        resized = cv2.resize(gray, (image.size()[-1], image.size()[-2]))
        score = compute_score(resized, w, h)
        score[score > -1e-7] = score.min()  # mask numerically-zero artifacts
        # reverse the number range back after computing the geometric average
        scores.append(np.exp(score / (h * w)))
    return np.array(scores)
def run_multi_sample(model, dataset):
    """Score every template in ``dataset`` against its image with ``model``.

    Args:
        model: callable matching run_one_sample's model argument.
        dataset: iterable of dicts with keys 'template', 'image', 'image_name',
            'template_w', 'template_h', 'thresh'.

    Returns:
        scores: np.ndarray of all per-sample score maps stacked along axis 0
            (empty 1-D array when the dataset is empty).
        w_array: np.ndarray of template widths.
        h_array: np.ndarray of template heights.
        thresh_list: list of per-template NMS thresholds.
    """
    score_chunks = []
    w_array = []
    h_array = []
    thresh_list = []
    for data in dataset:
        score = run_one_sample(model, data['template'], data['image'], data['image_name'])
        score_chunks.append(score)
        w_array.append(data['template_w'])
        h_array.append(data['template_h'])
        thresh_list.append(data['thresh'])
    # Concatenate once at the end: the original re-concatenated inside the loop,
    # which copies all accumulated data every iteration (O(n^2) total copying),
    # and returned np.array(None) for an empty dataset.
    if score_chunks:
        scores = np.concatenate(score_chunks, axis=0)
    else:
        scores = np.empty((0,))
    return scores, np.array(w_array), np.array(h_array), thresh_list
# Build the template-matching model on top of pretrained VGG-19 features.
model = CreateModel(model=models.vgg19(pretrained=True).features, alpha=25, use_cuda=True)
# Score every template against the image, then apply multi-template NMS.
scores, w_array, h_array, thresh_list = run_multi_sample(model, dataset)
boxes, indices = nms_multi(scores, w_array, h_array, thresh_list)
# Draw all surviving boxes and save the annotated image.
d_img = plot_result_multi(dataset.image_raw, boxes, indices, show=True, save_name='result_sample.png')
# Display one raw score map (index 2) — presumably for inspection; assumes >= 3 samples.
plt.imshow(scores[2])
```
| github_jupyter |
# Assignment 2: Implementation of Selection Sort
## Deliverables:
We will again generate random data for this assignment.
1) Please set up five data arrays of length 5,000, 10,000, 15,000, 20,000, and 25,000 of uniformly distributed random numbers (you may use either integers or floating point).
Ensure that a common random number seed is used to generate each of the arrays.
2) Execute the base algorithm (Selection Sort) for each of the random number arrays, noting the execution time with each execution.
Use one of the timing methods we learned in class.
3) Just as in the last assignment, please organize the results of the study into a table showing the size of data array and the time taken to sort the array.
Discuss the differences in timing and how they relate to data type and length of array.
4) Use Python matplotlib or Seaborn to generate a plot with the size of the data set on the horizontal axis and execution time in milliseconds on the vertical axis.
The plot should show execution time against problem size for each form of the algorithm being tested.
### Prepare an exec summary of your results, referring to the table and figures you have generated. Explain how your results relate to big O notation. Describe your results in language that management can understand. This summary should be included as text paragraphs in the Jupyter notebook. Explain how the algorithm works and why it is a useful to data engineers.
# Discussion
### The selection sort algorithm as implemented below uses a nested for loop. The inner loop identifies the smallest component of an array and its index, while the outer loop manipulates the arrays (adds the smallest element to the new array and removes the element from the parent array). Since we have these two for loops, the algorithm grows at a rate of approximately n*n. There are two operations: first we identify the smallest element, then we place it in the new array. In big O notation, this is denoted O(n^2). Figure 1 below shows the sort times as a function of the length of the array; it demonstrates the non-linear scaling of this algorithm, which is confirmed by taking the square root of the time. Figure 2 shows the square root of time as a function of the length of the array and is approximately linear.
### In some data retrieval systems, items are required to be indexed sequentially, so we need methodologies to sort them. Selection sort provides this methodology in an easy-to-implement fashion; however, it is not very efficient due to the nested operations. Below are the two functions required for the sort:
1) FindSmallest will start at the first index of an array and set it to an object 'smallest' which will be used in a repetative logical evaluation.
As we progress through the length of the array, each time the next value is smaller than smallest, smallest is replaced and its index is also captured in smallest_index.
This continues until the entire array is processed.
2) SelectionSort uses FindSmallest in a nested fashion to find the smallest value in the given array and append it to a new array.
The found value is removed from the original array (via its returned index from FindSmallest, 'smallest_index') and the algorithm continues until there are no elements left in the original array. The new array is returned along with the elapsed time to complete the sort in milliseconds.
```
import numpy as np
import pandas as pd
from datetime import datetime
import seaborn as sns
import time
# FindSmallest performs a single linear scan over the array, tracking the
# smallest value seen so far together with the index where it occurs.
def FindSmallest(arr):
    """Return (index, value) of the minimum element of ``arr``.

    Ties resolve to the first occurrence, since only strictly smaller
    values replace the running minimum.
    """
    best_index = 0
    best_value = arr[0]
    for idx in range(1, len(arr)):
        if arr[idx] < best_value:
            best_index = idx
            best_value = arr[idx]
    return best_index, best_value
# SelectionSort repeatedly extracts the minimum of the remaining array via
# FindSmallest, appending it to the output and deleting it from the working
# array, until the input is exhausted.
def SelectionSort(arr):
    """Selection-sort ``arr`` into a new ascending list.

    Args:
        arr: list or np.ndarray of comparable values.

    Returns:
        newArr: the elements of arr in ascending order (Python list).
        elapsed_ms: wall-clock time of the sort in milliseconds.
    """
    newArr = []
    start = time.perf_counter()
    for _ in range(len(arr)):
        # Single FindSmallest call per pass — the original called it twice
        # (once for each tuple element), doubling the O(n^2) scan work.
        smallest_index, smallest = FindSmallest(arr)
        newArr.append(smallest)               # add smallest element to the output
        arr = np.delete(arr, smallest_index)  # remove it from the working array
    end = time.perf_counter()
    return newArr, (end - start) * 1E3
```
# A. Generate arrays with a common random seed
```
#Sets the Random Seed
RANDOM_SEED = 123
# NOTE(review): the "5E4"-style names below read as 50,000 etc., but the actual
# sizes are 5,000-25,000 (presumably "E3" was intended). Names kept as-is
# because later cells reference them.
# Reseeding before each call means every array shares the same leading values.
np.random.seed(RANDOM_SEED)
arr5E4 = np.random.randint(low=1, high= 1000001, size=5000)#5,000 elements, 1-1E6 (inclusive)
np.random.seed(RANDOM_SEED)
arr10E4 = np.random.randint(low=1, high= 1000001, size=10000)#10,000 elements, 1-1E6 (inclusive)
np.random.seed(RANDOM_SEED)
arr15E4 = np.random.randint(low=1, high= 1000001, size=15000)#15,000 elements, 1-1E6 (inclusive)
np.random.seed(RANDOM_SEED)
arr20E4 = np.random.randint(low=1, high= 1000001, size=20000)#20,000 elements, 1-1E6 (inclusive)
np.random.seed(RANDOM_SEED)
arr25E4 = np.random.randint(low=1, high= 1000001, size=25000)#25,000 elements, 1-1E6 (inclusive)
```
# B. Sort using SelectionSort function
```
# Sort each array; each call returns (sorted_list, elapsed_ms).
sorted_5E4 = SelectionSort(arr5E4)
sorted_10E4 = SelectionSort(arr10E4)
sorted_15E4 = SelectionSort(arr15E4)
sorted_20E4 = SelectionSort(arr20E4)
sorted_25E4 = SelectionSort(arr25E4)
# Tabulate element counts against sort times.
Summary = {
'NumberOfElements': [ len(sorted_5E4[0]), len(sorted_10E4[0]), len(sorted_15E4[0]),len(sorted_20E4[0]), len(sorted_25E4[0])],
'Time(ms)': [ sorted_5E4[1], sorted_10E4[1], sorted_15E4[1], sorted_20E4[1], sorted_25E4[1]]}
df = pd.DataFrame.from_dict(Summary)
# sqrt(time) should grow roughly linearly with n if time scales as O(n^2).
df['rt(Time)'] = np.sqrt(df['Time(ms)'])
display(df)
```
## Fig 1. Sort times in milliseconds as a function of the number of elements.
```
# Fig 1: raw sort time vs. array length — expected to curve upward (O(n^2)).
sns.scatterplot(x=df['NumberOfElements'], y=df['Time(ms)'])
```
## Fig 2. Square root of sort times in milliseconds as a function of the number of elements.
```
# Fig 2: sqrt(time) vs. array length — approximately linear if time ~ n^2.
sns.scatterplot(x=df['NumberOfElements'], y=df['rt(Time)'])
```
# ------------------------ END ------------------------
code graveyard
```
### This code is for testing
#np.random.seed(123)
#arr7_39 = np.random.randint(low=7, high= 39, size=12)
#print("the array is",arr7_39)
#small = FindSmallest(arr7_39)
#print('the smallest index is at', small[0], 'and has value of', small[1])
#testing = SelectionSort(arr7_39)
#print('the array sorted is:', testing[0])
#print('execution time was: ', testing[1], 'ms')
```
| github_jupyter |
# Now You Code 4: Reddit News Sentiment Analysis
In this assignment you're tasked with performing a sentiment analysis on top Reddit news articles. (`https://www.reddit.com/r/news/top.json`)
You should perform the analysis on the titles only.
Start by getting the Reddit API to work, and extracting a list of titles only. You'll have to research the Reddit API, and can do so here: https://www.reddit.com/dev/api/ The Reddit API requires a custom 'User-Agent' You must specify this in your headers, as explained here: https://github.com/reddit/reddit/wiki/API
After you get Reddit working move on to sentiment analysis. Once again, we will use (`http://text-processing.com/api/sentiment/`) like we did in the in-class coding lab.
We will start by writing the `GetRedditStories` and `GetSentiment` functions, then putting it all together.
## Step 1: Problem Analysis for `GetRedditStories`
First let's write a function `GetRedditStories` to get the top news articles from the http://www.reddit.com site.
Inputs: None
Outputs: the top `stories` as a Python object converted from JSON
Algorithm (Steps in Program):
```
todo write algorithm here
```
```
# Step 2: write code
import requests
def GetRedditStories():
    """Return the top /r/news stories parsed from Reddit's JSON API.

    NOTE(review): assignment stub — body intentionally left for the student.
    The Reddit API requires a custom User-Agent header (see prompt above).
    """
    # todo write code return a list of dict of stories
# testing
GetRedditStories() # you should see some stories
```
## Step 3: Problem Analysis for `GetSentiment`
Now let's write a function, that when given `text` will return the sentiment score for the text. We will use http://text-processing.com 's API for this.
Inputs: `text` string
Outputs: a Python dictionary of sentiment information based on `text`
Algorithm (Steps in Program):
```
todo write algorithm here
```
```
# Step 4: write code
def GetSentiment(text):
    """Return a dict of sentiment info for ``text`` via text-processing.com.

    NOTE(review): assignment stub — body intentionally left for the student.
    """
    # todo write code to return dict of sentiment for text
# testing
GetSentiment("You are a bad, bad man!")
```
## Step 5: Problem Analysis for entire program
Now let's write entire program. This program should take the titles of the Reddit stories and for each one run sentiment analysis on it. It should output the sentiment label and story title, like this:
Example Run (Your output will vary as news stories change...)
```
neutral : FBI Chief Comey 'Rejects' Phone Tap Allegation
pos : New Peeps-flavored Oreos reportedly turning people's poop pink
neutral : President Trump Signs Revised Travel Ban Executive Order
neutral : Police: Overdose survivors to be charged with misdemeanor
neutral : Struggling students forced to wait 3-4 weeks as Utah's public colleges don't have enough mental health therapists
neutral : Army Veteran Faces Possible Deportation to Mexico
neutral : Rep. Scott Taylor called out at town hall for ‘blocking’ constituents on social media
neutral : GM to suspend third shift at Delta Township plant, layoff 1,100 workers
neutral : American citizen Khizr Khan reportedly cancels trip to Canada after being warned his 'travel privileges are being reviewed'
neg : Mars far more likely to have had life than we thought, researchers find after new water discovery
neutral : Bird Flu Found at U.S. Farm That Supplies Chickens to Tyson
neutral : Investigation Reveals Huge Volume of Shark Fins Evading International Shipping Bans
neg : Sikh man's shooting in Washington investigated as hate crime
```
### Problem Analysis
Inputs: (Reads current stories from Reddit)
Outputs: Sentiment Label and story title for each story.
Algorithm (Steps in Program):
```
todo write algorithm here
```
```
## Step 6 Write final program here using the functions
## you wrote in the previous steps!
```
## Step 7: Questions
1. What happens to this program when you do not have connectivity to the Internet? How can this code be modified to correct the issue?
2. Most of the news stories come back with a neutral sentiment score. Does this surprise you? Explain your answer.
3. In what ways can this program be made better / more useful?
## Reminder of Evaluation Criteria
1. What the problem attempted (analysis, code, and answered questions) ?
2. What the problem analysis thought out? (does the program match the plan?)
3. Does the code execute without syntax error?
4. Does the code solve the intended problem?
5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Custom Federated Algorithms, Part 1: Introduction to the Federated Core
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_1"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/custom_federated_algorithms_1.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/custom_federated_algorithms_1.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
This tutorial is the first part of a two-part series that demonstrates how to
implement custom types of federated algorithms in TensorFlow Federated (TFF)
using the [Federated Core (FC)](../federated_core.md) - a set of lower-level
interfaces that serve as a foundation upon which we have implemented the
[Federated Learning (FL)](../federated_learning.md) layer.
This first part is more conceptual; we introduce some of the key concepts and
programming abstractions used in TFF, and we demonstrate their use on a very
simple example with a distributed array of temperature sensors. In
[the second part of this series](custom_federated_algorithms_2.ipynb), we use
the mechanisms we introduce here to implement a simple version of federated
training and evaluation algorithms. As a follow-up, we encourage you to study
[the implementation](https://github.com/tensorflow/federated/blob/master/tensorflow_federated/python/learning/federated_averaging.py)
of federated averaging in `tff.learning`.
By the end of this series, you should be able to recognize that the applications
of Federated Core are not necessarily limited to learning. The programming
abstractions we offer are quite generic, and could be used, e.g., to implement
analytics and other custom types of computations over distributed data.
Although this tutorial is designed to be self-contained, we encourage you to
first read tutorials on
[image classification](federated_learning_for_image_classification.ipynb) and
[text generation](federated_learning_for_text_generation.ipynb) for a
higher-level and more gentle introduction to the TensorFlow Federated framework
and the [Federated Learning](../federated_learning.md) APIs (`tff.learning`), as
it will help you put the concepts we describe here in context.
## Intended Uses
In a nutshell, Federated Core (FC) is a development environment that makes it
possible to compactly express program logic that combines TensorFlow code with
distributed communication operators, such as those that are used in
[Federated Averaging](https://arxiv.org/abs/1602.05629) - computing
distributed sums, averages, and other types of distributed aggregations over a
set of client devices in the system, broadcasting models and parameters to those
devices, etc.
You may be aware of
[`tf.contrib.distribute`](https://www.tensorflow.org/api_docs/python/tf/contrib/distribute),
and a natural question to ask at this point may be: in what ways does this
framework differ? Both frameworks attempt at making TensorFlow computations
distributed, after all.
One way to think about it is that, whereas the stated goal of
`tf.contrib.distribute` is *to allow users to use existing models and training
code with minimal changes to enable distributed training*, and much focus is on
how to take advantage of distributed infrastructure to make existing training
code more efficient, the goal of TFF's Federated Core is to give researchers and
practitioners explicit control over the specific patterns of distributed
communication they will use in their systems. The focus in FC is on providing a
flexible and extensible language for expressing distributed data flow
algorithms, rather than a concrete set of implemented distributed training
capabilities.
One of the primary target audiences for TFF's FC API is researchers and
practitioners who might want to experiment with new federated learning
algorithms and evaluate the consequences of subtle design choices that affect
the manner in which the flow of data in the distributed system is orchestrated,
yet without getting bogged down by system implementation details. The level of
abstraction that FC API is aiming for roughly corresponds to pseudocode one
could use to describe the mechanics of a federated learning algorithm in a
research publication - what data exists in the system and how it is transformed,
but without dropping to the level of individual point-to-point network message
exchanges.
TFF as a whole is targeting scenarios in which data is distributed, and must
remain such, e.g., for privacy reasons, and where collecting all data at a
centralized location may not be a viable option. This has implications for the
implementation of machine learning algorithms that require an increased degree
of explicit control, as compared to scenarios in which all data can be
accumulated in a centralized location at a data center.
## Before we start
Before we dive into the code, please try to run the following "Hello World"
example to make sure your environment is correctly setup. If it doesn't work,
please refer to the [Installation](../install.md) guide for instructions.
```
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated_nightly
!pip install --quiet --upgrade nest_asyncio
import nest_asyncio
nest_asyncio.apply()
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
@tff.federated_computation
def hello_world():
    """A zero-argument federated computation that returns a greeting string."""
    return 'Hello, World!'

# Invoke it like an ordinary Python function (runs in the simulated environment).
hello_world()
```
## Federated data
One of the distinguishing features of TFF is that it allows you to compactly
express TensorFlow-based computations on *federated data*. We will be using the
term *federated data* in this tutorial to refer to a collection of data items
hosted across a group of devices in a distributed system. For example,
applications running on mobile devices may collect data and store it locally,
without uploading to a centralized location. Or, an array of distributed sensors
may collect and store temperature readings at their locations.
Federated data like those in the above examples are treated in TFF as
[first-class citizens](https://en.wikipedia.org/wiki/First-class_citizen), i.e.,
they may appear as parameters and results of functions, and they have types. To
reinforce this notion, we will refer to federated data sets as *federated
values*, or as *values of federated types*.
The important point to understand is that we are modeling the entire collection
of data items across all devices (e.g., the entire collection temperature
readings from all sensors in a distributed array) as a single federated value.
For example, here's how one would define in TFF the type of a *federated float*
hosted by a group of client devices. A collection of temperature readings that
materialize across an array of distributed sensors could be modeled as a value
of this federated type.
```
# A federated float: one float32 member constituent hosted on each client device.
federated_float_on_clients = tff.type_at_clients(tf.float32)
```
More generally, a federated type in TFF is defined by specifying the type `T` of
its *member constituents* - the items of data that reside on individual devices,
and the group `G` of devices on which federated values of this type are hosted
(plus a third, optional bit of information we'll mention shortly). We refer to
the group `G` of devices hosting a federated value as the value's *placement*.
Thus, `tff.CLIENTS` is an example of a placement.
```
# Member type of the federated value (the per-device item type, float32).
str(federated_float_on_clients.member)
# Placement of the federated value (the group of devices hosting it, CLIENTS).
str(federated_float_on_clients.placement)
```
A federated type with member constituents `T` and placement `G` can be
represented compactly as `{T}@G`, as shown below.
```
# Compact notation for the whole federated type: '{float32}@CLIENTS'.
str(federated_float_on_clients)
```
The curly braces `{}` in this concise notation serve as a reminder that the
member constituents (items of data on different devices) may differ, as you
would expect e.g., of temperature sensor readings, so the clients as a group are
jointly hosting a [multi-set](https://en.wikipedia.org/wiki/Multiset) of
`T`-typed items that together constitute the federated value.
It is important to note that the member constituents of a federated value are
generally opaque to the programmer, i.e., a federated value should not be
thought of as a simple `dict` keyed by an identifier of a device in the system -
these values are intended to be collectively transformed only by *federated
operators* that abstractly represent various kinds of distributed communication
protocols (such as aggregation). If this sounds too abstract, don't worry - we
will return to this shortly, and we will illustrate it with concrete examples.
Federated types in TFF come in two flavors: those where the member constituents
of a federated value may differ (as just seen above), and those where they are
known to be all equal. This is controlled by the third, optional `all_equal`
parameter in the `tff.FederatedType` constructor (defaulting to `False`).
```
# all_equal defaults to False: member constituents may differ across clients.
federated_float_on_clients.all_equal
```
A federated type with a placement `G` in which all of the `T`-typed member
constituents are known to be equal can be compactly represented as `T@G` (as
opposed to `{T}@G`, that is, with the curly braces dropped to reflect the fact
that the multi-set of member constituents consists of a single item).
```
# With all_equal=True the compact notation drops the braces: 'float32@CLIENTS'.
str(tff.type_at_clients(tf.float32, all_equal=True))
```
One example of a federated value of such type that might arise in practical
scenarios is a hyperparameter (such as a learning rate, a clipping norm, etc.)
that has been broadcasted by a server to a group of devices that participate in
federated training.
Another example is a set of parameters for a machine learning model pre-trained
at the server, that were then broadcasted to a group of client devices, where
they can be personalized for each user.
For example, suppose we have a pair of `float32` parameters `a` and `b` for a
simple one-dimensional linear regression model. We can construct the
(non-federated) type of such models for use in TFF as follows. The angle braces
`<>` in the printed type string are a compact TFF notation for named or unnamed
tuples.
```
# A (non-federated) named-tuple type <a=float32,b=float32> for a 1-D linear model.
simple_regression_model_type = (
tff.StructType([('a', tf.float32), ('b', tf.float32)]))
# Print the compact TFF notation (angle braces denote a tuple type).
str(simple_regression_model_type)
```
Note that we are only specifying `dtype`s above. Non-scalar types are also
supported. In the above code, `tf.float32` is a shortcut notation for the more
general `tff.TensorType(dtype=tf.float32, shape=[])`.
When this model is broadcasted to clients, the type of the resulting federated
value can be represented as shown below.
```
# Type of the model after broadcast to clients: all-equal, so no curly braces.
str(tff.type_at_clients(
simple_regression_model_type, all_equal=True))
```
Per symmetry with *federated float* above, we will refer to such a type as a
*federated tuple*. More generally, we'll often use the term *federated XYZ* to
refer to a federated value in which member constituents are *XYZ*-like. Thus, we
will talk about things like *federated tuples*, *federated sequences*,
*federated models*, and so on.
Now, coming back to `float32@CLIENTS` - while it appears replicated across
multiple devices, it is actually a single `float32`, since all member are the
same. In general, you may think of any *all-equal* federated type, i.e., one of
the form `T@G`, as isomorphic to a non-federated type `T`, since in both cases,
there's actually only a single (albeit potentially replicated) item of type `T`.
Given the isomorphism between `T` and `T@G`, you may wonder what purpose, if
any, the latter types might serve. Read on.
## Placements
### Design Overview
In the preceding section, we've introduced the concept of *placements* - groups
of system participants that might be jointly hosting a federated value, and
we've demonstrated the use of `tff.CLIENTS` as an example specification of a
placement.
To explain why the notion of a *placement* is so fundamental that we needed to
incorporate it into the TFF type system, recall what we mentioned at the
beginning of this tutorial about some of the intended uses of TFF.
Although in this tutorial, you will only see TFF code being executed locally in
a simulated environment, our goal is for TFF to enable writing code that you
could deploy for execution on groups of physical devices in a distributed
system, potentially including mobile or embedded devices running Android. Each
of those devices would receive a separate set of instructions to execute
locally, depending on the role it plays in the system (an end-user device, a
centralized coordinator, an intermediate layer in a multi-tier architecture,
etc.). It is important to be able to reason about which subsets of devices
execute what code, and where different portions of the data might physically
materialize.
This is especially important when dealing with, e.g., application data on mobile
devices. Since the data is private and can be sensitive, we need the ability to
statically verify that this data will never leave the device (and prove facts
about how the data is being processed). The placement specifications are one of
the mechanisms designed to support this.
TFF has been designed as a data-centric programming environment, and as such,
unlike some of the existing frameworks that focus on *operations* and where
those operations might *run*, TFF focuses on *data*, where that data
*materializes*, and how it's being *transformed*. Consequently, placement is
modeled as a property of data in TFF, rather than as a property of operations on
data. Indeed, as you're about to see in the next section, some of the TFF
operations span across locations, and run "in the network", so to speak, rather
than being executed by a single machine or a group of machines.
Representing the type of a certain value as `T@G` or `{T}@G` (as opposed to just
`T`) makes data placement decisions explicit, and together with a static
analysis of programs written in TFF, it can serve as a foundation for providing
formal privacy guarantees for sensitive on-device data.
An important thing to note at this point, however, is that while we encourage
TFF users to be explicit about *groups* of participating devices that host the
data (the placements), the programmer will never deal with the raw data or
identities of the *individual* participants.
(Note: While it goes far outside the scope of this tutorial, we should mention
that there is one notable exception to the above, a `tff.federated_collect`
operator that is intended as a low-level primitive, only for specialized
situations. Its explicit use in situations where it can be avoided is not
recommended, as it may limit the possible future applications. For example, if
during the course of static analysis, we determine that a computation uses such
low-level mechanisms, we may disallow its access to certain types of data.)
Within the body of TFF code, by design, there's no way to enumerate the devices
that constitute the group represented by `tff.CLIENTS`, or to probe for the
existence of a specific device in the group. There's no concept of a device or
client identity anywhere in the Federated Core API, the underlying set of
architectural abstractions, or the core runtime infrastructure we provide to
support simulations. All the computation logic you write will be expressed as
operations on the entire client group.
Recall here what we mentioned earlier about values of federated types being
unlike Python `dict`, in that one cannot simply enumerate their member
constituents. Think of values that your TFF program logic manipulates as being
associated with placements (groups), rather than with individual participants.
Placements *are* designed to be a first-class citizen in TFF as well, and can
appear as parameters and results of a `placement` type (to be represented by
`tff.PlacementType` in the API). In the future, we plan to provide a variety of
operators to transform or combine placements, but this is outside the scope of
this tutorial. For now, it suffices to think of `placement` as an opaque
primitive built-in type in TFF, similar to how `int` and `bool` are opaque
built-in types in Python, with `tff.CLIENTS` being a constant literal of this
type, not unlike `1` being a constant literal of type `int`.
### Specifying Placements
TFF provides two basic placement literals, `tff.CLIENTS` and `tff.SERVER`, to
make it easy to express the rich variety of practical scenarios that are
naturally modeled as client-server architectures, with multiple *client* devices
(mobile phones, embedded devices, distributed databases, sensors, etc.)
orchestrated by a single centralized *server* coordinator. TFF is designed to
also support custom placements, multiple client groups, multi-tiered and other,
more general distributed architectures, but discussing them is outside the scope
of this tutorial.
TFF doesn't prescribe what either the `tff.CLIENTS` or the `tff.SERVER` actually
represent.
In particular, `tff.SERVER` may be a single physical device (a member of a
singleton group), but it might just as well be a group of replicas in a
fault-tolerant cluster running state machine replication - we do not make any
special architectural assumptions. Rather, we use the `all_equal` bit mentioned
in the preceding section to express the fact that we're generally dealing with
only a single item of data at the server.
Likewise, `tff.CLIENTS` in some applications might represent all clients in the
system - what in the context of federated learning we sometimes refer to as the
*population*, but e.g., in
[production implementations of Federated Averaging](https://arxiv.org/abs/1602.05629),
it may represent a *cohort* - a subset of the clients selected for participation
in a particular round of training. The abstractly defined placements are given
concrete meaning when a computation in which they appear is deployed for
execution (or simply invoked like a Python function in a simulated environment,
as is demonstrated in this tutorial). In our local simulations, the group of
clients is determined by the federated data supplied as input.
## Federated computations
### Declaring federated computations
TFF is designed as a strongly-typed functional programming environment that
supports modular development.
The basic unit of composition in TFF is a *federated computation* - a section of
logic that may accept federated values as input and return federated values as
output. Here's how you can define a computation that calculates the average of
the temperatures reported by the sensor array from our previous example.
```
@tff.federated_computation(tff.type_at_clients(tf.float32))
def get_average_temperature(sensor_readings):
    """Average client-placed float32 sensor readings into a single server value."""
    return tff.federated_mean(sensor_readings)
```
Looking at the above code, at this point you might be asking - aren't there
already decorator constructs to define composable units such as
[`tf.function`](https://www.tensorflow.org/api_docs/python/tf/function)
in TensorFlow, and if so, why introduce yet another one, and how is it
different?
The short answer is that the code generated by the `tff.federated_computation`
wrapper is *neither* TensorFlow, *nor is it* Python - it's a specification of a
distributed system in an internal platform-independent *glue* language. At this
point, this will undoubtedly sound cryptic, but please bear this intuitive
interpretation of a federated computation as an abstract specification of a
distributed system in mind. We'll explain it in a minute.
First, let's play with the definition a bit. TFF computations are generally
modeled as functions - with or without parameters, but with well-defined type
signatures. You can print the type signature of a computation by querying its
`type_signature` property, as shown below.
```
str(get_average_temperature.type_signature)
```
The type signature tells us that the computation accepts a collection of
different sensor readings on client devices, and returns a single average on the
server.
Before we go any further, let's reflect on this for a minute - the input and
output of this computation are *in different places* (on `CLIENTS` vs. at the
`SERVER`). Recall what we said in the preceding section on placements about how
*TFF operations may span across locations, and run in the network*, and what we
just said about federated computations as representing abstract specifications
of distributed systems. We have just defined one such computation - a simple
distributed system in which data is consumed at client devices, and the
aggregate results emerge at the server.
In many practical scenarios, the computations that represent top-level tasks
will tend to accept their inputs and report their outputs at the server - this
reflects the idea that computations might be triggered by *queries* that
originate and terminate on the server.
However, FC API does not impose this assumption, and many of the building blocks
we use internally (including numerous `tff.federated_...` operators you may find
in the API) have inputs and outputs with distinct placements, so in general, you
should not think about a federated computation as something that *runs on the
server* or is *executed by a server*. The server is just one type of participant
in a federated computation. In thinking about the mechanics of such
computations, it's best to always default to the global network-wide
perspective, rather than the perspective of a single centralized coordinator.
In general, functional type signatures are compactly represented as `(T -> U)`
for types `T` and `U` of inputs and outputs, respectively. The type of the
formal parameter (such as `sensor_readings` in this case) is specified as the
argument to the decorator. You don't need to specify the type of the result -
it's determined automatically.
Although TFF does offer limited forms of polymorphism, programmers are strongly
encouraged to be explicit about the types of data they work with, as that makes
understanding, debugging, and formally verifying properties of your code easier.
In some cases, explicitly specifying types is a requirement (e.g., polymorphic
computations are currently not directly executable).
### Executing federated computations
In order to support development and debugging, TFF allows you to directly invoke
computations defined this way as Python functions, as shown below. Where the
computation expects a value of a federated type with the `all_equal` bit set to
`False`, you can feed it as a plain `list` in Python, and for federated types
with the `all_equal` bit set to `True`, you can just directly feed the (single)
member constituent. This is also how the results are reported back to you.
```
get_average_temperature([68.5, 70.3, 69.8])
```
When running computations like this in simulation mode, you act as an external
observer with a system-wide view, who has the ability to supply inputs and
consume outputs at any locations in the network, as indeed is the case here -
you supplied client values at input, and consumed the server result.
Now, let's return to a note we made earlier about the
`tff.federated_computation` decorator emitting code in a *glue* language.
Although the logic of TFF computations can be expressed as ordinary functions in
Python (you just need to decorate them with `tff.federated_computation` as we've
done above), and you can directly invoke them with Python arguments just
like any other Python functions in this notebook, behind the scenes, as we noted
earlier, TFF computations are actually *not* Python.
What we mean by this is that when the Python interpreter encounters a function
decorated with `tff.federated_computation`, it traces the statements in this
function's body once (at definition time), and then constructs a
[serialized representation](https://github.com/tensorflow/federated/blob/master/tensorflow_federated/proto/v0/computation.proto)
of the computation's logic for future use - whether for execution, or to be
incorporated as a sub-component into another computation.
You can verify this by adding a print statement, as follows:
```
@tff.federated_computation(tff.type_at_clients(tf.float32))
def get_average_temperature(sensor_readings):
print ('Getting traced, the argument is "{}".'.format(
type(sensor_readings).__name__))
return tff.federated_mean(sensor_readings)
```
You can think of Python code that defines a federated computation similarly to
how you would think of Python code that builds a TensorFlow graph in a non-eager
context (if you're not familiar with the non-eager uses of TensorFlow, think of
your Python code defining a graph of operations to be executed later, but not
actually running them on the fly). The non-eager graph-building code in
TensorFlow is Python, but the TensorFlow graph constructed by this code is
platform-independent and serializable.
Likewise, TFF computations are defined in Python, but the Python statements in
their bodies, such as `tff.federated_mean` in the example we've just shown,
are compiled into a portable and platform-independent serializable
representation under the hood.
As a developer, you don't need to concern yourself with the details of this
representation, as you will never need to directly work with it, but you should
be aware of its existence, the fact that TFF computations are fundamentally
non-eager, and cannot capture arbitrary Python state. Python code contained in a
TFF computation's body is executed at definition time, when the body of the
Python function decorated with `tff.federated_computation` is traced before
getting serialized. It's not retraced again at invocation time (except when the
function is polymorphic; please refer to the documentation pages for details).
You may wonder why we've chosen to introduce a dedicated internal non-Python
representation. One reason is that ultimately, TFF computations are intended to
be deployable to real physical environments, and hosted on mobile or embedded
devices, where Python may not be available.
Another reason is that TFF computations express the global behavior of
distributed systems, as opposed to Python programs which express the local
behavior of individual participants. You can see that in the simple example
above, with the special operator `tff.federated_mean` that accepts data on
client devices, but deposits the results on the server.
The operator `tff.federated_mean` cannot be easily modeled as an ordinary
operator in Python, since it doesn't execute locally - as noted earlier, it
represents a distributed system that coordinates the behavior of multiple system
participants. We will refer to such operators as *federated operators*, to
distinguish them from ordinary (local) operators in Python.
The TFF type system, and the fundamental set of operations supported in the TFF's
language, thus deviates significantly from those in Python, necessitating the
use of a dedicated representation.
### Composing federated computations
As noted above, federated computations and their constituents are best
understood as models of distributed systems, and you can think of composing
federated computations as composing more complex distributed systems from
simpler ones. You can think of the `tff.federated_mean` operator as a kind of
built-in template federated computation with a type signature `({T}@CLIENTS ->
T@SERVER)` (indeed, just like computations you write, this operator also has a
complex structure - under the hood we break it down into simpler operators).
The same is true of composing federated computations. The computation
`get_average_temperature` may be invoked in a body of another Python function
decorated with `tff.federated_computation` - doing so will cause it to be
embedded in the body of the parent, much in the same way `tff.federated_mean`
was embedded in its own body earlier.
An important restriction to be aware of is that bodies of Python functions
decorated with `tff.federated_computation` must consist *only* of federated
operators, i.e., they cannot directly contain TensorFlow operations. For
example, you cannot directly use `tf.nest` interfaces to add a pair of
federated values. TensorFlow code must be confined to blocks of code decorated
with a `tff.tf_computation` discussed in the following section. Only when
wrapped in this manner can the wrapped TensorFlow code be invoked in the body of
a `tff.federated_computation`.
The reasons for this separation are technical (it's hard to trick operators such
as `tf.add` to work with non-tensors) as well as architectural. The language of
federated computations (i.e., the logic constructed from serialized bodies of
Python functions decorated with `tff.federated_computation`) is designed to
serve as a platform-independent *glue* language. This glue language is currently
used to build distributed systems from embedded sections of TensorFlow code
(confined to `tff.tf_computation` blocks). In the fullness of time, we
anticipate the need to embed sections of other, non-TensorFlow logic, such as
relational database queries that might represent input pipelines, all connected
together using the same glue language (the `tff.federated_computation` blocks).
## TensorFlow logic
### Declaring TensorFlow computations
TFF is designed for use with TensorFlow. As such, the bulk of the code you will
write in TFF is likely to be ordinary (i.e., locally-executing) TensorFlow code.
In order to use such code with TFF, as noted above, it just needs to be
decorated with `tff.tf_computation`.
For example, here's how we could implement a function that takes a number and
adds `0.5` to it.
```
@tff.tf_computation(tf.float32)
def add_half(x):
return tf.add(x, 0.5)
```
Once again, looking at this, you may be wondering why we should define another
decorator `tff.tf_computation` instead of simply using an existing mechanism
such as `tf.function`. Unlike in the preceding section, here we are
dealing with an ordinary block of TensorFlow code.
There are a few reasons for this, the full treatment of which goes beyond the
scope of this tutorial, but it's worth naming the main one:
* In order to embed reusable building blocks implemented using TensorFlow code
in the bodies of federated computations, they need to satisfy certain
properties - such as getting traced and serialized at definition time,
having type signatures, etc. This generally requires some form of a
decorator.
In general, we recommend using TensorFlow's native mechanisms for composition,
such as `tf.function`, wherever possible, as the exact manner in
which TFF's decorator interacts with eager functions can be expected to evolve.
Now, coming back to the example code snippet above, the computation `add_half`
we just defined can be treated by TFF just like any other TFF computation. In
particular, it has a TFF type signature.
```
str(add_half.type_signature)
```
Note this type signature does not have placements. TensorFlow computations
cannot consume or return federated types.
You can now also use `add_half` as a building block in other computations. For
example, here's how you can use the `tff.federated_map` operator to apply
`add_half` pointwise to all member constituents of a federated float on client
devices.
```
@tff.federated_computation(tff.type_at_clients(tf.float32))
def add_half_on_clients(x):
return tff.federated_map(add_half, x)
str(add_half_on_clients.type_signature)
```
### Executing TensorFlow computations
Execution of computations defined with `tff.tf_computation` follows the same
rules as those we described for `tff.federated_computation`. They can be invoked
as ordinary callables in Python, as follows.
```
add_half_on_clients([1.0, 3.0, 2.0])
```
Once again, it is worth noting that invoking the computation
`add_half_on_clients` in this manner simulates a distributed process. Data is
consumed on clients, and returned on clients. Indeed, this computation has each
client perform a local action. There is no `tff.SERVER` explicitly mentioned in
this system (even if in practice, orchestrating such processing might involve
one). Think of a computation defined this way as conceptually analogous to the
`Map` stage in `MapReduce`.
Also, keep in mind that what we said in the preceding section about TFF
computations getting serialized at the definition time remains true for
`tff.tf_computation` code as well - the Python body of `add_half_on_clients`
gets traced once at definition time. On subsequent invocations, TFF uses its
serialized representation.
The only difference between Python methods decorated with
`tff.federated_computation` and those decorated with `tff.tf_computation` is
that the latter are serialized as TensorFlow graphs (whereas the former are not
allowed to contain TensorFlow code directly embedded in them).
Under the hood, each method decorated with `tff.tf_computation` temporarily
disables eager execution in order to allow the computation's structure to be
captured. While eager execution is locally disabled, you are welcome to use
eager TensorFlow, AutoGraph, TensorFlow 2.0 constructs, etc., so long as you
write the logic of your computation in a manner such that it can get correctly
serialized.
For example, the following code will fail:
```
try:
# Eager mode
constant_10 = tf.constant(10.)
@tff.tf_computation(tf.float32)
def add_ten(x):
return x + constant_10
except Exception as err:
print (err)
```
The above fails because `constant_10` has already been constructed outside of
the graph that `tff.tf_computation` constructs internally in the body of
`add_ten` during the serialization process.
On the other hand, invoking python functions that modify the current graph when
called inside a `tff.tf_computation` is fine:
```
def get_constant_10():
return tf.constant(10.)
@tff.tf_computation(tf.float32)
def add_ten(x):
return x + get_constant_10()
add_ten(5.0)
```
Note that the serialization mechanisms in TensorFlow are evolving, and we expect
the details of how TFF serializes computations to evolve as well.
### Working with `tf.data.Dataset`s
As noted earlier, a unique feature of `tff.tf_computation`s is that they allow
you to work with `tf.data.Dataset`s defined abstractly as formal parameters by
your code. Parameters to be represented in TensorFlow as data sets need to be
declared using the `tff.SequenceType` constructor.
For example, the type specification `tff.SequenceType(tf.float32)` defines an
abstract sequence of float elements in TFF. Sequences can contain either
tensors, or complex nested structures (we'll see examples of those later). The
concise representation of a sequence of `T`-typed items is `T*`.
```
float32_sequence = tff.SequenceType(tf.float32)
str(float32_sequence)
```
Suppose that in our temperature sensor example, each sensor holds not just one
temperature reading, but multiple. Here's how you can define a TFF computation
in TensorFlow that calculates the average of temperatures in a single local data
set using the `tf.data.Dataset.reduce` operator.
```
@tff.tf_computation(tff.SequenceType(tf.float32))
def get_local_temperature_average(local_temperatures):
sum_and_count = (
local_temperatures.reduce((0.0, 0), lambda x, y: (x[0] + y, x[1] + 1)))
return sum_and_count[0] / tf.cast(sum_and_count[1], tf.float32)
str(get_local_temperature_average.type_signature)
```
In the body of a method decorated with `tff.tf_computation`, formal parameters
of a TFF sequence type are represented simply as objects that behave like
`tf.data.Dataset`, i.e., support the same properties and methods (they are
currently not implemented as subclasses of that type - this may change as the
support for data sets in TensorFlow evolves).
You can easily verify this as follows.
```
@tff.tf_computation(tff.SequenceType(tf.int32))
def foo(x):
return x.reduce(np.int32(0), lambda x, y: x + y)
foo([1, 2, 3])
```
Keep in mind that unlike ordinary `tf.data.Dataset`s, these dataset-like objects
are placeholders. They don't contain any elements, since they represent abstract
sequence-typed parameters, to be bound to concrete data when used in a concrete
context. Support for abstractly-defined placeholder data sets is still somewhat
limited at this point, and in the early days of TFF, you may encounter certain
restrictions, but we won't need to worry about them in this tutorial (please
refer to the documentation pages for details).
When locally executing a computation that accepts a sequence in a simulation
mode, such as in this tutorial, you can feed the sequence as Python list, as
below (as well as in other ways, e.g., as a `tf.data.Dataset` in eager mode, but
for now, we'll keep it simple).
```
get_local_temperature_average([68.5, 70.3, 69.8])
```
Like all other TFF types, sequences like those defined above can use the
`tff.StructType` constructor to define nested structures. For example,
here's how one could declare a computation that accepts a sequence of pairs `A`,
`B`, and returns the sum of their products. We include the tracing statements in
the body of the computation so that you can see how the TFF type signature
translates into the dataset's `output_types` and `output_shapes`.
```
@tff.tf_computation(tff.SequenceType(collections.OrderedDict([('A', tf.int32), ('B', tf.int32)])))
def foo(ds):
print('element_structure = {}'.format(ds.element_spec))
return ds.reduce(np.int32(0), lambda total, x: total + x['A'] * x['B'])
str(foo.type_signature)
foo([{'A': 2, 'B': 3}, {'A': 4, 'B': 5}])
```
The support for using `tf.data.Datasets` as formal parameters is still somewhat
limited and evolving, although functional in simple scenarios such as those used
in this tutorial.
## Putting it all together
Now, let's try again to use our TensorFlow computation in a federated setting.
Suppose we have a group of sensors that each have a local sequence of
temperature readings. We can compute the global temperature average by averaging
the sensors' local averages as follows.
```
@tff.federated_computation(
tff.type_at_clients(tff.SequenceType(tf.float32)))
def get_global_temperature_average(sensor_readings):
return tff.federated_mean(
tff.federated_map(get_local_temperature_average, sensor_readings))
```
Note that this isn't a simple average across all local temperature readings from
all clients, as that would require weighing contributions from different clients
by the number of readings they locally maintain. We leave it as an exercise for
the reader to update the above code; the `tff.federated_mean` operator
accepts the weight as an optional second argument (expected to be a federated
float).
Also note that the input to `get_global_temperature_average` now becomes a
*federated float sequence*. Federated sequences are how we will typically represent
on-device data in federated learning, with sequence elements typically
representing data batches (you will see examples of this shortly).
```
str(get_global_temperature_average.type_signature)
```
Here's how we can locally execute the computation on a sample of data in Python.
Notice that the way we supply the input is now as a `list` of `list`s. The outer
list iterates over the devices in the group represented by `tff.CLIENTS`, and
the inner ones iterate over elements in each device's local sequence.
```
get_global_temperature_average([[68.0, 70.0], [71.0], [68.0, 72.0, 70.0]])
```
This concludes the first part of the tutorial... we encourage you to continue on
to the [second part](custom_federated_algorithms_2.ipynb).
| github_jupyter |
# Animalwatch validator
```
import glob
import os.path as op
import numpy as np
from io import open
from ruamel.yaml import YAML
# NOTE(review): typ="unsafe" lets ruamel.yaml construct arbitrary Python
# objects from YAML tags - only load annotation files from trusted sources.
yaml = YAML(typ="unsafe")
# Ground-truth annotations (stored in subdirectories, one level deep).
true_annotations_path = "E:\\data\\lynx_lynx\\zdo\\anotace"
# Student annotations under evaluation (flat directory, no subdirs).
annotations_path = "E:\\data\\lynx_lynx\\zdo\\anotace_test"
def evaluate_dir(true_annotations_path, annotations_path):
    """Score every ground-truth annotation against the student annotations.

    :param true_annotations_path: directory whose subdirectories hold the
        ground-truth annotation YAML files
    :param annotations_path: flat directory with the student annotations
    :return: overall average score; ground-truth files with no matching
        student annotation count as 0.0
    """
    # NOTE(review): without recursive=True the "**" matches exactly one
    # directory level; pass recursive=True if annotations can nest deeper.
    true_annotation_files = glob.glob(op.join(true_annotations_path, "**", "*.y*ml"))
    score = []
    score_failed = []
    print("Score - video file")
    print("-------")
    for true_annotation_fn in true_annotation_files:
        annotation_fn, video_fn = find_annotation(annotations_path, true_annotation_fn)
        if annotation_fn is None:
            # No matching student file - tracked separately as a failure.
            print("0.0 - " + str(video_fn) + " - Annotation not found")
            score_failed.append(0.0)
        else:
            sc = compare_yaml_objs(
                get_yaml_obj(annotation_fn),
                get_yaml_obj(true_annotation_fn)
            )
            score.append(sc)
            print(str(sc) + " - " + str(video_fn))
    print("=======")
    # Average over successfully matched annotations only.
    score_ok = np.average(score)
    print("Score without failed (" + str(len(score)) + "/" + str(len(score) + len(score_failed)) + "): " + str(score_ok))
    # Fold the failures (0.0 each) into the final average.
    score.extend(score_failed)
    score = np.average(score)
    print("Score: " + str(score))
    return score
def get_iou(bb1, bb2):
    """Return the Intersection over Union (IoU) of two bounding boxes.

    by Martin Thoma

    Parameters
    ----------
    bb1, bb2 : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}. (x1, y1) is the top-left corner,
        (x2, y2) the bottom-right corner.

    Returns
    -------
    float
        IoU in [0, 1].
    """
    # Both boxes must have strictly positive width and height.
    for bb in (bb1, bb2):
        assert bb['x1'] < bb['x2']
        assert bb['y1'] < bb['y2']

    # Corners of the (possibly empty) intersection rectangle.
    left = max(bb1['x1'], bb2['x1'])
    top = max(bb1['y1'], bb2['y1'])
    right = min(bb1['x2'], bb2['x2'])
    bottom = min(bb1['y2'], bb2['y2'])

    # Boxes do not overlap at all.
    if right < left or bottom < top:
        return 0.0

    # The intersection of two axis-aligned boxes is itself axis-aligned.
    intersection = (right - left) * (bottom - top)
    area1 = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
    area2 = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])

    # Union = sum of areas minus the doubly-counted intersection.
    iou = intersection / float(area1 + area2 - intersection)
    assert 0.0 <= iou <= 1.0
    return iou
def get_iou_safe(bb1, bb2):
    """IoU that tolerates malformed boxes.

    Empty-vs-empty scores a perfect 1.0; anything get_iou rejects
    (missing keys, degenerate corners, ...) scores 0.0.
    """
    if len(bb1) == 0 and len(bb2) == 0:
        return 1.0
    try:
        return get_iou(bb1, bb2)
    except Exception:
        # Deliberate best-effort: a box get_iou cannot handle is "no match".
        return 0.0
def find_annotation(annotations_path, true_annotation_file):
    """Locate the student annotation matching a ground-truth annotation.

    Matching is case-insensitive on the video file name stored inside each
    YAML file. Returns (annotation_path, video_name) on success, otherwise
    (None, true_video_name) after printing diagnostics.
    """
    target = get_video_file_name(true_annotation_file)
    target_upper = target.upper()
    seen = []
    for candidate in glob.glob(op.join(annotations_path, "*.y*ml")):
        name = get_video_file_name(candidate)
        seen.append(name)
        if name.upper() == target_upper:
            return candidate, name
    # Nothing matched - report what was searched for and what was found.
    print("true_video_fn: ", target)
    print("found_annotation_fn", seen)
    return None, target
def compare_bboxes(bboxes1, bboxes2):
    """One-directional match score between two bounding-box lists.

    Every box in bboxes1 is matched to its best IoU partner in bboxes2
    and the best scores are averaged. Empty-vs-empty is a perfect 1.0;
    empty-vs-nonempty is 0.0.
    """
    if len(bboxes1) == 0 and len(bboxes2) == 0:
        return 1.0
    if len(bboxes1) == 0 or len(bboxes2) == 0:
        return 0.0
    best_per_box = [
        np.max([get_iou_safe(box, other) for other in bboxes2])
        for box in bboxes1
    ]
    return np.average(best_per_box)
def compare_bboxes_symmetric(bboxes1, bboxes2):
    """Symmetrize compare_bboxes by averaging both matching directions."""
    forward = compare_bboxes(bboxes1, bboxes2)
    backward = compare_bboxes(bboxes2, bboxes1)
    return np.average([forward, backward])
def compare_frames(true_yaml_obj, yaml_obj, frame_number):
    """Symmetric bbox score between the two annotations at one frame."""
    return compare_bboxes_symmetric(
        get_bboxes_from_frame(true_yaml_obj, frame_number),
        get_bboxes_from_frame(yaml_obj, frame_number),
    )
def get_frame_number(yaml_obj):
    """Highest frame index present in the annotation's "frames" mapping."""
    frame_indices = list(yaml_obj["frames"])
    return np.max(frame_indices)
def compare_yaml_objs(true_yaml_obj, yaml_obj):
    """Average per-frame score between two annotation objects.

    Frames 0 through the highest frame index present in either annotation
    are compared; a frame missing from one side counts as "no boxes" there.
    """
    last_frame = int(np.max([get_frame_number(yaml_obj), get_frame_number(true_yaml_obj)]))
    scores = []
    # range(last_frame + 1): the highest annotated frame must be scored as
    # well - the previous range(0, last_frame) silently dropped it.
    for i in range(last_frame + 1):
        scores.append(compare_frames(true_yaml_obj, yaml_obj, frame_number=i))
    return np.average(scores)
def get_yaml_obj(yaml_fn):
    """Parse a YAML annotation file (UTF-8) with the module-level loader."""
    with open(yaml_fn, encoding="utf-8") as stream:
        return yaml.load(stream)
def get_video_file_name(yaml_fn):
    """Base name of the video referenced by the annotation's "path" entry."""
    annotation = get_yaml_obj(yaml_fn)
    return op.split(annotation["path"])[1]
def get_bboxes_from_frame(yaml_obj, frame_number):
    """Bounding boxes annotated at a frame, or [] when the frame is absent."""
    frames = yaml_obj["frames"]
    if frame_number in frames:
        return frames[frame_number]
    # Unannotated frame: treated downstream as "no boxes here".
    return []
# def compare_bboxes(bboxes1, bboxes2):
# scores = []
# lbb1 = len(bboxes1)
# lbb2 = len(bboxes2)
# if lbb1 == 0 and lbb2 == 0:
# return 1.0
# elif lbb1 == 0:
# return 0.0
# elif lbb2 == 0:
# return 0.0
# for bbox1 in bboxes1:
# scores_for_one = []
# for bbox2 in bboxes2:
# scores_for_one.append(get_iou_safe(bbox1, bbox2))
# # print(bbox1, bbox2)
# scores.append(np.max(scores_for_one))
# # print("compare_bboxes ", len(bboxes1), len(bboxes2), scores)
# return np.average(scores)
# def compare_bboxes_symmetric(bboxes1, bboxes2):
# return np.average([
# compare_bboxes(bboxes1, bboxes2),
# compare_bboxes(bboxes2, bboxes1),
# ])
# def compare_frames(true_yaml_obj, yaml_obj, frame_number):
# bb1 = get_bboxes_from_frame(true_yaml_obj, frame_number)
# bb2 = get_bboxes_from_frame(yaml_obj, frame_number)
# return compare_bboxes_symmetric(bb1, bb2)
# def get_frame_number(yaml_obj):
# return np.max(list(yaml_obj["frames"]))
# def compare_yaml_objs(true_yaml_obj, yaml_obj):
# frame_number = int(np.max([get_frame_number(yaml_obj), get_frame_number(true_yaml_obj)]))
# scores = []
# for i in range(0, frame_number):
# scores.append(compare_frames(true_yaml_obj, yaml_obj, frame_number=i))
# # print(scores)
# return np.average(scores)
# def get_yaml_obj(yaml_fn):
# with open(yaml_fn, encoding="utf-8") as f:
# obj = yaml.load(f)
# return obj
# def get_video_file_name(yaml_fn):
# obj = get_yaml_obj(yaml_fn)
# video_fn = obj["path"]
# return video_fn
# def get_bboxes_from_frame(yaml_obj, frame_number):
# # print(frame_number)
# if frame_number in yaml_obj["frames"]:
# bboxes = yaml_obj["frames"][frame_number]
# else:
# bboxes = [
# # {
# # "x1": 0,
# # "x2": 0,
# # "y1": 0,
# # "y2": 0,
# # }
# ]
# # print("zero")
# return bboxes
```
# Example
```
# Run the full evaluation over the configured directories.
evaluate_dir(true_annotations_path, annotations_path)
```
# Debug tests
```
# filename = "anotace.yaml"
# yaml = YAML(typ="unsafe")
# with open(filename, encoding="utf-8") as f:
# obj = yaml.load(f)
# Smoke tests - these depend on real annotation files being on disk.
files = glob.glob(op.join(annotations_path, "*.y*ml"))
files[0]
yaml_fn = files[1]
video_fn = get_video_file_name(yaml_fn)
# print(video_fn, yaml_fn)
true_yaml_obj = get_yaml_obj(yaml_fn)
# Two boxes of the same annotation taken from different frames.
bb1 = true_yaml_obj["frames"][1][0]
bb2 = true_yaml_obj["frames"][5][0]
get_iou(bb1, bb2)
compare_bboxes(get_bboxes_from_frame(true_yaml_obj, 5), get_bboxes_from_frame(true_yaml_obj, 1))
# Identical box lists must score a perfect 1.0.
assert(compare_bboxes(
    [{'x1': 341, 'x2': 459, 'y1': 417, 'y2': 491}, {'x1': 541, 'x2': 559, 'y1': 517, 'y2': 591}],
    [{'x1': 341, 'x2': 459, 'y1': 417, 'y2': 491}, {'x1': 541, 'x2': 559, 'y1': 517, 'y2': 591}]) == 1.0)
compare_bboxes_symmetric(
    [{'x1': 341, 'x2': 459, 'y1': 417, 'y2': 491}, {'x1': 541, 'x2': 559, 'y1': 517, 'y2': 591}],
    [{'x1': 341, 'x2': 459, 'y1': 417, 'y2': 491}],
)
# Degenerate box (y1 == y2) is rejected by get_iou, so scores are 0.0.
assert(
    compare_bboxes_symmetric(
        [{'x1': 341, 'x2': 459, 'y1': 417, 'y2': 491}, {'x1': 541, 'x2': 559, 'y1': 517, 'y2': 591}],
        [{'x1': 0, 'x2': 1, 'y1': 0, 'y2': 0}],
    ) == 0.0
)
# true_yaml_obj
# An annotation compared with itself is a perfect match.
assert(
    compare_frames(true_yaml_obj, true_yaml_obj, frame_number=5) == 1
)
assert(
    compare_yaml_objs(get_yaml_obj(files[0]), get_yaml_obj(files[0])) == 1
)
# Sanity-check the test fixture itself.
assert('E:\\data\\lynx_lynx\\zdo\\anotace_test\\IMAG0021.yaml' in files)
# files[1] is a student file, so it must not be found among ground truths.
fn1, fn2 = find_annotation(true_annotations_path, files[1])
assert(fn1 is None)
# "asd" == "asd"
# files
```
## Debug 4 files found
```
# Reproduce a reported "annotation not found" case against another
# student's annotation directory.
find_annotation(
    r"C:\Users\miros\projects\zdo_lynx_lynx\ZDO_SP_Sosnova_Cincera\Anotace",
    r"E:\data\lynx_lynx\zdo\anotace\01\IMAG0017.yaml"
)
# "sdfa".upper()
import os.path as op
# op.split keeps only the trailing file name component.
_, uu = op.split("uur/safs/asdfsda.avi")
```
| github_jupyter |
# Demos: Lecture 17
## Demo 1: bit flip errors
```
import pennylane as qml
from pennylane import numpy as np
import matplotlib.pyplot as plt
from lecture17_helpers import *
from scipy.stats import unitary_group
# Single-qubit simulator on the mixed-state ("default.mixed") backend.
dev = qml.device("default.mixed", wires=1)

@qml.qnode(dev)
def prepare_state(U, p):
    """Prepare U|0>, apply a bit-flip channel with probability p, and
    return the device state (a density matrix on this mixed backend)."""
    qml.QubitUnitary(U, wires=0)
    qml.BitFlip(p, wires=0)
    #qml.DepolarizingChannel(p, wires=0)
    return qml.state()
# Sample random pure states and visualize how the bit-flip channel
# (p = 0.3) deforms the Bloch sphere.
n_samples = 500
original_states = []
flipped_states = []
for _ in range(n_samples):
    U = unitary_group.rvs(2)  # random 2x2 unitary
    original_state = prepare_state(U, 0)
    flipped_state = prepare_state(U, 0.3)
    original_states.append(convert_to_bloch_vector(original_state))
    flipped_states.append(convert_to_bloch_vector(flipped_state))
plot_bloch_sphere(original_states)
plot_bloch_sphere(flipped_states)
```
## Demo 2: depolarizing noise
## Demo 3: fidelity and trace distance
$$
F(\rho, \sigma) = \left( \hbox{Tr} \sqrt{\sqrt{\rho}\sigma\sqrt{\rho}} \right)^2
$$
```
from scipy.linalg import sqrtm
def fidelity(rho, sigma):
    """Fidelity F(rho, sigma) = (Tr sqrt(sqrt(rho) sigma sqrt(rho)))**2."""
    root = sqrtm(rho)
    sandwich = np.linalg.multi_dot([root, sigma, root])
    return np.trace(sqrtm(sandwich)) ** 2
# Projectors |0><0| and |1><1| for quick sanity checks.
proj_0 = np.array([[1, 0], [0, 0]])
proj_1 = np.array([[0, 0], [0, 1]])
fidelity(proj_0, proj_0)  # identical states -> 1
fidelity(proj_0, proj_1)  # orthogonal states -> 0
```
$$
T(\rho, \sigma) = \frac{1}{2} \hbox{Tr} \left( \sqrt{(\rho - \sigma)^\dagger (\rho - \sigma)} \right)
$$
```
def trace_distance(rho, sigma):
    """Trace distance T(rho, sigma) = 0.5 Tr sqrt((rho-sigma)^dag (rho-sigma))."""
    delta = rho - sigma
    gram = np.dot(delta.conj().T, delta)
    return 0.5 * np.trace(sqrtm(gram))
# Sweep the bit-flip probability and track how fidelity falls and trace
# distance grows between the clean and noisy versions of one random state.
U = unitary_group.rvs(2)
p_vals = np.linspace(0, 1, 10)
fids = []
tr_ds = []
for p in p_vals:
    # NOTE(review): original_state does not depend on p; it could be
    # hoisted out of the loop.
    original_state = prepare_state(U, 0)
    error_state = prepare_state(U, p)
    fids.append(fidelity(original_state, error_state))
    tr_ds.append(trace_distance(original_state, error_state))
plt.scatter(p_vals, fids)
plt.scatter(p_vals, tr_ds)
```
## Demo 4: VQE for $H_2$ molecule
```
# H2 geometry: two hydrogens on the z-axis, symmetric about the origin.
bond_length = 1.3228
symbols = ["H", "H"]
coordinates = np.array([0.0, 0.0, -bond_length/2, 0.0, 0.0, bond_length/2])
# Build the qubit Hamiltonian for this molecule.
H, n_qubits = qml.qchem.molecular_hamiltonian(symbols, coordinates)
print(H)
```
Ground state of $H_2$ looks like:
$$
|\psi_g(\theta)\rangle = \cos(\theta/2) |1100\rangle - \sin(\theta/2) |0011\rangle
$$
```
dev = qml.device("default.qubit", wires=4)

def prepare_ground_state(theta):
    """Ansatz for cos(theta/2)|1100> - sin(theta/2)|0011>; returns <H>."""
    # Start from |1100>.
    qml.PauliX(wires=0)
    qml.PauliX(wires=1)
    # Rotate between |1100> and |0011> by theta.
    qml.DoubleExcitation(theta, wires=range(4))
    return qml.expval(H)
# Minimize <H> over theta with gradient descent on the noise-free device.
opt = qml.GradientDescentOptimizer(stepsize=0.5)
ideal_qnode = qml.QNode(prepare_ground_state, dev)
theta = np.array(0.0, requires_grad=True)
energies = []
for _ in range(30):
    theta, _energy = opt.step_and_cost(ideal_qnode, theta)
    energies.append(_energy)
plt.plot(energies)
energies[-1]
theta
```
## Demo 5: VQE on a noisy device
```
from qiskit.test.mock import FakeSantiago
from qiskit.providers.aer import QasmSimulator
from qiskit.providers.aer.noise import NoiseModel
device = QasmSimulator.from_backend(FakeSantiago())
noise_model = NoiseModel.from_backend(device, readout_error=False)
noisy_dev = qml.device(
"qiskit.aer", backend='qasm_simulator', wires=4, shots=10000, noise_model=noise_model
)
noisy_qnode = qml.QNode(prepare_ground_state, noisy_dev)
noisy_qnode(theta)
opt = qml.GradientDescentOptimizer(stepsize=0.5)
theta = np.array(0.0, requires_grad=True)
noisy_energies = []
for it in range(30):
if it % 5 == 0:
print(f"it = {it}")
theta, _energy = opt.step_and_cost(noisy_qnode, theta)
noisy_energies.append(_energy)
plt.scatter(range(30), energies)
plt.scatter(range(30), noisy_energies)
```
## Demo 6: zero-noise extrapolation
| github_jupyter |
Imports
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision import models, datasets
from torch.autograd import Variable
import shutil
from torchsummary import summary
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
Hyperparameters
```
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
torch.cuda.manual_seed(1337)
batch_size = 100
test_batch_size = 1000
gamma = 0.001
lr = 0.01
prune_rate=0.9
kwargs = {'num_workers': 16, 'pin_memory': True}
```
DataLoaders
```
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=test_batch_size, shuffle=True, **kwargs)
```
Network Model
```
class sequential_model(nn.Module):
    """VGG-style convolutional classifier for 32x32 RGB images (10 classes).

    `layers` lists conv output widths, with 'M' entries marking 2x2 max-pool
    stages. The final integer must equal the flattened feature size reaching
    the linear classifier -- true for the default config on 32x32 inputs,
    where four pools plus the 2x2 average pool leave a 1x1 spatial map.
    """

    def __init__(self, layers=None):
        super(sequential_model, self).__init__()
        if layers is None:
            # Default: VGG-16-like feature stack.
            layers = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512]
        num_classes = 10
        self.feature = self.make_layers(layers)
        self.classifier = nn.Linear(layers[-1], num_classes)

    def make_layers(self, structure):
        """Build the conv/BN/ReLU (+ max-pool) stack described by `structure`."""
        blocks = []
        width = 3  # RGB input channels
        for spec in structure:
            if spec == 'M':
                blocks.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                blocks.extend([
                    nn.Conv2d(width, spec, kernel_size=3, padding=1, bias=False),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                width = spec
        return nn.Sequential(*blocks)

    def forward(self, x):
        features = self.feature(x)
        pooled = nn.AvgPool2d(2)(features)
        flat = pooled.view(pooled.size(0), -1)
        return self.classifier(flat)
```
Train Epoch method
```
def sum_scaling_factors(model):
    """Return the L1 norm of all BatchNorm2d scale (gamma) parameters.

    This is the sparsity penalty used for network-slimming style pruning:
    pushing gammas toward zero marks prunable channels.
    """
    total = 0
    for module in model.modules():
        if isinstance(module, nn.BatchNorm2d):
            total += module.weight.data.abs().sum()
    return total
def train(model, epoch, optimizer, data_loader=train_loader, sparsity=True):
"""Run one training epoch of `model` over `data_loader`.

When `sparsity` is True, an L1 penalty (weighted by the global `gamma`) on
all BatchNorm2d scaling factors is added to the cross-entropy loss
(network-slimming style sparsity regularization).
NOTE(review): requires CUDA -- inputs are moved with .cuda().
NOTE(review): body indentation appears lost in the notebook export.
"""
model.train()
#print(data_loader)
for idx, (data, target) in enumerate(data_loader):
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)  # Variable is a legacy no-op wrapper in modern PyTorch
optimizer.zero_grad()
output = model(data)
if sparsity:
sum_channel_scaling_factors = sum_scaling_factors(model)
loss = F.cross_entropy(output, target) + gamma * sum_channel_scaling_factors
else:
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
"""if idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
epoch, idx * len(data), len(data_loader.dataset),
100. * idx / len(data_loader), loss.data.item()))"""
```
Validation Method
```
#returns precision and loss of model (precision here = accuracy fraction in [0, 1])
def test(model, data_loader=test_loader):
"""Evaluate `model` on `data_loader`; returns (accuracy, mean loss).

NOTE(review): no torch.no_grad() here, so evaluation builds autograd graphs;
correct but wasteful. `size_average=False` is the legacy spelling of
reduction='sum'. Requires CUDA.
NOTE(review): body indentation appears lost in the notebook export.
"""
model.eval()
test_loss = 0
correct = 0
for data, target in data_loader:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).data.item()
pred = output.data.max(1, keepdim=True)[1]  # index of the max logit per sample
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(data_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
return (correct / float(len(data_loader.dataset)), test_loss)
```
Save Model Method
```
def save_checkpoint(state, is_best, filename='model_best'):
    """Persist a training checkpoint; mirror it as the best model when flagged.

    Writes `<filename>_checkpoint.pth.tar` every call and copies it to
    `<filename>.pth.tar` when `is_best` is truthy.
    """
    ckpt_path = filename + '_checkpoint.pth.tar'
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, filename + '.pth.tar')
```
Train network method
```
def train_model(model, epochs=10, sparsity=True, filename='best_model'):
    """Train `model` for `epochs` epochs with Adam, checkpointing every epoch.

    sparsity: when True, `train` adds the L1 penalty on BN scaling factors.
    filename: checkpoint basename handed to `save_checkpoint`.
    Returns the trained model (also mutated in place). Requires CUDA.
    """
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    best_prec = 0.
    for i in range(0, epochs):
        train(model, i, optimizer, sparsity=sparsity)
        prec, loss = test(model)
        is_best = prec > best_prec
        # Bug fix: the original assigned the running maximum to a dead
        # variable (`best_prec1`), so `best_prec` stayed 0 forever, `is_best`
        # was wrong after the first improvement, and the saved 'best_prec1'
        # field was always 0.
        best_prec = max(prec, best_prec)
        save_checkpoint({
            'epoch': i + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec,
            'optimizer': optimizer.state_dict(),
        }, is_best, filename)
    return model
```
Load existing Model method
```
def load_model(checkpoint_path="checkpoint_sr.pth.tar", model_path="model_best_sr.pth.tar"):
"""Build a fresh sequential_model on the GPU and restore weights from `model_path`.

NOTE(review): the `checkpoint_path` parameter is never used as a path -- it is
immediately rebound below to the loaded checkpoint dict, which is confusing
but harmless. The "loaded" print formats the whole model object, not the path.
NOTE(review): body indentation appears lost in the notebook export.
"""
model = sequential_model()
model.cuda()
if os.path.isfile(model_path):
print("=> loading checkpoint '{}'".format(model_path))
checkpoint_path = torch.load(model_path)  # rebinds the parameter to the checkpoint dict
best_prec1 = checkpoint_path['best_prec1']
model.load_state_dict(checkpoint_path['state_dict'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(model, checkpoint_path['epoch'], best_prec1))
else:
print("=> no checkpoint found at")
return model
```
Select weak channels
```
def selectChannels(model, percent=0.2):
"""Select and zero the weakest `percent` of BatchNorm2d channels model-wide.

Channels are ranked by |gamma| (the BN scale parameter); those at or below
the global threshold are zeroed IN PLACE (both weight and bias). Returns:
  cfg      -- surviving channel count per conv stage, with 'M' entries for
              max-pool layers (a sequential_model layer spec);
  cfg_mask -- one 0/1 float mask per BN layer marking kept channels.
NOTE(review): mutates `model` in place and requires CUDA (mask.cuda()).
NOTE(review): body indentation appears lost in the notebook export.
"""
# First pass: count all BN channels to size the ranking buffer.
total = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
total += m.weight.data.shape[0]
bn = torch.zeros(total)
index = 0
#print("Typ:")
#print(type(model.modules()))
# Second pass: gather |gamma| of every BN channel into one flat vector.
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.shape[0]
bn[index:(index+size)] = m.weight.data.abs().clone()
index += size
# Global threshold: the value at the `percent` quantile of sorted |gamma|.
y, i = torch.sort(bn)
thre_index = int(total * percent)
thre = y[thre_index]
pruned = 0
cfg = []
cfg_mask = []
for k, m in enumerate(model.modules()):
if isinstance(m, nn.BatchNorm2d):
weight_copy = m.weight.data.clone()
#print(type(weight_copy.abs().gt(thre).float()))
#mask is a matrix in which 1 marks the channels which are kept and 0 marks the pruned channels
mask = weight_copy.abs().gt(thre).float().cuda()
#pruned is the number of all pruned channels
pruned = pruned + mask.shape[0] - torch.sum(mask)
m.weight.data.mul_(mask)
m.bias.data.mul_(mask)
cfg.append(int(torch.sum(mask)))
cfg_mask.append(mask.clone())
#print('layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'.
# format(k, mask.shape[0], int(torch.sum(mask))))
elif isinstance(m, nn.MaxPool2d):
cfg.append('M')
return cfg, cfg_mask
"""
Takes a smaller network structure in which the model is transfered.
cfg_mask marks all parameters over model which are transfered or dropped
"""
def transfer_params(cfg, cfg_mask, model):
"""Copy the surviving parameters of `model` into a freshly built, smaller net.

cfg is the pruned layer spec from selectChannels (ints and 'M' markers);
cfg_mask holds the per-BN keep masks. Walks the old and new module iterators
in lockstep: start_mask selects input channels, end_mask output channels.
Layers whose mask keeps <= 1 channel are dropped entirely (skip_next skips
their BN/ReLU companions). Returns the new, CUDA-resident model.
NOTE(review): order-sensitive iterator walking -- presumably relies on
sequential_model's exact Conv/BN/ReLU layout; verify before reuse elsewhere.
NOTE(review): body indentation appears lost in the notebook export.
"""
filtered_cfg = []
#remove all layers with zero or one channel
for elem in cfg:
if type(elem) is int and elem > 1:
filtered_cfg.append(elem)
elif type(elem) is str:
filtered_cfg.append(elem)
cfg = filtered_cfg
newmodel = sequential_model(layers=cfg)
newmodel.cuda()
layer_id_in_cfg = 0
start_mask = torch.ones(3)
end_mask = cfg_mask[layer_id_in_cfg]
skip_linear = False
parameters = newmodel.modules()
# Advance past the root module and the feature nn.Sequential so `layer`
# points at the first real layer of the new model.
layer = next(parameters)
layer = next(parameters)
layer = next(parameters)
skip_next = 0
for m0 in model.modules():
if isinstance(layer, nn.MaxPool2d):
layer = next(parameters)
if skip_next > 0:
skip_next -= 1
continue
if isinstance(m0, nn.BatchNorm2d):
# Copy only the kept channels' BN statistics and affine parameters.
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
layer.weight.data = m0.weight.data[idx1].clone()
layer.bias.data = m0.bias.data[idx1].clone()
layer.running_mean = m0.running_mean[idx1].clone()
layer.running_var = m0.running_var[idx1].clone()
layer_id_in_cfg += 1
start_mask = end_mask.clone()
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
layer = next(parameters)
elif isinstance(m0, nn.Conv2d):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
if np.size(idx1) <= 1:
# Whole layer pruned away: skip this conv plus its BN and ReLU.
skip_next = 2
layer_id_in_cfg += 1
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
continue
#print('In shape: {:d} Out shape:{:d}'.format(idx0.shape[0], idx1.shape[0]))
# Slice input channels (idx0) then output channels (idx1) of the kernel.
w = m0.weight.data[:, idx0, :, :].clone()
w = w[idx1, :, :, :].clone()
layer.weight.data = w.clone()
layer = next(parameters)
# m1.bias.data = m0.bias.data[idx1].clone()
elif isinstance(m0, nn.Linear):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
layer.weight.data = m0.weight.data[:, idx0].clone()
#layer = next(parameters)
elif isinstance(m0, nn.ReLU):
layer = next(parameters)
return newmodel
def prune_model(model, percent=0.3):
    """Prune the weakest `percent` of BN channels and return the slimmed model.

    Also saves the pruned architecture spec and weights to 'pruned_model.pt'.
    """
    cfg, cfg_mask = selectChannels(model, percent)
    #print(cfg)
    pruned = transfer_params(cfg, cfg_mask, model)
    torch.save({'cfg': cfg, 'state_dict': pruned.state_dict()}, f='pruned_model.pt')
    return pruned
model_sparsity = train_model(sequential_model(), epochs=10, sparsity=True, filename='epochs10_sparsity')
#model = train_model(sequential_model(), epochs=10, sparsity=False, filename='epochs10_no_sparsity')
#model = load_model(checkpoint_path="epochs10_sparsity.pth.tar", model_path="epochs10_sparsity_checkpoint.pth.tar")
model_list = []
fine_tune_epochs = 3
steps = 20
for i in range(1, steps):
print("Pruning ratio: " + str(i/steps))
raw_pruned = prune_model(model_sparsity, i/steps)
test_error = float(test(raw_pruned)[0])
fine_tuned = train_model(raw_pruned, epochs=fine_tune_epochs)
test_error_fine_tuned = float(test(fine_tuned)[0])
model_list.append({'model': fine_tuned, 'test_error': test_error,
'fine_tuned_error': test_error_fine_tuned, 'prune_ratio': i/steps,
'fine_tune_epochs': fine_tune_epochs})
test_error = []
fine_tuned_error = []
prune_ratio = []
num_parameters = []
for prune_set in model_list:
test_error.append(prune_set['test_error'])
fine_tuned_error.append(prune_set['fine_tuned_error'])
prune_ratio.append(prune_set['prune_ratio'])
num_parameters.append(sum(p.numel() for p in prune_set['model'].parameters()))
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.plot(prune_ratio, fine_tuned_error, color='tab:red', label='fine tuned acc')
ax1.plot(prune_ratio, test_error, color='tab:green', label='raw acc')
ax1.set_xlabel('prune ratio')
ax1.set_ylabel('fine tuned accuracy')
ax1.tick_params(axis='y', labelcolor=color)
plt.legend()
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('num param', color=color) # we already handled the x-label with ax1
ax2.plot(prune_ratio, num_parameters, color=color, label='amount parameters')
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_yscale('linear')
plt.legend()
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.grid(axis='both', color='black', linestyle=':', linewidth=1)
plt.show()
result = zip(prune_ratio, num_parameters, fine_tuned_error)
for ratio, num, err in result:
print('ratio: {:f}, error: {:f}, param:{:d}'.format(ratio, err, num))
"""safed = torch.load('pruned_model.pt')
structure = safed['cfg']
weights = safed['state_dict']
pruned_model = sequential_model(structure)
pruned_model.load_state_dict(weights)
pruned_model.cuda()
prec, loss = test(pruned_model)"""
summary(model_list[18]['model'], (3, 32, 32))
```
| github_jupyter |
# Credit Risk Resampling Techniques
```
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from pathlib import Path
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import ClusterCentroids
from imblearn.combine import SMOTEENN
```
# Read the CSV into DataFrame
```
# Load the data
file_path = Path('Resources/lending_data.csv')
df = pd.read_csv(file_path)
df.tail()
```
# Split the Data into Training and Testing
```
# Create our features
X = df.drop(columns='loan_status')
# Create our target
y = df.loan_status.to_frame('loan_status')
# Check the balance of our target values
y['loan_status'].value_counts()
# Create X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test_split(X,
y,
random_state=1,
stratify=y)
X_train.shape
```
## Data Pre-Processing
Scale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_test`).
```
#need to transform homeowner column from string to integer first
encoder = LabelEncoder()
#transform x test and train with encoder
X_train['homeowner'] = encoder.fit_transform(X_train['homeowner'])
X_test['homeowner'] = encoder.transform(X_test['homeowner'])
# Create the StandardScaler instance
scaler = StandardScaler()
# Fit the Standard Scaler with the training data
# When fitting scaling functions, only train on the training dataset
X_scaler = scaler.fit(X_train)
# Scale the training and testing data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
```
# Simple Logistic Regression
```
#simple logistic regression
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# Calculated the balanced accuracy score
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
```
# Oversampling
In this section, you will compare two oversampling algorithms to determine which algorithm results in the best performance. You will oversample the data using the naive random oversampling algorithm and the SMOTE algorithm. For each algorithm, be sure to complete the following steps:
1. View the count of the target classes using `Counter` from the collections library.
3. Use the resampled data to train a logistic regression model.
3. Calculate the balanced accuracy score from sklearn.metrics.
4. Print the confusion matrix from sklearn.metrics.
5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests
### Naive Random Oversampling
```
# Resample the training data with the RandomOversampler
X_train_resampled, y_train_resampled = RandomOverSampler(random_state=1).fit_resample(X_train_scaled, y_train)
# View the count of target classes with Counter
Counter(y_train_resampled.loan_status)
# Train the Logistic Regression model using the resampled data
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train_resampled, y_train_resampled)
# Make predictions using the test data (scaled)
y_pred = model.predict(X_test_scaled)
# Calculated the balanced accuracy score
balanced_accuracy_score(y_test, y_pred)
print(f"Balanced accuracy score using Naive Random Oversampling: {balanced_accuracy_score(y_test, y_pred)}")
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(f"Imbalanced classification report: {classification_report_imbalanced(y_test, y_pred)}")
```
### SMOTE Oversampling
```
# Resample the training data with SMOTE
X_train_resampled, y_train_resampled = SMOTE(random_state=1).fit_resample(X_train_scaled, y_train)
# View the count of target classes with Counter
Counter(y_train_resampled.loan_status)
# Train the Logistic Regression model using the resampled data
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train_resampled, y_train_resampled)
# predict
y_pred = model.predict(X_test_scaled)
# Calculate the balanced accuracy score
balanced_accuracy_score(y_test, y_pred)
print(f"Balanced accuracy score using SMOTE: {balanced_accuracy_score(y_test, y_pred)}")
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(f"Imbalanced classification report: {classification_report_imbalanced(y_test, y_pred)}")
```
# Undersampling
In this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the following steps:
1. View the count of the target classes using `Counter` from the collections library.
3. Use the resampled data to train a logistic regression model.
3. Calculate the balanced accuracy score from sklearn.metrics.
4. Display the confusion matrix from sklearn.metrics.
5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests
```
# Resample the data using the ClusterCentroids resampler
X_train_resampled, y_train_resampled = ClusterCentroids(random_state=1).fit_resample(X_train_scaled, y_train)
# View the count of target classes with Counter
Counter(y_train_resampled.loan_status)
# Train the Logistic Regression model using the resampled data
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train_resampled, y_train_resampled)
#predict
y_pred = model.predict(X_test_scaled)
# Calculate the balanced accuracy score
print(f"Balanced accuracy score using ClusterCentroids: {balanced_accuracy_score(y_test, y_pred)}")
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(f'Imbalanced Classification Report: {classification_report_imbalanced(y_test, y_pred)}')
```
# Combination (Over and Under) Sampling
In this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the following steps:
1. View the count of the target classes using `Counter` from the collections library.
3. Use the resampled data to train a logistic regression model.
3. Calculate the balanced accuracy score from sklearn.metrics.
4. Display the confusion matrix from sklearn.metrics.
5. Generate a classification report using the `imbalanced_classification_report` from imbalanced-learn.
Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests
```
# Resample the training data with SMOTEENN
X_train_resampled, y_train_resampled = SMOTEENN(random_state=1).fit_resample(X_train_scaled, y_train)
# View the count of target classes with Counter
Counter(y_train_resampled.loan_status)
# Train the Logistic Regression model using the resampled data
model = LogisticRegression(solver='lbfgs', random_state=1)
model.fit(X_train_resampled, y_train_resampled)
y_pred = model.predict(X_test_scaled)
# Calculate the balanced accuracy score
print(f"Balanced accuracy score using SMOTEENN: {balanced_accuracy_score(y_test, y_pred)}")
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
```
# Final Questions
1. Which model had the best balanced accuracy score?
>The 3 models with highest scores were;
>SMOTEENN: 0.9946414201183431
>SMOTE: 0.9946414201183431
>Naive Random Oversampling: 0.994641420118343
2. Which model had the best recall score?
>SMOTEENN - HIGH:1.00,LOW:0.99
>SMOTE - HIGH:1.00,LOW:0.99
>NAIVE - HIGH:1.00 ,LOW:0.99
3. Which model had the best geometric mean score?
>The model with the highest mean geo score used ClusterCentroids with an avg / total = 1.00
| github_jupyter |
# Overfitting y Regularización
El **overfitting** o sobreajuste es otro problema común al entrenar un modelo de aprendizaje automático. Consiste en entrenar modelos que aprenden a la perfección los datos de entrenamiento, perdiendo de esta forma generalidad. De modo, que si al modelo se le pasan datos nuevos que jamás ha visto, no será capaz de realizar una buena predicción.
Existe un problema opuesto al overfitting conocido como **underfitting** o subajuste, en el que el modelo no logra realizar una predicción ni siquiera cercana a los datos de entrenamiento y esta lejos de hacer una generalización.

Para evitar el underfitting y el overfitting se pueden utilizar curvas de **loss**, **f1_score** o **accuracy** utilizando los datos de entrenamiento y validación. Haciendo un análisis sobre estas curvas se logra identificar estos problemas.
# Ejercicio
Utilizar el dataset [MNIST](http://yann.lecun.com/exdb/mnist/) para identificar los problemas de **underfitting** y **overfitting**, utilizando una ANN de capas lineales.
```
#-- Descomprimimos el dataset
# !rm -r mnist
# !unzip mnist.zip
#--- Buscamos las direcciones de cada archivo de imagen
from glob import glob
train_files = glob('./mnist/train/*/*.png')
valid_files = glob('./mnist/valid/*/*.png')
test_files = glob('./mnist/test/*/*.png')
train_files[0]
#--- Ordenamos los datos de forma aleatoria para evitar sesgos
import numpy as np
np.random.shuffle(train_files)
np.random.shuffle(valid_files)
np.random.shuffle(test_files)
len(train_files), len(valid_files), len(test_files)
#--- Cargamos los datos de entrenamiento en listas
from PIL import Image
N_train = len(train_files)
X_train = []
Y_train = []
for i, train_file in enumerate(train_files):
Y_train.append( int(train_file.split('/')[3]) )
X_train.append(np.array(Image.open(train_file)))
#--- Cargamos los datos de validación en listas
N_valid = len(valid_files)
X_valid = []
Y_valid = []
for i, valid_file in enumerate(valid_files):
Y_valid.append( int(valid_file.split('/')[3]) )
X_valid.append( np.array(Image.open(valid_file)) )
#--- Cargamos los datos de testeo en listas
N_test = len(test_files)
X_test = []
Y_test = []
for i, test_file in enumerate(test_files):
Y_test.append( int(test_file.split('/')[3]) )
X_test.append( np.array(Image.open(test_file)) )
#--- Visualizamos el tamaño de cada subset
len(X_train), len(X_valid), len(X_test)
#--- Visualizamos la distribución de clases en cada subset
from PIL import Image
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15,5))
plt.subplot(1,3,1)
plt.hist(np.sort(Y_train))
plt.xlabel('class')
plt.ylabel('counts')
plt.title('Train set')
plt.subplot(1,3,2)
plt.hist(np.sort(Y_valid))
plt.xlabel('class')
plt.ylabel('counts')
plt.title('Valid set')
plt.subplot(1,3,3)
plt.hist(np.sort(Y_test))
plt.xlabel('class')
plt.ylabel('counts')
plt.title('Test set')
plt.show()
#-- Visualizamos los datos
fig = plt.figure(figsize=(8,8))
for i in range(4):
plt.subplot(2,2,i+1)
plt.imshow(X_test[i*15])
plt.title(Y_test[i*15])
plt.axis(False)
plt.show()
#--- Convetimos las listas con los datos a tensores de torch
import torch
from torch.autograd import Variable
X_train = Variable(torch.from_numpy(np.array(X_train))).float()
Y_train = Variable(torch.from_numpy(np.array(Y_train))).long()
X_valid = Variable(torch.from_numpy(np.array(X_valid))).float()
Y_valid = Variable(torch.from_numpy(np.array(Y_valid))).long()
X_test = Variable(torch.from_numpy(np.array(X_test))).float()
Y_test = Variable(torch.from_numpy(np.array(Y_test))).long()
X_train.data.size()
#--- Definimos una función que nos permita entrenar diferentes modelos de ANN
from sklearn.metrics import f1_score
def train_valid(model, n_epoch, optimizer, criterion):
"""Train `model` full-batch for `n_epoch` epochs and plot learning curves.

Uses the module-level X_train/Y_train and X_valid/Y_valid tensors; images
are flattened before the forward pass. After training, plots loss, macro
f1_score and accuracy for both splits side by side (for spotting
under/overfitting).
NOTE(review): full-batch gradient steps, no mini-batching; validation is run
without torch.no_grad(), which is wasteful but correct.
NOTE(review): body indentation appears lost in the notebook export.
"""
loss_train = []
f1_train = []
acc_train = []
loss_valid = []
f1_valid = []
acc_valid = []
for epoch in range(n_epoch):
# --- training step on the whole training set ---
model.train()
Xtr = X_train.view(X_train.size(0), -1)
Y_pred = model(Xtr)
loss = criterion(Y_pred,Y_train)
loss_train.append(loss.item())
Y_pred = torch.argmax(Y_pred, 1)
f1_train.append( f1_score(Y_train,Y_pred, average='macro') )
acc = sum(Y_train == Y_pred)/len(Y_pred)
acc_train.append(acc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print( 'Epoch [{}/{}], loss: {}. f1:{} acc: {} '.format(epoch+1,n_epoch,loss_train[-1], f1_train[-1], acc_train[-1]) )
# --- validation metrics for the same epoch ---
model.eval()
Xvl = X_valid.view(X_valid.size(0), -1)
Y_pred = model(Xvl)
loss = criterion(Y_pred,Y_valid)
loss_valid.append(loss.item())
Y_pred = torch.argmax(Y_pred, 1)
f1_valid.append( f1_score(Y_valid, Y_pred, average='macro') )
acc = sum(Y_valid == Y_pred)/len(Y_pred)
acc_valid.append(acc)
# --- learning curves: loss / f1 / accuracy, train vs valid ---
fig = plt.figure(figsize=(15,5))
plt.subplot(1,3,1)
plt.plot(range(n_epoch), loss_train, label='train')
plt.plot(range(n_epoch), loss_valid, label='valid')
plt.xlabel('n_epoch')
plt.ylabel('loss')
plt.legend()
plt.grid()
plt.subplot(1,3,2)
plt.plot(range(n_epoch), f1_train, label='train')
plt.plot(range(n_epoch), f1_valid, label='valid')
plt.xlabel('n_epoch')
plt.ylabel('f1_score')
plt.legend()
plt.grid()
plt.subplot(1,3,3)
plt.plot(range(n_epoch), acc_train, label='train')
plt.plot(range(n_epoch), acc_valid, label='valid')
plt.xlabel('n_epoch')
plt.ylabel('accuracy')
plt.legend()
plt.grid()
```
## Underfitting
El **underfitting** o sub ajuste se puede presentar en las siguientes situaciones:
* **Finalización temprana**: Cuando el modelo se entrena hasta una época temprana a pesar de que la tendencia indica una posible obtención de mejores resultados.
* **Modelo Simple**: Cuando el modelo es tan básico que no es capaz de extraer ningún tipo de patrón efectivo que le permita hacer una generalización de los datos.
```
#--- Definimos una ANN simple para identificar un error de underfitting
input_dim = 28*28
out_dim = 10
model = torch.nn.Sequential(
torch.nn.Linear(input_dim, out_dim)
)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.CrossEntropyLoss()
train_valid(model,30,optimizer,criterion)
#-- Evaluamos el modelo entrenado con el set de testeo
model.eval()
Xts = X_test.view(X_test.size(0), -1)
Y_pred = model(Xts)
loss = criterion(Y_pred,Y_test)
Y_pred = torch.argmax(Y_pred, 1)
f1 = f1_score(Y_test, Y_pred, average='macro')
acc = sum(Y_test == Y_pred)/len(Y_pred)
print('loss: {}, f1: {}, acc: {}'.format(loss.item(), f1, acc))
```
## Overfitting
El **overfitting** o sobreajuste es el caso opuesto al subajuste y se puede presentar en la siguiente situación:
* **Finalización tardía**: cuando el modelo se entrena durante más épocas de las necesarias, a pesar de que la tendencia de validación ya no indica una obtención de mejores resultados.
* **Modelo Complejo**: El modelo es tan complejo que aprendió perfectamente los datos de entrenamiento, perdiendo generalidad. Cuando el modelo vea datos nuevos, diferentes a los del entrenamiento, su predicción será errónea.
```
input_dim = 28*28
out_dim = 10
hidden = 60
model = torch.nn.Sequential(
torch.nn.Linear(input_dim, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, out_dim)
)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.CrossEntropyLoss()
train_valid(model,100,optimizer,criterion)
#-- Evaluamos el modelo entrenado con el set de testeo
model.eval()
Xts = X_test.view(X_test.size(0), -1)
Y_pred = model(Xts)
loss = criterion(Y_pred,Y_test)
Y_pred = torch.argmax(Y_pred, 1)
f1 = f1_score(Y_test, Y_pred, average='macro')
acc = sum(Y_test == Y_pred)/len(Y_pred)
print('loss: {}, f1: {}, acc: {}'.format(loss.item(), f1, acc))
```
## Regularización
Un mecanismo que permite evitar el sobreajuste es conocido como **regularización**. La cantidad de regularización afectará el rendimiento de validación del modelo. Muy poca regularización no resolverá el problema de sobreajuste. Demasiada regularización hará que el modelo sea mucho menos efectivo. La regularización actúa como una restricción sobre el conjunto de posibles funciones aprendibles.
<br>
Según [Ian Goodfellow](https://en.wikipedia.org/wiki/Ian_Goodfellow), "*La regularización es cualquier modificación que hacemos a un algoritmo de aprendizaje que tiene como objetivo reducir su error de generalización pero no su error de entrenamiento.*"
<br>
**Regularización de caída de peso**
La pérdida de peso es la técnica de regularización más común (implementada en Pytorch). En PyTorch, la caída de peso se proporciona como un parámetro para el optimizador *decay_weight*. En [este](https://pytorch.org/docs/stable/optim.html) enlace se muestran otros parámetros que pueden ser usados en los optimizadores.
A la caída de peso también se le llama:
* L2
* Ridge
Para la disminución de peso, agregamos un término de penalización en la actualización de los pesos:
$w \leftarrow w - \eta \nabla_{w} \mathcal{L} - \eta \alpha w$
Este nuevo término en la actualización lleva los parámetros $w$ ligeramente hacia cero, agregando algo de **decaimiento** en los pesos con cada actualización.
```
input_dim = 28*28
out_dim = 10
hidden = 60
model = torch.nn.Sequential(
torch.nn.Linear(input_dim, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, hidden),
torch.nn.ReLU(),
torch.nn.Linear(hidden, out_dim)
)
optimizer = torch.optim.Adam(model.parameters(), weight_decay=0.01)
criterion = torch.nn.CrossEntropyLoss()
train_valid(model,100,optimizer,criterion)
#-- Evaluamos el modelo entrenado con el set de testeo
model.eval()
Xts = X_test.view(X_test.size(0), -1)
Y_pred = model(Xts)
loss = criterion(Y_pred,Y_test)
Y_pred = torch.argmax(Y_pred, 1)
f1 = f1_score(Y_test, Y_pred, average='macro')
acc = sum(Y_test == Y_pred)/len(Y_pred)
print('loss: {}, f1: {}, acc: {}'.format(loss.item(), f1, acc))
```
| github_jupyter |
# 概率潜在语义分析
概率潜在语义分析(probabilistic latent semantic analysis, PLSA),也称概率潜在语义索引(probabilistic latent semantic indexing, PLSI),是一种利用概率生成模型对文本集合进行话题分析的无监督学习方法。
模型最大特点是用隐变量表示话题,整个模型表示文本生成话题,话题生成单词,从而得到单词-文本共现数据的过程;假设每个文本由一个话题分布决定,每个话题由一个单词分布决定。
### **18.1.2 生成模型**
假设有单词集合 $W = $ {$w_{1}, w_{2}, ..., w_{M}$}, 其中M是单词个数;文本(指标)集合$D = $ {$d_{1}, d_{2}, ..., d_{N}$}, 其中N是文本个数;话题集合$Z = $ {$z_{1}, z_{2}, ..., z_{K}$},其中$K$是预先设定的话题个数。随机变量 $w$ 取值于单词集合;随机变量 $d$ 取值于文本集合,随机变量 $z$ 取值于话题集合。概率分布 $P(d)$、条件概率分布 $P(z|d)$、条件概率分布 $P(w|z)$ 皆属于多项分布,其中 $P(d)$ 表示生成文本 $d$ 的概率,$P(z|d)$ 表示文本 $d$ 生成话题 $z$ 的概率,$P(w|z)$ 表示话题 $z$ 生成单词 $w$ 的概率。
每个文本 $d$ 拥有自己的话题概率分布 $P(z|d)$,每个话题 $z$ 拥有自己的单词概率分布 $P(w|z)$;也就是说**一个文本的内容由其相关话题决定,一个话题的内容由其相关单词决定**。
生成模型通过以下步骤生成文本·单词共现数据:
(1)依据概率分布 $P(d)$,从文本(指标)集合中随机选取一个文本 $d$ , 共生成 $N$ 个文本;针对每个文本,执行以下操作;
(2)在文本$d$ 给定条件下,依据条件概率分布 $P(z|d)$, 从话题集合随机选取一个话题 $z$, 共生成 $L$ 个话题,这里 $L$ 是文本长度;
(3)在话题 $z$ 给定条件下,依据条件概率分布 $P(w|z)$ , 从单词集合中随机选取一个单词 $w$.
注意这里为叙述方便,假设文本都是等长的,现实中不需要这个假设。
生成模型中, 单词变量 $w$ 与文本变量 $d$ 是观测变量, 话题变量 $z$ 是隐变量, 也就是说模型生成的是单词-话题-文本三元组合 ($w, z ,d$)的集合, 但观测到的单词-文本二元组 ($w, d$)的集合, 观测数据表示为单词-文本矩阵 $T$的形式,矩阵 $T$ 的行表示单词,列表示文本, 元素表示单词-文本对($w, d$)的出现次数。
从数据的生成过程可以推出,文本-单词共现数据$T$的生成概率为所有单词-文本对($w,d$)的生成概率的乘积:
$P(T) = \prod_{w,d}P(w,d)^{n(w,d)}$
这里 $n(w,d)$ 表示 ($w,d$)的出现次数,单词-文本对出现的总次数是 $N*L$。 每个单词-文本对($w,d$)的生成概率由一下公式决定:
$P(w,d) = P(d)P(w|d)$
$= P(d)\sum_{z}P(w,z|d)$
$=P(d)\sum_{z}P(z|d)P(w|z)$
### **18.1.3 共现模型**
$P(w,d) = \sum_{z\in Z}P(z)P(w|z)P(d|z)$
虽然生成模型与共现模型在概率公式意义上是等价的,但是拥有不同的性质。生成模型刻画文本-单词共现数据生成的过程,共现模型描述文本-单词共现数据拥有的模式。
如果直接定义单词与文本的共现概率 $P(w,d)$, 模型参数的个数是 $O(M*N)$, 其中 $M$ 是单词数, $N$ 是文本数。 概率潜在语义分析的生成模型和共现模型的参数个数是 $O(M*K + N*K)$, 其中 $K$ 是话题数。 现实中 $K<<M$, 所以**概率潜在语义分析通过话题对数据进行了更简洁的表示,减少了学习过程中过拟合的可能性**。
### 算法 18.1 (概率潜在语义模型参数估计的EM算法)
输入: 设单词集合为 $W = ${$w_{1}, w_{2},..., w_{M}$}, 文本集合为 $D=${$d_{1}, d_{2},..., d_{N}$}, 话题集合为 $Z=${$z_{1}, z_{2},..., z_{K}$}, 共现数据 $\left \{ n(w_{i}, d_{j}) \right \}, i = 1,2,..., M, j = 1,2,...,N;$
输出: $P(w_{i}|z_{k})$ 和 $P(z_{k}|d_{j})$.
1. 设置参数 $P(w_{i}|z_{k})$ 和 $P(z_{k}|d_{j})$ 的初始值。
2. 迭代执行以下E步,M步,直到收敛为止。
E步:
$P(z_{k}|w_{i},d_{j})=\frac{P(w_{i}|z_{k})P(z_{k}|d_{j})}{\sum_{k=1}^{K}P(w_{i}|z_{k})P(z_{k}|d_{j})}$
M步:
$P(w_{i}|z_{k})=\frac{\sum_{j=1}^{N}n(w_{i},d_{j})P(z_{k}|w_{i},d_{j})}{\sum_{m=1}^{M}\sum_{j=1}^{N}n(w_{m},d_{j})P(z_{k}|w_{m},d_{j})}$
$P(z_{k}|d_{j}) = \frac{\sum_{i=1}^{M}n(w_{i},d_{j})P(z_{k}|w_{i},d_{j})}{n(d_{j})}$
#### 习题 18.3
```
import numpy as np
# Word-document co-occurrence counts: one row per word, one column per document.
X = np.asarray([
    [0, 0, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 1, 0, 0, 1],
    [0, 1, 0, 0, 0, 0, 0, 1, 0],
    [0, 0, 0, 0, 0, 0, 1, 0, 1],
    [1, 0, 0, 0, 0, 1, 0, 0, 0],
    [1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 1, 0, 1],
    [0, 0, 0, 0, 0, 2, 0, 0, 1],
    [1, 0, 1, 0, 0, 0, 0, 1, 0],
    [0, 0, 0, 1, 1, 0, 0, 0, 0],
])
X
X.shape
# Transpose so rows index documents and columns index words, which is the
# (n_d, n_w) layout PLSA.fit expects.
X = X.T
X
class PLSA:
    """Probabilistic Latent Semantic Analysis fitted with EM (algorithm 18.1).

    Args:
        K: number of latent topics.
        max_iter: number of EM iterations to run.
    """

    def __init__(self, K, max_iter):
        self.K = K
        self.max_iter = max_iter

    def fit(self, X):
        """Estimate P(w|z) and P(z|d) from a word-count matrix.

        Args:
            X: array of shape (n_d, n_w) where X[d, w] = n(w, d), the number
                of times word w occurs in document d.

        Returns:
            (p_w_z, p_z_d): P(w|z) with shape (K, n_w) and P(z|d) with
            shape (n_d, K).
        """
        X = np.asarray(X)
        n_d, n_w = X.shape
        # Random (unnormalized) initialization, as in the reference code;
        # the first M step renormalizes both distributions.
        p_z_d = np.random.rand(n_d, self.K)   # P(z|d)
        p_w_z = np.random.rand(self.K, n_w)   # P(w|z)
        for _ in range(self.max_iter):
            # E step: P(z|w,d) ∝ P(z|d) P(w|z), normalized over z.
            # Vectorized replacement for the original O(n_d*n_w*K) Python
            # triple loop — identical arithmetic via NumPy broadcasting.
            joint = p_z_d[:, None, :] * p_w_z.T[None, :, :]  # (n_d, n_w, K)
            norm = joint.sum(axis=2, keepdims=True)
            norm[norm == 0] = 1  # keep 0/0 as 0, matching the original guard
            p_z_dw = joint / norm
            # Counts weighted by the topic posteriors, reused by both updates.
            weighted = X[:, :, None] * p_z_dw  # (n_d, n_w, K)
            # M step, P(z|d): sum over words, divide by n(d_j).
            n_per_doc = X.sum(axis=1, keepdims=True).astype(float)
            n_per_doc[n_per_doc == 0] = 1
            p_z_d = weighted.sum(axis=1) / n_per_doc
            # M step, P(w|z): sum over documents, normalize over words.
            counts_zw = weighted.sum(axis=0).T  # (K, n_w)
            totals = counts_zw.sum(axis=1, keepdims=True)
            totals[totals == 0] = 1
            p_w_z = counts_zw / totals
        return p_w_z, p_z_d
# Reference implementation: https://github.com/lipiji/PG_PLSA/blob/master/plsa.py
# Fit a 2-topic PLSA model for up to 100 EM iterations on the example matrix.
model = PLSA(2, 100)
p_w_z, p_z_d = model.fit(X)
# Learned word-given-topic distribution, P(w|z).
p_w_z
# Learned topic-given-document distribution, P(z|d).
p_z_d
```
| github_jupyter |
## Training a differentially private LSTM model for name classification
In this tutorial we will build a differentially-private LSTM model to classify names to their source languages, which is the same task as in the tutorial **NLP From Scratch** (https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html). Since the objective of this tutorial is to demonstrate the effective use of an LSTM with privacy guarantees, we will be utilizing it in place of the bare-bones RNN model defined in the original tutorial. Specifically, we use the `DPLSTM` module from `opacus.layers.dp_lstm` to facilitate calculation of the per-example gradients, which are utilized in the addition of noise during application of differential privacy. `DPLSTM` has the same API and functionality as the `nn.LSTM`, with some restrictions (ex. we currently support single layers, the full list is given below).
## Dataset
First, let us download the dataset of names and their associated language labels as given in https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html. We train our differentially-private LSTM on the same dataset as in that tutorial.
```
import os
import requests
NAMES_DATASET_URL = "https://download.pytorch.org/tutorial/data.zip"
DATA_DIR = "names"
import zipfile
import urllib
def download_and_extract(dataset_url, data_dir):
    """Fetch a zip archive from *dataset_url* and unpack it into *data_dir*.

    The archive is saved to a temporary local file ("data.zip"), extracted,
    and the archive file is removed afterwards.
    """
    print("Downloading and extracting ...")
    archive_path = "data.zip"
    urllib.request.urlretrieve(dataset_url, archive_path)
    with zipfile.ZipFile(archive_path) as archive:
        archive.extractall(data_dir)
    os.remove(archive_path)
    print("Completed!")
# Fetch the names archive and unpack it under DATA_DIR.
download_and_extract(NAMES_DATASET_URL, DATA_DIR)
names_folder = os.path.join(DATA_DIR, 'data', 'names')
# One file per language (e.g. "Italian.txt"); collect their full paths.
all_filenames = []
for language_file in os.listdir(names_folder):
    all_filenames.append(os.path.join(names_folder, language_file))
print(os.listdir(names_folder))
import torch
import torch.nn as nn
class CharByteEncoder(nn.Module):
    """
    Encodes a UTF-8 string as a LongTensor of its byte values plus three
    special ids (start=256, end=257, pad=258), and decodes such tensors
    back into text.

    Examples:
    >>> encoder = CharByteEncoder()
    >>> t = encoder('Ślusàrski') # returns tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])
    >>> encoder.decode(t) # returns "<s>Ślusàrski</s>"
    """

    def __init__(self):
        super().__init__()
        self.start_token = "<s>"
        self.end_token = "</s>"
        self.pad_token = "<pad>"
        self.start_idx = 256
        self.end_idx = 257
        self.pad_idx = 258

    def forward(self, s: str, pad_to=0) -> torch.LongTensor:
        """
        Encode a string, wrapping its bytes in a start token <s>
        (id=self.start_idx) and an end token </s> (id=self.end_idx).

        Args:
            s: The string to encode.
            pad_to: If not zero, append self.pad_idx until the raw byte
                sequence reaches length `pad_to`. Defaults to 0.

        Returns:
            The encoded LongTensor of indices.
        """
        raw = s.encode()
        pad_count = max(0, pad_to - len(raw))
        ids = [self.start_idx]
        ids.extend(raw)
        ids.append(self.end_idx)
        ids.extend([self.pad_idx] * pad_count)
        return torch.LongTensor(ids)

    def decode(self, char_ids_tensor: torch.LongTensor) -> str:
        """
        The inverse of `forward`. Special ids are rendered as their tokens.
        """
        specials = {
            self.start_idx: self.start_token,
            self.end_idx: self.end_token,
            self.pad_idx: self.pad_token,
        }
        pieces = []
        pending = []  # raw byte values awaiting UTF-8 decoding
        for cid in char_ids_tensor.cpu().detach().tolist():
            if cid < 256:
                pending.append(cid)
                continue
            # Special id: flush any buffered bytes first, then emit the token.
            if pending:
                pieces.append(bytes(pending).decode())
                pending = []
            pieces.append(specials.get(cid, ""))
        if pending:  # trailing raw bytes
            pieces.append(bytes(pending).decode())
        return "".join(pieces)

    def __len__(self):
        """
        The length of our encoder space: 256 byte values plus 3 special
        ids (start, end, pad).

        Returns:
            259
        """
        return 259
```
## Training / Validation Set Preparation
```
from torch.nn.utils.rnn import pad_sequence
def padded_collate(batch, padding_idx=0):
    """Collate (sequence, label) pairs: pad all sequences to the longest
    one in the batch and stack the labels into a long tensor."""
    sequences = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    x = pad_sequence(sequences, batch_first=True, padding_value=padding_idx)
    y = torch.stack(labels).long()
    return x, y
from torch.utils.data import Dataset
from pathlib import Path
class NamesDataset(Dataset):
    """Dataset of (byte-encoded name, language-label id) pairs.

    Each file under *root* holds the names of one language; the file stem
    (e.g. "Italian") is used as the label name.
    """

    def __init__(self, root):
        self.root = Path(root)
        # One label per language file; order is arbitrary (set iteration).
        self.labels = list({langfile.stem for langfile in self.root.iterdir()})
        self.labels_dict = {label: i for i, label in enumerate(self.labels)}
        self.encoder = CharByteEncoder()
        self.samples = self.construct_samples()

    def __getitem__(self, i):
        return self.samples[i]

    def __len__(self):
        return len(self.samples)

    def construct_samples(self):
        """Read every language file and encode each name up front."""
        samples = []
        for langfile in self.root.iterdir():
            label_name = langfile.stem
            label_id = self.labels_dict[label_name]
            # The name files are UTF-8; be explicit so this also works on
            # platforms whose default text encoding differs.
            with open(langfile, "r", encoding="utf-8") as fin:
                for row in fin:
                    samples.append(
                        (self.encoder(row.strip()), torch.tensor(label_id).long())
                    )
        return samples

    def label_count(self):
        """Return a Counter mapping label name -> number of samples."""
        # Imported locally: the surrounding notebook never imports Counter,
        # which previously made this method raise NameError when called.
        from collections import Counter

        cnt = Counter()
        for _x, y in self.samples:
            label = self.labels[int(y)]
            cnt[label] += 1
        return cnt
VOCAB_SIZE = 256 + 3 # 256 alternatives in one byte, plus 3 special characters.
```
We split the dataset into a 80-20 split for training and validation.
```
# Use a cryptographically secure RNG for sampling if True (requires the
# torchcsprng package, checked below).
secure_rng = False
train_split = 0.8   # 80/20 train/validation split
test_every = 5      # evaluate every 5 epochs
batch_size = 800

ds = NamesDataset(names_folder)
train_len = int(train_split * len(ds))
test_len = len(ds) - train_len

print(f"{train_len} samples for training, {test_len} for testing")

if secure_rng:
    try:
        import torchcsprng as prng
    except ImportError as e:
        msg = (
            "To use secure RNG, you must install the torchcsprng package! "
            "Check out the instructions here: https://github.com/pytorch/csprng#installation"
        )
        raise ImportError(msg) from e

    generator = prng.create_random_device_generator("/dev/urandom")
else:
    generator = None

train_ds, test_ds = torch.utils.data.random_split(
    ds, [train_len, test_len], generator=generator
)

from torch.utils.data import DataLoader
from opacus.utils.uniform_sampler import UniformWithReplacementSampler

# Per-example sampling probability such that batches average `batch_size`.
# NOTE(review): the uniform-with-replacement sampler is presumably required
# to match the privacy engine's sampling assumptions — confirm against the
# Opacus docs before changing it to a plain shuffled loader.
sample_rate = batch_size / len(train_ds)

train_loader = DataLoader(
    train_ds,
    num_workers=8,
    pin_memory=True,
    generator=generator,
    batch_sampler=UniformWithReplacementSampler(
        num_samples=len(train_ds),
        sample_rate=sample_rate,
        generator=generator,
    ),
    collate_fn=padded_collate,
)

test_loader = DataLoader(
    test_ds,
    batch_size=2 * batch_size,
    shuffle=False,
    num_workers=8,
    pin_memory=True,
    collate_fn=padded_collate,
)
```
After splitting the dataset into a training and a validation set, we now have to convert the data into a numeric form suitable for training the LSTM model. For each name, we set a maximum sequence length of 15, and if a name is longer than the threshold, we truncate it (this rarely happens in this dataset!). If a name is smaller than the threshold, we add a dummy `#` character to pad it to the desired length. We also batch the names in the dataset and set a batch size of 256 for all the experiments in this tutorial. The function `line_to_tensor()` returns a tensor of shape [15, 256] where each element is the index (in `all_letters`) of the corresponding character.
## Training/Evaluation Cycle
The training and the evaluation functions `train()` and `test()` are defined below. During the training loop, the per-example gradients are computed and the parameters are updated subsequent to gradient clipping (to bound their sensitivity) and addition of noise.
```
from statistics import mean
def train(model, criterion, optimizer, train_loader, epoch, device="cuda:0"):
    """Train *model* for one epoch and print the mean accuracy and loss.

    If *optimizer* has a privacy engine attached, the current privacy
    spend (epsilon, delta, alpha) is appended to the report.
    """
    accuracies = []
    batch_losses = []
    for features, labels in tqdm(train_loader):
        features = features.to(device)
        labels = labels.to(device)
        logits = model(features)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        predictions = logits.argmax(-1)
        correct = float(predictions.eq(labels).sum())
        accuracies.append(correct / len(labels))
        batch_losses.append(float(loss))
    printstr = (
        f"\t Epoch {epoch}. Accuracy: {mean(accuracies):.6f} | Loss: {mean(batch_losses):.6f}"
    )
    try:
        privacy_engine = optimizer.privacy_engine
        epsilon, best_alpha = privacy_engine.get_privacy_spent()
        printstr += f" | (ε = {epsilon:.2f}, δ = {privacy_engine.target_delta}) for α = {best_alpha}"
    except AttributeError:
        # No privacy engine attached (non-private baseline run).
        pass
    print(printstr)
    return
def test(model, test_loader, privacy_engine, device="cuda:0"):
    """Evaluate *model* on *test_loader* and print the mean accuracy,
    plus the privacy spend when *privacy_engine* is given."""
    accuracies = []
    with torch.no_grad():
        for features, labels in tqdm(test_loader):
            features = features.to(device)
            labels = labels.to(device)
            predictions = model(features).argmax(-1)
            correct = float(predictions.eq(labels).sum())
            accuracies.append(correct / len(labels))
    printstr = "\n----------------------------\n" f"Test Accuracy: {mean(accuracies):.6f}"
    if privacy_engine:
        epsilon, best_alpha = privacy_engine.get_privacy_spent()
        printstr += f" (ε = {epsilon:.2f}, δ = {privacy_engine.target_delta}) for α = {best_alpha}"
    print(printstr + "\n----------------------------\n")
    return
```
## Hyper-parameters
There are two sets of hyper-parameters associated with this model. The first are hyper-parameters which we would expect in any machine learning training, such as the learning rate and batch size. The second set are related to the privacy engine, where for example we define the amount of noise added to the gradients (`noise_multiplier`), and the maximum L2 norm to which the per-sample gradients are clipped (`max_grad_norm`).
```
# Training hyper-parameters
epochs = 50
learning_rate = 2.0

# Privacy engine hyper-parameters
max_per_sample_grad_norm = 1.5  # L2 clipping bound for per-sample gradients
delta = 8e-5    # target delta of the (epsilon, delta)-DP guarantee
epsilon = 12.0  # target privacy budget over the whole training run
```
## Model
We define the name classification model in the cell below. Note that it is a simple char-LSTM classifier, where the input characters are passed through an `nn.Embedding` layer, and are subsequently input to the DPLSTM.
```
import torch
from torch import nn
from opacus.layers import DPLSTM
class CharNNClassifier(nn.Module):
    """Char-level classifier: embedding -> DPLSTM -> linear layer applied
    to the hidden state of the final timestep."""

    def __init__(
        self,
        embedding_size,
        hidden_size,
        output_size,
        num_lstm_layers=1,
        bidirectional=False,
        vocab_size=VOCAB_SIZE,
    ):
        super().__init__()
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.vocab_size = vocab_size
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        # DPLSTM mirrors nn.LSTM but supports per-example gradients,
        # which the privacy engine needs for clipping.
        self.lstm = DPLSTM(
            embedding_size,
            hidden_size,
            num_layers=num_lstm_layers,
            bidirectional=bidirectional,
            batch_first=True,
        )
        self.out_layer = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden=None):
        embedded = self.embedding(x)              # [B, T] -> [B, T, D]
        outputs, _ = self.lstm(embedded, hidden)  # -> [B, T, H]
        last_step = outputs[:, -1, :]             # -> [B, H]
        return self.out_layer(last_step)          # -> [B, C]
```
We now proceed to instantiate the objects (privacy engine, model and optimizer) for our differentially-private LSTM training. However, the `nn.LSTM` is replaced with a `DPLSTM` module which enables us to calculate per-example gradients.
```
# Set the device to run on a GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Define classifier parameters
embedding_size = 64
hidden_size = 128  # Number of neurons in hidden layer after LSTM
n_lstm_layers = 1
bidirectional_lstm = False

# The number of output classes equals the number of language labels.
model = CharNNClassifier(
    embedding_size,
    hidden_size,
    len(ds.labels),
    n_lstm_layers,
    bidirectional_lstm,
).to(device)
```
## Defining the privacy engine, optimizer and loss criterion for the problem
```
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

from opacus import PrivacyEngine

# Given the target (epsilon, delta), sample rate and epoch count, the
# engine chooses the per-step noise level for us.
privacy_engine = PrivacyEngine(
    model,
    sample_rate=sample_rate,
    max_grad_norm=max_per_sample_grad_norm,
    target_delta=delta,
    target_epsilon=epsilon,
    epochs=epochs,
    secure_rng=secure_rng,
)
# After attaching, optimizer.step() clips per-sample gradients and adds noise.
privacy_engine.attach(optimizer)
```
## Training the name classifier with privacy
Finally we can start training! We will be training for 50 epochs (where each epoch corresponds to a pass over the whole dataset). We will be reporting the privacy epsilon every `test_every` epochs. We will also benchmark this differentially-private model against a model without privacy and obtain almost identical performance. Further, the private model trained with Opacus incurs only minimal overhead in training time, with the differentially-private classifier only slightly slower (by a couple of minutes) than the non-private model.
```
from tqdm import tqdm

print("Train stats: \n")
for epoch in tqdm(range(epochs)):
    train(model, criterion, optimizer, train_loader, epoch, device=device)
    # Evaluate (and report the privacy spend) every `test_every` epochs.
    if test_every:
        if epoch % test_every == 0:
            test(model, test_loader, privacy_engine, device=device)

# Final evaluation after training completes.
test(model, test_loader, privacy_engine, device=device)
```
The differentially-private name classification model obtains a test accuracy of 0.73 with an epsilon of just under 12. This shows that we can achieve a good accuracy on this task, with minimal loss of privacy.
## Training the name classifier without privacy
We also run a comparison with a non-private model to see if the performance obtained with privacy is comparable to it. To do this, we keep the parameters such as learning rate and batch size the same, and only define a different instance of the model along with a separate optimizer.
```
# Non-private baseline: same architecture and data, but a separate optimizer
# with no privacy engine attached, so updates are plain SGD steps.
model_nodp = CharNNClassifier(
    embedding_size,
    hidden_size,
    len(ds.labels),
    n_lstm_layers,
    bidirectional_lstm,
).to(device)

optimizer_nodp = torch.optim.SGD(model_nodp.parameters(), lr=0.5)

for epoch in tqdm(range(epochs)):
    train(model_nodp, criterion, optimizer_nodp, train_loader, epoch, device=device)
    if test_every:
        if epoch % test_every == 0:
            # `privacy_engine=None` skips the privacy report in test().
            test(model_nodp, test_loader, None, device=device)

test(model_nodp, test_loader, None, device=device)
```
We run the training loop again, this time without privacy and for the same number of iterations.
The non-private classifier obtains a test accuracy of around 0.75 with the same parameters and number of epochs. We are effectively trading off performance on the name classification task for a lower loss of privacy.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TFP Probabilistic Layers: Variational Auto Encoder
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
In this example we show how to fit a Variational Autoencoder using TFP's "probabilistic layers."
### Dependencies & Prerequisites
```
#@title Install { display-mode: "form" }
# Colab form cell: choose which TensorFlow build to (re)install.
TF_Installation = 'TF2 Nightly (GPU)' #@param ['TF2 Nightly (GPU)', 'TF2 Stable (GPU)', 'TF1 Nightly (GPU)', 'TF1 Stable (GPU)','System']

if TF_Installation == 'TF2 Nightly (GPU)':
  !pip install -q --upgrade tf-nightly-gpu-2.0-preview
  print('Installation of `tf-nightly-gpu-2.0-preview` complete.')
elif TF_Installation == 'TF2 Stable (GPU)':
  !pip install -q --upgrade tensorflow-gpu==2.0.0-alpha0
  print('Installation of `tensorflow-gpu==2.0.0-alpha0` complete.')
elif TF_Installation == 'TF1 Nightly (GPU)':
  !pip install -q --upgrade tf-nightly-gpu
  print('Installation of `tf-nightly-gpu` complete.')
elif TF_Installation == 'TF1 Stable (GPU)':
  !pip install -q --upgrade tensorflow-gpu
  print('Installation of `tensorflow-gpu` complete.')
elif TF_Installation == 'System':
  pass  # use whatever TensorFlow is already installed
else:
  raise ValueError('Selection Error: Please select a valid '
                   'installation option.')

#@title Install { display-mode: "form" }
# Colab form cell: choose which TensorFlow Probability build to (re)install.
TFP_Installation = "Nightly" #@param ["Nightly", "Stable", "System"]

if TFP_Installation == "Nightly":
  !pip install -q tfp-nightly
  print("Installation of `tfp-nightly` complete.")
elif TFP_Installation == "Stable":
  !pip install -q --upgrade tensorflow-probability
  print("Installation of `tensorflow-probability` complete.")
elif TFP_Installation == "System":
  pass  # use the system-provided install
else:
  raise ValueError("Selection Error: Please select a valid "
                   "installation option.")
#@title Import { display-mode: "form" }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python import tf2
if not tf2.enabled():
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
assert tf2.enabled()
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers
tfd = tfp.distributions
```
### Make things Fast!
Before we dive in, let's make sure we're using a GPU for this demo.
To do this, select "Runtime" -> "Change runtime type" -> "Hardware accelerator" -> "GPU".
The following snippet will verify that we have access to a GPU.
```
# Warn if no GPU is attached; the notebook still runs on CPU, just slower.
if tf.test.gpu_device_name() != '/device:GPU:0':
  print('WARNING: GPU device not found.')
else:
  print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))
```
Note: if for some reason you cannot access a GPU, this colab will still work. (Training will just take longer.)
### Load Dataset
```
# Load MNIST; `with_info=True` also returns metadata (image shape, splits).
datasets, datasets_info = tfds.load(name='mnist',
                                    with_info=True,
                                    as_supervised=False)
def _preprocess(sample):
    """Scale an MNIST image to [0, 1] and stochastically binarize it.

    Returns (image, image): the VAE is trained to reconstruct its input.
    """
    scaled = tf.cast(sample['image'], tf.float32) / 255.  # Scale to unit interval.
    binarized = scaled < tf.random.uniform(tf.shape(scaled))  # Randomly binarize.
    return binarized, binarized
# NOTE(review): shuffle() is applied *after* batch(), so whole batches are
# shuffled rather than individual examples — confirm this is intended.
train_dataset = (datasets['train']
                 .map(_preprocess)
                 .batch(256)
                 .prefetch(tf.data.experimental.AUTOTUNE)
                 .shuffle(int(10e3)))
eval_dataset = (datasets['test']
                .map(_preprocess)
                .batch(256)
                .prefetch(tf.data.experimental.AUTOTUNE))
```
### VAE Code Golf
#### Specify model.
```
input_shape = datasets_info.features['image'].shape
encoded_size = 16  # dimensionality of the latent code
base_depth = 32    # base number of convolution filters

# Standard-normal prior over the latent code.
prior = tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1),
                        reinterpreted_batch_ndims=1)

# Encoder: conv stack ending in a multivariate normal (lower-triangular
# scale) over the latent code; the activity regularizer adds the
# KL(q(z|x) || prior) term to the training loss.
encoder = tfk.Sequential([
    tfkl.InputLayer(input_shape=input_shape),
    tfkl.Lambda(lambda x: tf.cast(x, tf.float32) - 0.5),  # center inputs
    tfkl.Conv2D(base_depth, 5, strides=1,
                padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2D(base_depth, 5, strides=2,
                padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2D(2 * base_depth, 5, strides=1,
                padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2D(2 * base_depth, 5, strides=2,
                padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2D(4 * encoded_size, 7, strides=1,
                padding='valid', activation=tf.nn.leaky_relu),
    tfkl.Flatten(),
    tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size),
               activation=None),
    tfpl.MultivariateNormalTriL(
        encoded_size,
        activity_regularizer=tfpl.KLDivergenceRegularizer(prior)),
])

# Decoder: transposed-conv stack ending in an independent Bernoulli
# distribution per pixel.
decoder = tfk.Sequential([
    tfkl.InputLayer(input_shape=[encoded_size]),
    tfkl.Reshape([1, 1, encoded_size]),
    tfkl.Conv2DTranspose(2 * base_depth, 7, strides=1,
                         padding='valid', activation=tf.nn.leaky_relu),
    tfkl.Conv2DTranspose(2 * base_depth, 5, strides=1,
                         padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2DTranspose(2 * base_depth, 5, strides=2,
                         padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2DTranspose(base_depth, 5, strides=1,
                         padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2DTranspose(base_depth, 5, strides=2,
                         padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2DTranspose(base_depth, 5, strides=1,
                         padding='same', activation=tf.nn.leaky_relu),
    tfkl.Conv2D(filters=1, kernel_size=5, strides=1,
                padding='same', activation=None),
    tfkl.Flatten(),
    tfpl.IndependentBernoulli(input_shape, tfd.Bernoulli.logits),
])

# End-to-end VAE: input image -> q(z|x) sample -> p(x|z).
vae = tfk.Model(inputs=encoder.inputs,
                outputs=decoder(encoder.outputs[0]))
```
#### Do inference.
```
# The model's output is a distribution, so the loss is simply its
# negative log-likelihood of the observed image.
negloglik = lambda x, rv_x: -rv_x.log_prob(x)

vae.compile(optimizer=tf.optimizers.Adam(learning_rate=1e-3),
            loss=negloglik)

vae.fit(train_dataset,
        epochs=15,
        validation_data=eval_dataset)
```
### Look Ma, No ~~Hands~~Tensors!
```
# We'll just examine ten random digits.
x = next(iter(eval_dataset))[0][:10]
xhat = vae(x)
# The decoder head returns a tfd.Distribution, not a plain tensor.
assert isinstance(xhat, tfd.Distribution)
#@title Image Plot Util
import matplotlib.pyplot as plt
def display_imgs(x, y=None):
    """Show the images in *x* in a single row.

    Args:
        x: array-like of images, shape [n, H, W, C] (converted to ndarray
            if needed).
        y: optional one-hot labels; their argmax is used as the figure title.
    """
    if not isinstance(x, (np.ndarray, np.generic)):
        x = np.array(x)
    plt.ioff()
    n = x.shape[0]
    fig, axs = plt.subplots(1, n, figsize=(n, 1))
    if y is not None:
        fig.suptitle(np.argmax(y, axis=1))
    # `xrange` does not exist in Python 3 (this notebook targets TF2 /
    # Python 3), so use `range` here.
    for i in range(n):
        axs.flat[i].imshow(x[i].squeeze(), interpolation='none', cmap='gray')
        axs.flat[i].axis('off')
    plt.show()
    plt.close()
    plt.ion()
# Visualize reconstructions of the ten held-out digits.
print('Originals:')
display_imgs(x)

print('Decoded Random Samples:')
display_imgs(xhat.sample())

print('Decoded Modes:')
display_imgs(xhat.mode())

print('Decoded Means:')
display_imgs(xhat.mean())

# Now, let's generate ten never-before-seen digits.
z = prior.sample(10)
xtilde = decoder(z)
assert isinstance(xtilde, tfd.Distribution)

print('Randomly Generated Samples:')
display_imgs(xtilde.sample())

print('Randomly Generated Modes:')
display_imgs(xtilde.mode())

print('Randomly Generated Means:')
display_imgs(xtilde.mean())
```
| github_jupyter |
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# List every file available under the Kaggle input directory.
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
print ("Train data shape:", train.shape)
print ("Test data shape:", test.shape)
train.head()

plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)

# SalePrice is right-skewed; log-transforming it reduces the skew.
train.SalePrice.describe()
print ("Skew is:", train.SalePrice.skew())
plt.hist(train.SalePrice, color='blue')
plt.show()

target = np.log(train.SalePrice)
target.skew()
target = np.log(train.SalePrice)
print ("Skew is:", target.skew())
plt.hist(target, color='blue')
plt.show()

# Top and bottom correlations of numeric features with SalePrice.
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
corr = numeric_features.corr()
print (corr['SalePrice'].sort_values(ascending=False)[:5], '\n')
print (corr['SalePrice'].sort_values(ascending=False)[-5:])

# Median sale price per overall-quality rating.
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual',
                                  values='SalePrice', aggfunc=np.median)
quality_pivot
quality_pivot.plot(kind='bar', color='blue')
plt.xlabel('Overall Quality')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()

# Drop garage-area outliers from the training set.
train = train[train['GarageArea'] < 1200]

# The 25 features with the most missing values.
nulls = pd.DataFrame(train.isnull().sum().sort_values(ascending=False)[:25])
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
nulls
print ("Unique values are:", train.MiscFeature.unique())

categoricals = train.select_dtypes(exclude=[np.number])
categoricals.describe()

print ("Original: \n")
print (train.Street.value_counts(), "\n")

# One-hot encode Street (Grvl/Pave); drop_first=True leaves a single 0/1
# column.
train['enc_street'] = pd.get_dummies(train.Street, drop_first=True)
# Bug fix: the test-set column must be derived from *test*'s Street values.
# The original passed `train.Street` here, producing a misaligned (partly
# NaN) column on the test frame.
test['enc_street'] = pd.get_dummies(test.Street, drop_first=True)

print ('Encoded: \n')
print (train.enc_street.value_counts())
# Median sale price per sale condition.
condition_pivot = train.pivot_table(index='SaleCondition', values='SalePrice', aggfunc=np.median)
condition_pivot.plot(kind='bar', color='blue')
plt.xlabel('Sale Condition')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
def encode(x):
    """Binary-encode SaleCondition: 1 for 'Partial', 0 otherwise."""
    return int(x == 'Partial')
# Apply the binary SaleCondition encoding to both frames.
train['enc_condition'] = train.SaleCondition.apply(encode)
test['enc_condition'] = test.SaleCondition.apply(encode)

condition_pivot = train.pivot_table(index='enc_condition', values='SalePrice', aggfunc=np.median)
condition_pivot.plot(kind='bar', color='blue')
plt.xlabel('Encoded Sale Condition')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()

# Keep numeric columns only, fill gaps by interpolation, drop remaining NaNs.
data = train.select_dtypes(include=[np.number]).interpolate().dropna()
sum(data.isnull().sum() != 0)  # sanity check: no nulls remain

# Model the log of SalePrice (see the skew analysis above).
y = np.log(train.SalePrice)
X = data.drop(['SalePrice', 'Id'], axis=1)

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=42, test_size=.33)

from sklearn import linear_model
lr = linear_model.LinearRegression()
model = lr.fit(X_train, y_train)
print ("R^2 is: \n", model.score(X_test, y_test))
predictions = model.predict(X_test)
from sklearn.metrics import mean_squared_error
# mean_squared_error returns the MSE; take the square root so the printed
# value actually matches the "RMSE" label (the original printed plain MSE).
print ('RMSE is: \n', mean_squared_error(y_test, predictions) ** 0.5)
actual_values = y_test
plt.scatter(predictions, actual_values, alpha=.7,
            color='b') #alpha helps to show overlapping data
plt.xlabel('Predicted Price')
plt.ylabel('Actual Price')
plt.title('Linear Regression Model')
plt.show()

# Build the submission frame from the held-out test set.
try1 = pd.DataFrame()
try1['Id'] = test.Id
feats = test.select_dtypes(
    include=[np.number]).drop(['Id'], axis=1).interpolate()
predictions = model.predict(feats)
# Undo the log transform applied to SalePrice before training.
final_predictions = np.exp(predictions)
print ("Original predictions are: \n", predictions[:5], "\n")
print ("Final predictions are: \n", final_predictions[:5])
try1['SalePrice'] = final_predictions
try1.head()
try1.to_csv('submission1.csv', index=False)
```
| github_jupyter |
# Creating a Sentiment Analysis Web App
## Using PyTorch and SageMaker
_Deep Learning Nanodegree Program | Deployment_
---
Now that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.
## Instructions
Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.
> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.
## General Outline
Recall the general outline for SageMaker projects using a notebook instance.
1. Download or otherwise retrieve the data.
2. Process / Prepare the data.
3. Upload the processed data to S3.
4. Train a chosen model.
5. Test the trained model (typically using a batch transform job).
6. Deploy the trained model.
7. Use the deployed model.
For this project, you will be following the steps in the general outline with some modifications.
First, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.
In addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.
## Step 1: Downloading the data
As in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)
> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.
```
%mkdir ../data
!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
```
## Step 2: Preparing and Processing the data
Also, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.
```
import os
import glob
def read_imdb_data(data_dir='../data/aclImdb'):
    """Load the IMDb reviews from disk.

    Returns (data, labels): dicts keyed by 'train'/'test' and then by
    'pos'/'neg'. Reviews are raw text; labels are 1 (pos) / 0 (neg).
    """
    data = {}
    labels = {}

    for split in ['train', 'test']:
        data[split] = {}
        labels[split] = {}

        for sentiment in ['pos', 'neg']:
            reviews = []
            targets = []

            pattern = os.path.join(data_dir, split, sentiment, '*.txt')
            for filename in glob.glob(pattern):
                with open(filename) as review:
                    reviews.append(review.read())
                    # Here we represent a positive review by '1' and a negative review by '0'
                    targets.append(1 if sentiment == 'pos' else 0)

            data[split][sentiment] = reviews
            labels[split][sentiment] = targets

            assert len(reviews) == len(targets), \
                "{}/{} data size does not match labels size".format(split, sentiment)

    return data, labels
data, labels = read_imdb_data()
# Report the number of reviews loaded per split and sentiment.
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
    len(data['train']['pos']), len(data['train']['neg']),
    len(data['test']['pos']), len(data['test']['neg'])))
```
Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.
```
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
    """Prepare training and test sets from IMDb movie reviews."""
    # Merge positive and negative reviews (and their labels) for each split.
    data_train = data['train']['pos'] + data['train']['neg']
    labels_train = labels['train']['pos'] + labels['train']['neg']
    data_test = data['test']['pos'] + data['test']['neg']
    labels_test = labels['test']['pos'] + labels['test']['neg']
    # Shuffle each split so positives and negatives are interleaved; sklearn's
    # shuffle keeps every review paired with its label.
    data_train, labels_train = shuffle(data_train, labels_train)
    data_test, labels_test = shuffle(data_test, labels_test)
    # Return unified training data, test data, training labels, test labels.
    return data_train, data_test, labels_train, labels_test
# Combine and shuffle, producing flat train/test lists of reviews and labels.
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
```
Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.
```
# Spot-check a single raw (review, label) pair before any preprocessing.
print(train_X[100])
print(train_y[100])
```
The first step in processing the reviews is to make sure that any html tags that appear should be removed. In addition we wish to tokenize our input, that way words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.
```
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import *
import re
from bs4 import BeautifulSoup
def review_to_words(review):
    """Convert one raw review string into a list of stemmed tokens.

    Strips HTML tags, lower-cases and removes non-alphanumeric characters,
    drops English stopwords, and applies Porter stemming.

    :param review: raw review text (may contain HTML)
    :return: list of processed word tokens
    """
    nltk.download("stopwords", quiet=True)
    stemmer = PorterStemmer()
    # Build the stopword set once: set membership is O(1) per word, and
    # stopwords.words() would otherwise be re-read for every token.
    stops = set(stopwords.words("english"))
    text = BeautifulSoup(review, "html.parser").get_text()  # Remove HTML tags
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())  # Convert to lower case
    words = text.split()  # Split string into words
    words = [w for w in words if w not in stops]  # Remove stopwords
    # Reuse the single stemmer instead of constructing a new PorterStemmer
    # for every word (the original `stemmer` local was created but unused).
    words = [stemmer.stem(w) for w in words]
    return words
```
The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.
```
# TODO: Apply review_to_words to a review (train_X[100] or any other review)
# Shows the tokenized, stemmed, stopword-free form of one training review.
review_to_words(train_X[100])
```
**Question:** Above we mentioned that `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?
**Answer:**
The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.
```
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis")  # where to store cache files
os.makedirs(cache_dir, exist_ok=True)  # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
                    cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
    """Convert each review to words; read from cache if available.

    :param data_train: list of raw training review strings
    :param data_test: list of raw test review strings
    :param labels_train: training labels (cached alongside the words)
    :param labels_test: test labels (cached alongside the words)
    :param cache_dir: directory where the cache file lives
    :param cache_file: cache filename, or None to disable caching
    :return: (words_train, words_test, labels_train, labels_test)
    """
    # If cache_file is not None, try to read from it first
    cache_data = None
    if cache_file is not None:
        try:
            with open(os.path.join(cache_dir, cache_file), "rb") as f:
                cache_data = pickle.load(f)
            print("Read preprocessed data from cache file:", cache_file)
        # Only swallow the failures a missing/corrupt cache can cause; the
        # original bare `except:` also hid real bugs and KeyboardInterrupt.
        except (OSError, pickle.PickleError, EOFError):
            pass  # unable to read from cache, but that's okay — recompute below
    # If cache is missing, then do the heavy lifting
    if cache_data is None:
        # Preprocess training and test data to obtain words for each review
        words_train = [review_to_words(review) for review in data_train]
        words_test = [review_to_words(review) for review in data_test]
        # Write to cache file for future runs
        if cache_file is not None:
            cache_data = dict(words_train=words_train, words_test=words_test,
                              labels_train=labels_train, labels_test=labels_test)
            with open(os.path.join(cache_dir, cache_file), "wb") as f:
                pickle.dump(cache_data, f)
            print("Wrote preprocessed data to cache file:", cache_file)
    else:
        # Unpack data loaded from cache file
        words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
                cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
    return words_train, words_test, labels_train, labels_test
# Preprocess data (results are cached on disk after the first run)
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
```
## Transform the data
In the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.
Since we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.
### (TODO) Create a word dictionary
To begin with, we need to construct a way to map words that appear in the reviews to integers. Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model.
> **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.
```
import numpy as np
def build_dict(data, vocab_size = 5000):
    """Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.

    Indices start at 2 because 0 is reserved for 'no word' (padding) and 1
    for 'infrequent word', so only the top ``vocab_size - 2`` words get an
    entry.

    :param data: list of sentences, each a list of word tokens
    :param vocab_size: total vocabulary size including the two reserved ids
    :return: dict mapping word -> integer id (2 .. vocab_size - 1)
    """
    from collections import Counter

    # Count how often each word appears across all sentences.
    word_count = Counter()
    for sentence in data:
        word_count.update(sentence)
    # most_common() sorts by descending frequency using a stable sort, so
    # ties keep first-seen order — same result as the manual sorted() call.
    sorted_words = [word for word, _ in word_count.most_common()]
    word_dict = {}  # This is what we are building, a dictionary that translates words into integers
    for idx, word in enumerate(sorted_words[:vocab_size - 2]):  # -2 saves room for the 'no word'
        word_dict[word] = idx + 2                               # and 'infrequent' labels
    return word_dict
word_dict = build_dict(train_X)
```
**Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it make sense that these words appear frequently in the training set?
**Answer:**
```
# TODO: Use this space to determine the five most frequently appearing words in the training set.
# build_dict inserts words in descending frequency order, so the first five
# keys of word_dict are the five most frequent words.
count = 0
for word in word_dict:
    print(word)
    count += 1
    if count == 5:
        break
```
### Save `word_dict`
Later on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. As such, we will save it to a file now for future use.
```
data_dir = '../data/pytorch' # The folder we will use for storing data
if not os.path.exists(data_dir): # Make sure that the folder exists
    os.makedirs(data_dir)
# Persist word_dict so the inference code can reuse the same vocabulary later.
with open(os.path.join(data_dir, 'word_dict.pkl'), "wb") as f:
    pickle.dump(word_dict, f)
```
### Transform the reviews
Now that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.
```
def convert_and_pad(word_dict, sentence, pad=500):
    """Encode one tokenized sentence as a fixed-length list of integer ids.

    Words found in word_dict get their id, unknown words become INFREQ (1),
    and the tail is filled with NOWORD (0). Also returns the effective
    (truncated, un-padded) sentence length.
    """
    NOWORD = 0  # We will use 0 to represent the 'no word' category
    INFREQ = 1  # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict
    encoded = [word_dict.get(word, INFREQ) for word in sentence[:pad]]
    encoded.extend([NOWORD] * (pad - len(encoded)))
    return encoded, min(len(sentence), pad)
def convert_and_pad_data(word_dict, data, pad=500):
    """Apply convert_and_pad to every sentence in data.

    Returns a (len(data), pad) array of encoded sentences and a parallel
    1-D array of effective sentence lengths.
    """
    pairs = [convert_and_pad(word_dict, sentence, pad) for sentence in data]
    result = [encoded for encoded, _ in pairs]
    lengths = [length for _, length in pairs]
    return np.array(result), np.array(lengths)
# Encode and pad both splits; the *_len arrays hold the true review lengths.
train_X, train_X_len = convert_and_pad_data(word_dict, train_X)
test_X, test_X_len = convert_and_pad_data(word_dict, test_X)
```
As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processed. Does this look reasonable? What is the length of a review in the training set?
```
# Use this cell to examine one of the processed reviews to make sure everything is working as intended.
# Expect a length-500 list of ints (0 = padding, 1 = infrequent word).
train_X[0]
```
**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Why or why not might this be a problem?
**Answer:**
## Step 3: Upload the data to S3
As in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.
### Save the processed training dataset locally
It is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.
```
import pandas as pd
# Each row: label, length, then the 500 encoded word ids (no header/index).
pd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \
        .to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
```
### Uploading the training data
Next, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.
```
import sagemaker
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()  # default S3 bucket for this session
prefix = 'sagemaker/sentiment_rnn'  # S3 key prefix for this project's data
role = sagemaker.get_execution_role()
# Uploads the whole data directory — train.csv AND word_dict.pkl — to S3.
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
```
**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.
## Step 4: Build and Train the PyTorch Model
In the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects
- Model Artifacts,
- Training Code, and
- Inference Code,
each of which interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code.
We will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. You can see the provided implementation by running the cell below.
```
!pygmentize train/model.py
```
The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.
First we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. However, we can work on a small bit of the data to get a feel for how our training script is behaving.
```
import torch
import torch.utils.data
# Read in only the first 250 rows
train_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)
# Turn the input pandas dataframe into tensors
train_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()  # column 0 = labels
train_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()  # length + 500 word ids
# Build the dataset
train_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)
# Build the dataloader
train_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)
```
### (TODO) Writing the training method
Next we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.
```
def train(model, train_loader, epochs, optimizer, loss_fn, device):
for epoch in range(1, epochs + 1):
model.train()
total_loss = 0
for batch in train_loader:
batch_X, batch_y = batch
batch_X = batch_X.to(device)
batch_y = batch_y.to(device)
# TODO: Complete this train method to train the model provided.
model.zero_grad()
output = model.forward(batch_X)
loss = loss_fn(output, batch_y)
loss.backward() #using pytorch library
optimizer.step() #using pytorch library
total_loss += loss.data.item()
print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader)))
```
Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.
```
import torch.optim as optim
from train.model import LSTMClassifier
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# embedding_dim=32, hidden_dim=100, vocab_size=5000 (matches build_dict above)
model = LSTMClassifier(32, 100, 5000).to(device)
optimizer = optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()
# Quick smoke-test of the training loop on the 250-row sample.
train(model, train_sample_dl, 5, optimizer, loss_fn, device)
```
In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.
### (TODO) Training the model
When a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.
**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.
The way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.
```
from sagemaker.pytorch import PyTorch
# One GPU training instance; hyperparameters are passed to train.py as
# command-line arguments by SageMaker.
estimator = PyTorch(entry_point="train.py",
                    source_dir="train",
                    role=role,
                    framework_version='0.4.0',
                    train_instance_count=1,
                    train_instance_type='ml.p2.xlarge',
                    hyperparameters={
                        'epochs': 10,
                        'hidden_dim': 200,
                    })
estimator.fit({'training': input_data})
```
## Step 5: Testing the model
As mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.
## Step 6: Deploy the model for testing
Now that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.
There is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.
**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )
Since we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.
**NOTE:** When deploying a model you are asking SageMaker to launch a compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.
In other words **If you are no longer using a deployed endpoint, shut it down!**
**TODO:** Deploy the trained model.
```
# TODO: Deploy the trained model
# Launches a CPU inference instance; remember to delete the endpoint when done.
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
## Step 7 - Use the model for testing
Once deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.
```
# Model input format: length column first, then the 500 encoded word ids.
test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)
# We split the data into chunks and send each chunk separately, accumulating the results.
def predict(data, rows=512):
    """Send `data` to the endpoint in chunks of roughly `rows` rows and
    return all predictions as a single flat numpy array."""
    n_chunks = int(data.shape[0] / float(rows) + 1)
    predictions = np.array([])
    for chunk in np.array_split(data, n_chunks):
        predictions = np.append(predictions, predictor.predict(chunk))
    return predictions
predictions = predict(test_X.values)
predictions = [round(num) for num in predictions]  # threshold scores at 0.5
from sklearn.metrics import accuracy_score
accuracy_score(test_y, predictions)  # fraction of test reviews classified correctly
```
**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?
**Answer:**
### (TODO) More testing
We now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.
```
test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'
```
The question we now need to answer is, how do we send this review to our model?
Recall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.
- Removed any html tags and stemmed the input
- Encoded the review as a sequence of integers using `word_dict`
In order to process the review we will need to repeat these two steps.
**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. Remember that our model expects input of the form `review_length, review[500]`.
```
# TODO: Convert test_review into a form usable by the model and save the results in test_data
# TODO: Convert test_review into a form usable by the model and save the results in test_data
test_data = None
# Tokenize/stem the raw string, then encode and pad it to length 500.
test_data_review_to_words = review_to_words(test_review)
# NOTE(review): the narrative above says the model expects `review_length,
# review[500]`, but this sends only the encoded review without the length
# column — confirm the endpoint accepts this shape.
test_data = [np.array(convert_and_pad(word_dict, test_data_review_to_words)[0])]
```
Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.
```
predictor.predict(test_data)
```
Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.
### Delete the endpoint
Of course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.
```
estimator.delete_endpoint()
```
## Step 6 (again) - Deploy the model for the web app
Now that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.
As we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.
We will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.
When deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.
- `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.
- `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.
- `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.
- `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.
For the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.
### (TODO) Writing inference code
Before writing our custom inference code, we will begin by taking a look at the code which has been provided.
```
!pygmentize serve/predict.py
```
As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.
**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.
### Deploying the model
Now that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.
**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accommodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to send image data.
```
from sagemaker.predictor import RealTimePredictor
from sagemaker.pytorch import PyTorchModel
class StringPredictor(RealTimePredictor):
    """RealTimePredictor wrapper that submits raw strings as text/plain."""

    def __init__(self, endpoint_name, sagemaker_session):
        super().__init__(endpoint_name, sagemaker_session, content_type='text/plain')
# Point a new PyTorchModel at the training artifacts, but use our custom
# inference entry point from the serve/ directory.
model = PyTorchModel(model_data=estimator.model_data,
                     role = role,
                     framework_version='0.4.0',
                     entry_point='predict.py',
                     source_dir='serve',
                     predictor_cls=StringPredictor)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
### Testing the model
Now that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.
```
import glob
def test_reviews(data_dir='../data/aclImdb', stop=250):
    """Send up to `stop` raw test reviews per sentiment to the endpoint.

    :param data_dir: root of the extracted IMDb dataset
    :param stop: max number of files to send for each sentiment
    :return: (ground, results) — true 0/1 labels and the corresponding
             endpoint responses converted to floats
    """
    results = []
    ground = []
    # We make sure to test both positive and negative reviews
    for sentiment in ['pos', 'neg']:
        path = os.path.join(data_dir, 'test', sentiment, '*.txt')
        files = glob.glob(path)
        files_read = 0
        print('Starting ', sentiment, ' files')
        # Iterate through the files and send them to the predictor
        for f in files:
            with open(f) as review:
                # First, we store the ground truth (was the review positive or negative)
                if sentiment == 'pos':
                    ground.append(1)
                else:
                    ground.append(0)
                # Read in the review and convert to 'utf-8' for transmission via HTTP
                review_input = review.read().encode('utf-8')
                # Send the review to the predictor and store the results
                results.append(float(predictor.predict(review_input)))
            # Sending reviews to our endpoint one at a time takes a while so we
            # only send a small number of reviews
            files_read += 1
            if files_read == stop:
                break
    return ground, results
ground, results = test_reviews()
from sklearn.metrics import accuracy_score
# Overall accuracy of the deployed endpoint on the sampled reviews.
accuracy_score(ground, results)
```
As an additional test, we can try sending the `test_review` that we looked at earlier.
```
predictor.predict(test_review)
```
Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.
## Step 7 (again): Use the model for the web app
> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.
So far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.
<img src="Web App Diagram.svg">
The diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.
In the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and receive data from a SageMaker endpoint.
Lastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.
### Setting up a Lambda function
The first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.
#### Part A: Create an IAM Role for the Lambda function
Since we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.
Using the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.
In the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.
Lastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.
#### Part B: Create a Lambda function
Now it is time to actually create the Lambda function.
Using the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. Then, click on **Create Function**.
On the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below.
```python
# We need to use the low-level library to interact with SageMaker since the SageMaker API
# is not available natively through Lambda.
import boto3
def lambda_handler(event, context):
    """Forward the incoming review to the SageMaker endpoint and relay its answer.

    `event['body']` carries the raw review text; the return value is a proper
    Lambda-proxy HTTP response object containing the inference result.
    """
    # A runtime client is how Lambda talks to deployed SageMaker endpoints.
    sm_runtime = boto3.Session().client('sagemaker-runtime')

    # Ask the endpoint to score the review carried in the request body.
    endpoint_reply = sm_runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created
                                                ContentType = 'text/plain',              # The data format that is expected
                                                Body = event['body'])                    # The actual review

    # The reply is an HTTP response; its body holds the inference result.
    inference = endpoint_reply['Body'].read().decode('utf-8')

    return {
        'statusCode' : 200,
        'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },
        'body' : inference
    }
```
Once you have copy and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. You can determine the name of the endpoint using the code cell below.
```
# We have done part A step, part B change the:
#(1) endpoint name and (2) vocab in lambda_function.py
#After that, try testEvent
predictor.endpoint
```
Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.
### Setting up API Gateway
Now that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.
Using AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.
On the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.
Now we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.
Select the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.
For the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**. This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway.
Type the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.
The last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.
You have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.
## Step 4: Deploying our web app
Now that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier.
In the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\*\*REPLACE WITH PUBLIC API URL\*\***. Replace this string with the url that you wrote down in the last step and then save the file.
Now, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.
If you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!
> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.
**TODO:** Make sure that you include the edited `index.html` file in your project submission.
Now that your web app is working, try playing around with it and see how well it works.
**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?
**Answer:**
### Delete the endpoint
Remember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running so if you forget and leave it on you could end up with an unexpectedly large bill.
```
predictor.delete_endpoint()
```
| github_jupyter |
# MNIST distributed training and batch transform
The SageMaker Python SDK helps you deploy your models for training and hosting in optimized, production-ready containers in SageMaker. The SageMaker Python SDK is easy to use, modular, extensible and compatible with TensorFlow and MXNet. This tutorial focuses on how to create a convolutional neural network model to train the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) using TensorFlow distributed training.
## Set up the environment
First, we'll just set up a few things needed for this example
```
import sagemaker
from sagemaker import get_execution_role
from sagemaker.session import Session
sagemaker_session = sagemaker.Session()
region = sagemaker_session.boto_session.region_name
role = get_execution_role()
```
### Download the MNIST dataset
We'll now need to download the MNIST dataset, and upload it to a location in S3 after preparing for training.
```
import utils
from tensorflow.contrib.learn.python.learn.datasets import mnist
import tensorflow as tf
data_sets = mnist.read_data_sets('data', dtype=tf.uint8, reshape=False, validation_size=5000)
utils.convert_to(data_sets.train, 'train', 'data')
utils.convert_to(data_sets.validation, 'validation', 'data')
utils.convert_to(data_sets.test, 'test', 'data')
```
### Upload the data
We use the ```sagemaker.Session.upload_data``` function to upload our datasets to an S3 location. The return value inputs identifies the location -- we will use this later when we start the training job.
```
inputs = sagemaker_session.upload_data(path='data', key_prefix='data/DEMO-mnist')
```
# Construct a script for distributed training
Here is the full code for the network model:
```
!cat 'mnist.py'
```
## Create a training job
```
from sagemaker.tensorflow import TensorFlow
mnist_estimator = TensorFlow(entry_point='mnist.py',
role=role,
framework_version='1.11.0',
training_steps=1000,
evaluation_steps=100,
train_instance_count=2,
train_instance_type='ml.c4.xlarge')
mnist_estimator.fit(inputs)
```
The `fit()` method will create a training job in two ml.c4.xlarge instances. The logs above will show the instances doing training, evaluation, and incrementing the number of training steps.
In the end of the training, the training job will generate a saved model for TF serving.
## SageMaker's transformer class
After training, we use our TensorFlow estimator object to create a `Transformer` by invoking the `transformer()` method. This method takes arguments for configuring our options with the batch transform job; these do not need to be the same values as the one we used for the training job. The method also creates a SageMaker Model to be used for the batch transform jobs.
The `Transformer` class is responsible for running batch transform jobs, which will deploy the trained model to an endpoint and send requests for performing inference.
```
transformer = mnist_estimator.transformer(instance_count=1, instance_type='ml.m4.xlarge')
```
# Perform inference
Now that we've trained a model, we're going to use it to perform inference with a SageMaker batch transform job. The request handling behavior of the Endpoint deployed during the transform job is determined by the `mnist.py` script we looked at earlier.
## Run a batch transform job
For our batch transform job, we're going to use input data that contains 1000 MNIST images, located in the public SageMaker sample data S3 bucket. To create the batch transform job, we simply call `transform()` on our transformer with information about the input data.
```
input_bucket_name = 'sagemaker-sample-data-{}'.format(region)
input_file_path = 'batch-transform/mnist-1000-samples'
transformer.transform('s3://{}/{}'.format(input_bucket_name, input_file_path), content_type='text/csv')
```
Now we wait for the batch transform job to complete. We have a convenience method, `wait()`, that will block until the batch transform job has completed. We can call that here to see if the batch transform job is still running; the cell will finish running when the batch transform job has completed.
```
transformer.wait()
```
## Download the results
The batch transform job uploads its predictions to S3. Since we did not specify `output_path` when creating the Transformer, one was generated based on the batch transform job name:
```
print(transformer.output_path)
```
Now let's download the first ten results from S3:
```
import json
from six.moves.urllib import parse
import boto3
parsed_url = parse.urlparse(transformer.output_path)
bucket_name = parsed_url.netloc
prefix = parsed_url.path[1:]
s3 = boto3.resource('s3')
predictions = []
for i in range(10):
file_key = '{}/data-{}.csv.out'.format(prefix, i)
output_obj = s3.Object(bucket_name, file_key)
output = output_obj.get()["Body"].read().decode('utf-8')
predictions.extend(json.loads(output)['outputs']['classes']['int64Val'])
```
For demonstration purposes, we're also going to download the corresponding original input data so that we can see how the model did with its predictions.
```
import os
import matplotlib.pyplot as plt
from numpy import genfromtxt
plt.rcParams['figure.figsize'] = (2,10)
def show_digit(img, caption='', subplot=None):
    """Render a single 28x28 MNIST digit.

    Parameters
    ----------
    img : array-like
        Pixel data reshape-able to (28, 28) (e.g. a flat 784-vector).
    caption : str, optional
        Title drawn above the digit.
    subplot : matplotlib Axes, optional
        Axes to draw into; a fresh single-axes figure is created when omitted.
    """
    if subplot is None:  # PEP 8: compare against None with `is`, not `==`
        _, subplot = plt.subplots(1, 1)
    imgr = img.reshape((28, 28))
    subplot.axis('off')
    subplot.imshow(imgr, cmap='gray')
    # Title the axes we actually drew on; plt.title() targets the *current*
    # axes, which is wrong when the caller passes in its own subplot.
    subplot.set_title(caption)
tmp_dir = '/tmp/data'
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
for i in range(10):
input_file_name = 'data-{}.csv'.format(i)
input_file_key = '{}/{}'.format(input_file_path, input_file_name)
s3.Bucket(input_bucket_name).download_file(input_file_key, os.path.join(tmp_dir, input_file_name))
input_data = genfromtxt(os.path.join(tmp_dir, input_file_name), delimiter=',')
show_digit(input_data)
```
Here, we can see the original labels are:
```
7, 2, 1, 0, 4, 1, 4, 9, 5, 9
```
Now let's print out the predictions to compare:
```
print(', '.join(predictions))
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Customization basics: tensors and operations
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/customization/basics"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/customization/basics.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/customization/basics.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/customization/basics.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This is an introductory TensorFlow tutorial that shows how to:
* Import the required package
* Create and use tensors
* Use GPU acceleration
* Demonstrate `tf.data.Dataset`
## Import TensorFlow
To get started, import the `tensorflow` module. As of TensorFlow 2, eager execution is turned on by default. This enables a more interactive frontend to TensorFlow, the details of which we will discuss much later.
```
import tensorflow as tf
```
## Tensors
A Tensor is a multi-dimensional array. Similar to NumPy `ndarray` objects, `tf.Tensor` objects have a data type and a shape. Additionally, `tf.Tensor`s can reside in accelerator memory (like a GPU). TensorFlow offers a rich library of operations ([tf.add](https://www.tensorflow.org/api_docs/python/tf/add), [tf.matmul](https://www.tensorflow.org/api_docs/python/tf/matmul), [tf.linalg.inv](https://www.tensorflow.org/api_docs/python/tf/linalg/inv) etc.) that consume and produce `tf.Tensor`s. These operations automatically convert native Python types, for example:
```
print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))
# Operator overloading is also supported
print(tf.square(2) + tf.square(3))
```
Each `tf.Tensor` has a shape and a datatype:
```
x = tf.matmul([[1]], [[2, 3]])
print(x)
print(x.shape)
print(x.dtype)
```
The most obvious differences between NumPy arrays and `tf.Tensor`s are:
1. Tensors can be backed by accelerator memory (like GPU, TPU).
2. Tensors are immutable.
### NumPy Compatibility
Converting between a TensorFlow `tf.Tensor`s and a NumPy `ndarray` is easy:
* TensorFlow operations automatically convert NumPy ndarrays to Tensors.
* NumPy operations automatically convert Tensors to NumPy ndarrays.
Tensors are explicitly converted to NumPy ndarrays using their `.numpy()` method. These conversions are typically cheap since the array and `tf.Tensor` share the underlying memory representation, if possible. However, sharing the underlying representation isn't always possible since the `tf.Tensor` may be hosted in GPU memory while NumPy arrays are always backed by host memory, and the conversion involves a copy from GPU to host memory.
```
import numpy as np
ndarray = np.ones([3, 3])
print("TensorFlow operations convert numpy arrays to Tensors automatically")
tensor = tf.multiply(ndarray, 42)
print(tensor)
print("And NumPy operations convert Tensors to numpy arrays automatically")
print(np.add(tensor, 1))
print("The .numpy() method explicitly converts a Tensor to a numpy array")
print(tensor.numpy())
```
## GPU acceleration
Many TensorFlow operations are accelerated using the GPU for computation. Without any annotations, TensorFlow automatically decides whether to use the GPU or CPU for an operation—copying the tensor between CPU and GPU memory, if necessary. Tensors produced by an operation are typically backed by the memory of the device on which the operation executed, for example:
```
x = tf.random.uniform([3, 3])
print("Is there a GPU available: "),
print(tf.config.experimental.list_physical_devices("GPU"))
print("Is the Tensor on GPU #0: "),
print(x.device.endswith('GPU:0'))
```
### Device Names
The `Tensor.device` property provides a fully qualified string name of the device hosting the contents of the tensor. This name encodes many details, such as an identifier of the network address of the host on which this program is executing and the device within that host. This is required for distributed execution of a TensorFlow program. The string ends with `GPU:<N>` if the tensor is placed on the `N`-th GPU on the host.
### Explicit Device Placement
In TensorFlow, *placement* refers to how individual operations are assigned (placed on) a device for execution. As mentioned, when there is no explicit guidance provided, TensorFlow automatically decides which device to execute an operation and copies tensors to that device, if needed. However, TensorFlow operations can be explicitly placed on specific devices using the `tf.device` context manager, for example:
```
import time
def time_matmul(x):
    """Multiply x by itself ten times and print the wall-clock cost in ms."""
    t0 = time.time()
    for _ in range(10):
        tf.matmul(x, x)
    elapsed = time.time() - t0
    # Report the total time for the ten multiplications, in milliseconds.
    print("10 loops: {:0.2f}ms".format(1000 * elapsed))
# Force execution on CPU
print("On CPU:")
with tf.device("CPU:0"):
x = tf.random.uniform([1000, 1000])
assert x.device.endswith("CPU:0")
time_matmul(x)
# Force execution on GPU #0 if available
if tf.config.experimental.list_physical_devices("GPU"):
print("On GPU:")
with tf.device("GPU:0"): # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc.
x = tf.random.uniform([1000, 1000])
assert x.device.endswith("GPU:0")
time_matmul(x)
```
## Datasets
This section uses the [`tf.data.Dataset` API](https://www.tensorflow.org/guide/datasets) to build a pipeline for feeding data to your model. The `tf.data.Dataset` API is used to build performant, complex input pipelines from simple, re-usable pieces that will feed your model's training or evaluation loops.
### Create a source `Dataset`
Create a *source* dataset using one of the factory functions like [`Dataset.from_tensors`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensors), [`Dataset.from_tensor_slices`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices), or using objects that read from files like [`TextLineDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TextLineDataset) or [`TFRecordDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset). See the [TensorFlow Dataset guide](https://www.tensorflow.org/guide/datasets#reading_input_data) for more information.
```
ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])
# Create a CSV file
import tempfile
_, filename = tempfile.mkstemp()
with open(filename, 'w') as f:
f.write("""Line 1
Line 2
Line 3
""")
ds_file = tf.data.TextLineDataset(filename)
```
### Apply transformations
Use the transformations functions like [`map`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map), [`batch`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), and [`shuffle`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle) to apply transformations to dataset records.
```
ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2)
ds_file = ds_file.batch(2)
```
### Iterate
`tf.data.Dataset` objects support iteration to loop over records:
```
print('Elements of ds_tensors:')
for x in ds_tensors:
print(x)
print('\nElements in ds_file:')
for x in ds_file:
print(x)
```
| github_jupyter |
Давайте решим следующую задачу.<br>
Необходимо написать робота, который будет скачивать новости с сайта Лента.Ру и фильтровать их в зависимости от интересов пользователя. От пользователя требуется отмечать интересующие его новости, по которым система будет выделять области его интересов.<br>
Для начала давайте разберемся с обработкой собственно текстов. Самостоятельно это можно сделать прочитав одну из двух книг: <a href='https://miem.hse.ru/clschool/the_book'>поновее</a> и <a href='http://clschool.miem.edu.ru/uploads/swfupload/files/011a69a6f0c3a9c6291d6d375f12aa27e349cb67.pdf'>постарше</a> (в старой хорошо разобраны классификация и кластеризация, в новой - тематическое моделирование и рядом лежит видео лекций).<br>
Для обработки текста проводится два этапа анализа: <b>графематический</b> (выделение предложений и слов) и <b>морфологический</b> (определение начальной формы слова, его части речи и грамматических параметров). Этап синтаксического анализа мы разбирать не будем, так как его информация требуется не всегда.<br>
Задачей графематического анализа является разделение текста на составные части - врезки, абзацы, предложения, слова. В таких задачах как машинный перевод, точность данного этапа может существенно влиять на точность получаемых результатов. Например, точка, используемая для сокращений, может быть воспринята как конец предложения, что полностью разорвет его семантику.<br>
Но в некоторых задачах (например нашей) используется подход <b>"мешок слов"</b> - текст воспринимается как неупорядоченное множество слов, для которых можно просто посчитать их частотность в тексте. Данный подход проще реализовать, для него не нужно делать выделение составных частей текста, а необходимо только выделить слова. Именно этот подход мы и будем использовать.<br>
В путь!
```
import re # Регулярные выражения.
import requests # Загрузка новостей с сайта.
from bs4 import BeautifulSoup # Превращалка html в текст.
import pymorphy2 # Морфологический анализатор.
import datetime # Новости будем перебирать по дате.
from collections import Counter # Не считать же частоты самим.
import math # Корень квадратный.
```
Задачей морфологического анализа является определение начальной формы слова, его части речи и грамматических параметров. В некоторых случаях от слова требуется только начальная форма, в других - только начальная форма и часть речи.<br>
Существует два больших подхода к морфологическому анализу: <b>стемминг</b> и <b>поиск по словарю</b>. Для проведения стемминга оставляется справочник всех окончаний для данного языка. Для пришедшего слова проверяется его окончание и по нему делается прогноз начальной формы и части речи.<br>
Например, мы создаем справочник, в котором записываем все окончания прилагательных: <i>-ому, -ему, -ой, -ая, -ий, -ый, ...</i> Теперь все слова, которые имеют такое окончание будут считаться прилагаельными: <i>синий, циклический, красного, больному</i>. Заодно прилагательными будут считаться причастия (<i>делающий, строившему</i>) и местоимения (<i>мой, твой, твоему</i>). Также не понятно что делать со словами, имеющими пустое окончание. Отдельную проблему составляют такие слова, как <i>стекло, больной, вина</i>, которые могут разбираться несколькими вариантами (это явление называется <b>омонимией</b>). Помимо этого, стеммер может просто откусывать окончания, оставляя лишь псевдооснову.<br>
Большинство проблем здесь решается, но точность работы бессловарных стеммеров находится на уровне 80%. Чтобы повысить точность испольуют морфологический анализ со словарем. Разработчики составляют словарь слов, встретившихся в текстах (<a href="http://opencorpora.org/dict.php">здесь</a> можно найти пример такого словаря). Теперь каждое слово будет искаться в словаре и не предсказываться, а выдаваться точно. Для слов, отсутствующих в словаре, может применяться предсказание, пообное работе стеммера.<br>
Посмотрим как работает словарная морфология на примере системы pymorphy2.
```
morph=pymorphy2.MorphAnalyzer() # Создает объект морфоанализатора и загружет словарь.
wordform=morph.parse('стекло') # Проведем анализ слова "стекло"...
print(wordform) # ... и посмотрим на результат.
```
Как видно из вывода, слово "стекло" может быть неодушевленным существительным среднего рода, единственного числа, именительного падежа <i>tag=OpencorporaTag('NOUN,inan,neut sing,nomn')</i>, аналогично, но в винительном падеже (<i>'NOUN,inan,neut sing,accs'</i>), и глаголом <i>'VERB,perf,intr neut,sing,past,indc'</i>. При этом в первой форме оно встречается в 75% случаев (<i>score=0.75</i>), во второй в 18,75% случаев (<i>score=0.1875</i>), а как глагол - лишь в 6,25% (<i>score=0.0625</i>). Самым простым видом борьбы с омонимией является выбор нулевого элемента из списка, возвращенного морфологическим анализом. Такой подход дает около 90% точности при выборе начальной формы и до 80% если мы обращаем внимание на грамматические параметры.<br><br>
Теперь перейдем к загрузке новостей. Для этого нам потребуется метод requests.get(url). Библиотека requests предоставляет серьезные возможности для загрузки информации из Интернет. Метод get получает URL стараницы и возвращает ее содержимое. В нашем случае результат будет получаться в формате html.
```
requests.get("http://lenta.ru/")
```
Однако количество служебной информации в странице явно превышает объем текста новости. Мы проделаем два шага. На первом мы вырежем только саму новость с ее оформлением используя для этого регулярные выражения (библиотека re). На втором шаге мы используем библиотеку BeautifulSoup для "выкусыввания" тегов html.
```
# Компилируем регулярные выражения - так работает быстрее при большом количестве повторов.
findheaders = re.compile("<h1.+?>(.+)</h1>", re.S)
boa = re.compile('<div class="b-text clearfix js-topic__text" itemprop="articleBody">', re.S)
eoa = re.compile('<div class="b-box">\s*?<i>', re.S)
delscript = re.compile("<script.*?>.+?</script>", re.S)
def getLentaArticle(url):
    """Download one Lenta.ru article and return it as plain text.

    The returned string has the shape '<title>\n-----\n<body>' with all
    HTML markup stripped.
    """
    page = requests.get(url)
    html = page.text
    # The headline comes from the first <h1> element on the page.
    title = findheaders.findall(html)[0]
    # The body sits between the article-body opening tag and the closing box.
    parts = eoa.split(boa.split(html)[1])
    if len(parts) == 1:
        # Some articles end with an author block instead of the usual box.
        parts = re.split('<div itemprop="author" itemscope=""', parts[0])
    # Drop <script> sections (BeautifulSoup mishandles them), then strip tags.
    body = "".join(delscript.split(parts[0]))
    return BeautifulSoup(title + "\n-----\n" + body, "lxml").get_text()
art_text = getLentaArticle("https://lenta.ru/news/2018/02/15/greben/")
print(art_text)
```
Для новостной заметки можно составить ее словарь, а также посчитать частоты всех слов. В итоге мы получим представление текста в виде вектора. В этом векторе координаты будут называться по соответствующим словам, а смещение по данной координате будет показывать частота. <br>
При составлении словаря будем учитывать только значимые слова - существительные, прилагательные и глаголы. Помимо этого предусмотрим возможность учитывать часть речи слова, прибавляя ее у начальной форме.<br>
Для разделения текста на слова используем простейший алгоритм: слово - это последовательность букв русского алфавита среди которых может попадаться дефис.
```
# Maps pymorphy2 part-of-speech tags to the suffixes appended to lemmas.
posConv={'ADJF':'_ADJ','NOUN':'_NOUN','VERB':'_VERB'}

def getArticleDictionary(text, needPos=None):
    """Build a bag-of-words frequency vector (Counter) for *text*.

    Only adjectives, nouns and verbs are kept, each reduced to its normal
    form.  When *needPos* is not None, a part-of-speech suffix such as
    '_NOUN' is appended to every lemma.
    """
    # Tokenize: runs of Cyrillic letters, optionally joined by hyphens.
    tokens = [m[0] for m in re.findall("([А-ЯЁа-яё]+(-[А-ЯЁа-яё]+)*)", text)]
    lemmas = []
    for token in tokens:
        # Take the most probable morphological reading of the token.
        analysis = morph.parse(token)[0]
        pos = analysis.tag.POS
        if pos in ['ADJF', 'NOUN', 'VERB']:
            if needPos is not None:
                lemmas.append(analysis.normal_form + posConv[pos])
            else:
                lemmas.append(analysis.normal_form)
    return Counter(lemmas)
stat1=getArticleDictionary(art_text, True)
print(stat1)
```
Для определения меры сходства двух статей теперь может использоваться косинусная мера сходства, рассчитываемая по следующей формуле: $cos(a,b)=\frac{\sum{a_i * b_i}}{\sqrt {\sum{a_i^2}*\sum{b_i^2}}}$.<br>
Вообще-то, использовать стандартную функцию рассчета косинусной меры сходства из <a href="http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.cosine_similarity.html">sklearn</a> было бы быстрее. Но в данной задаче нам бы пришлось сводить все словари в один, чтобы на одних и тех же местах в векторе были частоты одних и тех же слов. Чтобы избежать подобной работы, напишем собственную функцию рассчета косинусного расстояния, работающую с разреженными векторами в виде питоновских словарей.
```
def cosineSimilarity(a, b):
    """Cosine similarity of two sparse vectors given as {term: weight} dicts.

    Returns 0 when either vector is empty.
    """
    if not a or not b:
        return 0
    # Dot product runs over the shared keys only; missing keys contribute 0.
    dot = sum(a[k] * b[k] for k in a if k in b)
    norm_a_sq = sum(v * v for v in a.values())
    norm_b_sq = sum(v * v for v in b.values())
    return dot / math.sqrt(norm_a_sq * norm_b_sq)
```
Посчитаем значение косинусной меры для разных статей.
```
stat2=getArticleDictionary(getLentaArticle("https://lenta.ru/news/2018/02/15/pengilly_domoi/"), True)
stat3=getArticleDictionary(getLentaArticle("https://lenta.ru/news/2018/02/15/tar_mor/"), True)
stat4=getArticleDictionary(getLentaArticle("https://lenta.ru/news/2018/02/15/olympmovies/"), True)
print(cosineSimilarity(stat1, stat2))
print(cosineSimilarity(stat1, stat3))
print(cosineSimilarity(stat2, stat3))
print(cosineSimilarity(stat2, stat4))
print(cosineSimilarity(stat3, stat4))
```
Получилось, на самом деле, так себе - статьи очень слабо походят друг на друга. Но может быть потом выйдет лучше.<br>
Пока оформим наш код в виде класса, который помимо загрузки новостей будет уметь сохранять их на диск и читать оттуда.
```
class getNewsPaper:
articles=[] # Загруженные статьи.
dictionaries=[] # Посчитанные словари (векторное представление статей).
# Конструктор - компилирует регулярные выражения и загружает морфологию.
def __init__(self):
self.delscript = re.compile("<script.*?>.+?</script>", re.S)
self.findheaders = re.compile("<h1.+?>(.+)</h1>", re.S)
self.boa = re.compile('<div class="b-text clearfix js-topic__text" itemprop="articleBody">', re.S)
self.eoa = re.compile('<div class="b-box">\s*?<i>', re.S)
self.findURLs = re.compile('<h3>(.+?)</h3>', re.S)
self.rboa = re.compile('<p class="MegaArticleBody_first-p_2htdt">', re.S)
self.reoa = re.compile('<div class="Attribution_container_28wm1">', re.S)
self.rfindURLs = re.compile('''<div class="headlineMed"><a href='(.+?)'>''', re.S)
# Создаем и загружаем морфологический словарь.
self.morph=pymorphy2.MorphAnalyzer()
# Загрузка статьи по URL.
def getLentaArticle(self, url):
""" getLentaArticle gets the body of an article from Lenta.ru"""
art = requests.get(url)
title = self.findheaders.findall(art.text)[0]
text = self.eoa.split(self.boa.split(art.text)[1])
if len(text)==1:
text = re.split('<div itemprop="author" itemscope=""', text[0])
text = "".join(self.delscript.split(text[0]))
self.articles.append(BeautifulSoup(title+"\n-----\n"+text, "lxml").get_text())
# Загрузка всех статей за один день.
def getLentaDay(self, url):
""" Gets all URLs for a given day and gets all texts. """
try:
day = requests.get(url) # Грузим страницу со списком всех статей.
cand = self.findURLs.findall(day.text) # Выделяем адреса статей.
links = ['https://lenta.ru'+re.findall('"(.+?)"', x)[0] for x in cand]
for l in links: # Загружаем статьи.
self.getLentaArticle(l)
except:
pass
# Загрузка всех статей за несколько дней.
def getLentaPeriod(self, start, finish):
curdate=start
while curdate<=finish:
print(curdate.strftime('%Y/%m/%d')) # Just in case.
# Список статей грузится с вот такого адреса.
res=self.getLentaDay('https://lenta.ru/news/'+curdate.strftime('%Y/%m/%d'))
curdate+=datetime.timedelta(days=1)
# Just in case.
def getReutersArticle(self, url):
""" Gets the body of an article from reuters.com's archive. """
try:
art = requests.get(url)
title = self.findheaders.findall(art.text)[0]
text = self.reoa.split(self.rboa.split(art.text)[1])[0]
text = "".join(self.delscript.split(text))
self.articles.append(BeautifulSoup(title+"\n-----\n"+text, "lxml").get_text())
except:
pass
def getReutersDay(self, url):
""" Gets all URLs for a given day and gets all texts. """
day = requests.get(url)
links = self.rfindURLs.findall(day.text)
for l in links:
self.getReutersArticle(l)
# Потроение вектора для статьи.
posConv={'ADJF':'_ADJ','NOUN':'_NOUN','VERB':'_VERB'}
def getArticleDictionary(self, text, needPos=None):
words=[a[0] for a in re.findall("([А-ЯЁа-яё]+(-[А-ЯЁа-яё]+)*)", text)]
reswords=[]
for w in words:
wordform=self.morph.parse(w)[0]
try:
if wordform.tag.POS in ['ADJF', 'NOUN', 'VERB']:
if needPos!=None:
reswords.append(wordform.normal_form+self.posConv[wordform.tag.POS])
else:
reswords.append(wordform.normal_form)
except:
pass
stat=Counter(reswords)
# stat={a: stat[a] for a in stat.keys() if stat[a]>1}
return stat
# Посчитаем вектора для всех статей.
def calcArticleDictionaries(self, needPos=None):
self.dictionaries=[]
for a in self.articles:
self.dictionaries.append(self.getArticleDictionary(a, needPos))
# Save articles to a file.
def saveArticles(self, filename):
    """ Saves all articles to a file with a filename. """
    # Context manager guarantees the handle is closed even if a write
    # fails (the original leaked the file object on error).
    with open(filename, "w") as newsfile:
        for art in self.articles:
            # Each article is prefixed with the '\n=====\n' separator that
            # loadArticles splits on.
            newsfile.write('\n=====\n' + art)
# Read articles back from a file.
def loadArticles(self, filename):
    """ Loads and replaces all articles from a file with a filename. """
    # Context manager guarantees the handle is closed even on a read error.
    with open(filename) as newsfile:
        text = newsfile.read()
    # [1:] drops the empty prefix before the first '\n=====\n' separator.
    self.articles = text.split('\n=====\n')[1:]
# Convenience helper: look up an article's index by its headline.
def findNewsByTitle(self, title):
    """Return the index of the article whose headline equals *title*, or -1."""
    for idx, article in enumerate(self.articles):
        headline = article.split('\n-----\n')[0]
        if headline == title:
            return idx
    return -1
def cosineSimilarity(a, b):
    """Cosine similarity between two sparse vectors given as dicts (word -> count).

    Returns 0 when either vector is empty.
    """
    if not a or not b:
        return 0
    shared = a.keys() & b.keys()
    dot = sum(a[w] * b[w] for w in shared)
    norm_a_sq = sum(v * v for v in a.values())
    norm_b_sq = sum(v * v for v in b.values())
    return dot / math.sqrt(norm_a_sq * norm_b_sq)
```
Загрузим статьи.<br>
<b>!!! Настоятельно рекомендую использовать ячейку с загрузкой статей из файла !!!</b>
```
# Download articles for the given date range.
# !!! This takes quite a long time — use the saved data instead !!!
lenta=getNewsPaper()
lenta.getLentaPeriod(datetime.date(2018, 2, 1), datetime.date(2018, 2, 14))
lenta.saveArticles("lenta2018.txt")
#lenta.loadArticles("lenta2018.txt")
lenta.calcArticleDictionaries()
# Reload from the saved file and rebuild the bag-of-lemma vectors.
lenta=getNewsPaper()
lenta.loadArticles("lenta2018.txt")
lenta.calcArticleDictionaries()
```
Из чистого любопытства попробуем найти статью, наиболее похожую на данную.
```
# Linear scan for the article most similar to article i1.
# (Of course, the cleaner way would be np.argmax over a similarity vector.)
i1 = 0
maxCos, maxpos = -1, -1
for i in range(len(lenta.articles)):
    if i != i1:
        c = cosineSimilarity(lenta.dictionaries[i1], lenta.dictionaries[i])
        if c>maxCos:
            maxCos, maxpos = c, i
# Show both headlines and the best similarity score found.
print(lenta.articles[i1].split('\n-----\n')[0])
print(lenta.articles[maxpos].split('\n-----\n')[0])
print(maxCos, maxpos)
```
Сходство между статьями достаточно велико. Есть большие шансы за то, что они об одном и том же.<br><br>
Теперь попробуем решить основную задачу.<br>
Пользователь выбирает несколько статей на интересующую его тематику. Пусть это будут олимпиада и выборы.
```
# Headlines the user "liked": Olympic/doping stories and election stories.
likesport=['Власти США обвинили МОК и ФИФА в коррупции', 'Пробирки WADA для допинг-проб оказались бракованными', 'Пожизненно отстраненных российских спортсменов оправдали', 'В Кремле порадовались за оправданных российских спортсменов', 'Россия вернется на первое место Олимпиады-2014', 'МОК разочаровало оправдание российских олимпийцев', 'Мутко загрустил после оправдания российских спортсменов', 'Оправданный призер Сочи-2014 призвал «добить ситуацию» с МОК', 'Путин предостерег от эйфории после оправдания российских олимпийцев', 'Родченков не смог вразумительно ответить на вопросы суда', 'Оправданный россиянин позлорадствовал над делившими медали Игр-2014 иностранцами', 'В CAS отказались считать оправданных россиян невиновными', 'Адвокат Родченкова заговорил о смерти чистого спорта после оправдания россиян', 'Американская скелетонистка сочла россиян ушедшими от законного наказания']
likeelect=['Социологи подсчитали планирующих проголосовать на выборах-2018', 'Собчак пообещала дать Трампу пару советов', 'На выборы президента России пойдут почти 80 процентов избирателей', 'Песков вспомнил предупреждение и отказался комментировать поездку Собчак в США', 'Собчак съездила на завтрак с Трампом и разочаровалась', 'Грудинин уступил в популярности КПРФ', 'Собчак потребовала признать незаконной регистрацию Путина на выборах', 'У Грудинина обнаружили два не до конца закрытых счета в Швейцарии и Австрии', 'Грудинин раскрыл историю происхождения дома в Испании', 'Путина зарегистрировали кандидатом в президенты', 'В Кремле отреагировали на слухи о голосовании Путина в Севастополе', 'Коммунистов вновь обвинили в незаконной агитации за Грудинина', 'ЦИК выявила обман со стороны Грудинина', 'Грудинин ответил на претензии ЦИК', 'Жириновский захотел сбросить ядерную бомбу на резиденцию Порошенко']
```
Теперь объединим все выбранные тексты в один и посчитаем вектор для него. Сделаем это два раза для выбранных тематик.
```
# Concatenate the liked articles per topic and build one combined
# lemma-frequency vector for each of the two topics.
sporttext=' '.join([lenta.articles[lenta.findNewsByTitle(likesport[i])] for i in range(len(likesport))])
sportdict=lenta.getArticleDictionary(sporttext)
electtext=' '.join([lenta.articles[lenta.findNewsByTitle(likeelect[i])] for i in range(len(likeelect))])
electdict=lenta.getArticleDictionary(electtext)
#print(sportdict)
#print(electdict)
```
А теперь отберем все статьи, косинусная мера которых превышает некоторый порог.
```
# Keep the headline of every article whose cosine similarity to the topic
# vector exceeds a (hand-tuned) threshold.
thrs=0.4
thre=0.5
cosess=[lenta.articles[i].split('\n-----\n')[0] for i in range(len(lenta.dictionaries)) if cosineSimilarity(sportdict, lenta.dictionaries[i])>thrs]
print(cosess)
cosese=[lenta.articles[i].split('\n-----\n')[0] for i in range(len(lenta.dictionaries)) if cosineSimilarity(electdict, lenta.dictionaries[i])>thre]
print(cosese)
```
Для проверки загрузим новости за какой-то другой день.
```
lenta_new=getNewsPaper()
# Re-crawling is slow; load the previously saved articles instead.
#lenta_new.getLentaPeriod(datetime.date(2018, 2, 15), datetime.date(2018, 2, 15))
#lenta_new.saveArticles("lenta20180215.txt")
lenta_new.loadArticles("lenta20180215.txt")
lenta_new.calcArticleDictionaries()
```
А теперь проверим какие новости будут находиться.
```
# Apply the same topic filters to the unseen day's news, with lower thresholds.
thrs_new = 0.3
thre_new = 0.3
cosess_new = [lenta_new.articles[i].split('\n-----\n')[0] for i in range(len(lenta_new.dictionaries)) if cosineSimilarity(sportdict, lenta_new.dictionaries[i])>thrs_new]
print(cosess_new)
cosese_new = [lenta_new.articles[i].split('\n-----\n')[0] for i in range(len(lenta_new.dictionaries)) if cosineSimilarity(electdict, lenta_new.dictionaries[i])>thre_new]
print(cosese_new)
```
Как видно, метод нуждается в более точном подборе и корректировке параметров.
Теперь попробуем применить для решения той же задачи модель Word2Vec, основная идея которой состоит в следующем. До сих пор мы работали в пространстве, размерность которого составляет несколько десятков, а может быть и сотен, тысяч измерений - по количеству используемых слов. Однако рядом будут находиться измерения для слов "бегемот" и "гиппопотам", являющихся синонимами. Следовательно, удалив одинаковые слова, мы можем снизить размерность пространства и уменьшить количество вычислений.<br>
Более того, каждое слово может быть выражено при помощи некоторых базовых понятий. Давайте попробуем отобразить теперь каждое слово в новое пространство, измерениями которого будут эти базовые понятия. Например, "король" будет раскладываться по измерениям "люди" (со значениями <b>"мужчина"</b> и "женщина"), "возраст" ("молодой", <b>"зрелый"</b>, "старый"), "власть" (<b>"верховная"</b>, "среднее звено", "местная", "локальная") и другим. При этом координаты не обязаны принимать заданные дискретные значения.<br>
Координаты слова в новом семантическом пространстве будут задаваться соседними словами. "Кушать" будет попадаться чаще с живыми существами, едой или посудой; "бегать" можно по некоторым местам и т.д. Правда, глаза могут и бегать, и есть. Это не будет добавлять модели детерминизма.<br>
Чтобы не мучиться в выбором новой системы координат натренируем некоторую модель, которая сама будет проводить уменьшение размерности пространства, а нам будет оставаться только выбрать число измерений. Эта же модель будет заниматься преобразованием точек старого пространства в новое. В этом новом семантическом пространстве становятся возможны векторные операции - сложение и вычитание. Разработчики модели Word2Vec утверждают, что они смогли получить "King"+"Man"-"Woman"="Queen". Посмотрим, получится ли у нас.
```
# Import the Word2Vec machinery from gensim.
from gensim.models.word2vec import Word2Vec # The model itself.
from gensim.models.word2vec import LineSentence # Sentence-aligned text iterator.
from gensim.models import KeyedVectors # Pre-trained semantic vectors.
# In fact, only the last import is actually used below.
import numpy as np # Vector math.
```
Теперь загрузим модель, обученную разработчиками проекта RusVectores для русского языка на новостях. В зависимости от того, откуда вы берете модели, они могут загружаться по-разному. Более того, модель можно обучить самому - для этого нужно просто взять много размеченных текстов.
```
# NOTE(review): hard-coded local path — point this at wherever the RusVectores model file lives.
model = KeyedVectors.load_word2vec_format('/home/edward/papers/kourses/Advanced Python/skillfactory/news_upos_cbow_600_2_2018.vec')
```
Теперь можно получить представление слов в новом пространстве. Имейте в виду, что в данной модели они идут с частями речи!
```
# Look up a word's embedding (vocabulary keys carry POS suffixes in this model).
model['огонь_NOUN']
```
Среди прочего, библиотека позволяет найти наиболее близкие слова к данному. Или даже к сочетанию слов.
```
# most_similar finds vocabulary items closest to a combination of vectors:
# 'positive' words are added, 'negative' words subtracted.
#model.most_similar(positive=[u'пожар_NOUN'])
#model.most_similar(positive=[u'пожар_NOUN', u'пламя_NOUN' ])
#model.most_similar(positive=[u'пожар_NOUN', u'пламя_NOUN' ], negative=[u'топливо_NOUN'])
#model.most_similar(positive=[u'женщина_NOUN', u'король_NOUN' ], negative=[u'мужчина_NOUN'])
model.most_similar(positive=[u'женщина_NOUN', u'король_NOUN' ])
```
У нас есть смысл отдельных слов. Построим на его основе смысл текста как среднее арифметическое всех векторов для слов, составляющих данный текст.
```
def text_to_vec(dct, model, size):
    """Average the embeddings of the words in *dct*.

    dct:   mapping whose keys are words (POS-suffixed for this model);
    model: KeyedVectors-like object exposing index2word and item lookup;
    size:  embedding dimensionality.
    Words absent from the model's vocabulary are skipped; an all-zero
    vector is returned when no word is known.
    """
    vocabulary = set(model.index2word)
    known_words = [word for word in dct.keys() if word in vocabulary]
    text_vec = np.zeros((size,), dtype="float32")
    for word in known_words:
        text_vec = np.add(text_vec, model[word])
    if known_words:
        text_vec /= len(known_words)
    return text_vec
```
Переразметим наши тексты так, чтобы они содержали в себе и часть речи.
```
# Re-tag the corpus with POS suffixes so lemmas match the model's vocabulary.
lentaPos=getNewsPaper()
lentaPos.loadArticles("lenta2018.txt")
lentaPos.calcArticleDictionaries(True)
```
Теперь посмотрим какова размерность векторов, хранимых в модели, и сколько в ней слов.
```
# Embedding dimensionality and vocabulary size of the loaded model.
print(len(model['огонь_NOUN']))
print(len(model.index2word))
```
Размерность векторов 600 - с запасом. Почти 300 000 слов - тоже очень хорошо.<br>
Теперь попробуем найти косинусное расстояние между полученными векторами.
```
# Compare dictionary-based cosine similarity against embedding-based cosine
# similarity for a close pair (articles 0 and 1) and a distant pair (0 and 516).
t2v1=text_to_vec(lentaPos.dictionaries[0], model, 600)
t2v2=text_to_vec(lentaPos.dictionaries[1], model, 600)
t2v516=text_to_vec(lentaPos.dictionaries[516], model, 600)
print(lentaPos.articles[0].split('\n-----\n')[0], lentaPos.articles[1].split('\n-----\n')[0])
print(cosineSimilarity(lentaPos.dictionaries[0], lentaPos.dictionaries[1]))
print(np.dot(t2v1, t2v2)/ np.linalg.norm(t2v1) / np.linalg.norm(t2v2))
print(lentaPos.articles[0].split('\n-----\n')[0], lentaPos.articles[516].split('\n-----\n')[0])
print(cosineSimilarity(lentaPos.dictionaries[0], lentaPos.dictionaries[516]))
print(np.dot(t2v1, t2v516)/ np.linalg.norm(t2v1) / np.linalg.norm(t2v516))
```
Как видно, значения косинусной меры несколько выросли. Но может быть вектора можно просто вычитать и складывать?
```
# Same pairs, compared by Euclidean distance between the averaged embeddings.
print(lentaPos.articles[0].split('\n-----\n')[0], lentaPos.articles[1].split('\n-----\n')[0])
print(np.linalg.norm(t2v1-t2v2))
print(lentaPos.articles[0].split('\n-----\n')[0], lentaPos.articles[516].split('\n-----\n')[0])
print(np.linalg.norm(t2v1-t2v516))
```
Всё логично - расстояние между последней парой статей должно быть меньше. Попробуем теперь решить нашу задачу - отбор новостей - новым методом. Для начала попробуем при помощи косинусной меры.
```
# Build POS-tagged topic dictionaries and their averaged embedding vectors.
sportdictpos = lenta.getArticleDictionary(sporttext, True)
electdictpos = lenta.getArticleDictionary(electtext, True)
t2vs = text_to_vec(sportdictpos, model, 600)
t2ve = text_to_vec(electdictpos, model, 600)

# Vectorize every article exactly once. The original recomputed
# text_to_vec twice per article per threshold (12 times per article total).
article_vecs = [text_to_vec(d, model, 600) for d in lentaPos.dictionaries]
titles = [a.split('\n-----\n')[0] for a in lentaPos.articles]

def _cos(u, v):
    # Cosine similarity between two dense vectors (same operation order
    # as the original inline expression).
    return np.dot(u, v) / np.linalg.norm(u) / np.linalg.norm(v)

# The original repeated the same selection three times for different
# thresholds; a loop produces identical output without the copy-paste.
for thrs, thre in [(0.85, 0.85), (0.8, 0.8), (0.9, 0.9)]:
    cosess = [titles[i] for i, v in enumerate(article_vecs) if _cos(t2vs, v) > thrs]
    cosese = [titles[i] for i, v in enumerate(article_vecs) if _cos(t2ve, v) > thre]
    print(thrs, thre)
    print(cosess)
    print(cosese)
```
Как видно, результат очень сильно зависит от порогового значения. А теперь решим ее просто вычитая вектора.
```
# Select articles by Euclidean distance between averaged embeddings,
# trying several thresholds. The original repeated the same selection
# three times and recomputed text_to_vec twice per article per threshold;
# here the vectors are computed once and the thresholds are looped over,
# producing identical output.
article_vecs_dist = [text_to_vec(d, model, 600) for d in lentaPos.dictionaries]
titles_dist = [a.split('\n-----\n')[0] for a in lentaPos.articles]
for thrs, thre in [(0.15, 0.15), (0.2, 0.2), (0.1, 0.1)]:
    cosess = [titles_dist[i] for i, v in enumerate(article_vecs_dist)
              if np.linalg.norm(t2vs - v) < thrs]
    cosese = [titles_dist[i] for i, v in enumerate(article_vecs_dist)
              if np.linalg.norm(t2ve - v) < thre]
    print(thrs, thre)
    print(cosess)
    print(cosese)
```
И снова мы видим сильную зависимость от выбранного порога.
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
%matplotlib inline
```
### 1. Load the dataset into a data frame named loans
```
# Load the LendingClub dataset.
loans = pd.read_csv('../data/lending-club-data.csv')
loans.head(2)
# Derive the binary target from bad_loans:
# safe_loans = 1 => safe
# safe_loans = -1 => risky
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
#loans = loans.remove_column('bad_loans')
loans = loans.drop('bad_loans', axis=1)
# Restrict to the four categorical features used in this assignment.
features = ['grade', # grade of the loan
            'term', # the term of the loan
            'home_ownership', # home_ownership status: own, mortgage or rent
            'emp_length', # number of years of employment
           ]
target = 'safe_loans'
loans = loans[features + [target]]
# Peek at one row to sanity-check the subset.
loans.iloc[122602]
```
## One-hot encoding
```
# Collect the names of the categorical (object-dtype) columns.
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
    if feat_type == object:
        categorical_variables.append(feat_name)

# Replace each categorical column with its one-hot encoding.
for feature in categorical_variables:
    loans_one_hot_encoded = pd.get_dummies(loans[feature], prefix=feature)
    #print loans_one_hot_encoded
    loans = loans.drop(feature, axis=1)
    for col in loans_one_hot_encoded.columns:
        loans[col] = loans_one_hot_encoded[col]

print(loans.head(2))
print(loans.columns)

# Load the assignment's fixed train/test row indices.
with open('../data/module-5-assignment-2-train-idx.json') as train_data_file:
    train_idx = json.load(train_data_file)
with open('../data/module-5-assignment-2-test-idx.json') as test_data_file:
    test_idx = json.load(test_data_file)
print(train_idx[:3])
print(test_idx[:3])
# Fixed: these two lines were Python 2 print statements (`print len(...)`),
# a SyntaxError under Python 3, which the rest of this notebook uses.
print(len(train_idx))
print(len(test_idx))

train_data = loans.iloc[train_idx]
test_data = loans.iloc[test_idx]
print(train_data.shape)
print(test_data.shape)
```
## Decision tree implementation
## Function to count number of mistakes while predicting majority class
Recall from the lecture that prediction at an intermediate node works by predicting the majority class for all data points that belong to this node. Now, we will write a function that calculates the number of misclassified examples when predicting the majority class. This will be used to help determine which feature is the best to split on at a given node of the tree.
Note: Keep in mind that in order to compute the number of mistakes for a majority classifier, we only need the label (y values) of the data points in the node.
Steps to follow:
- Step 1: Calculate the number of safe loans and risky loans.
- Step 2: Since we are assuming majority class prediction, all the data points that are not in the majority class are considered mistakes.
- Step 3: Return the number of mistakes.
7. Now, let us write the function intermediate_node_num_mistakes which computes the number of misclassified examples of an intermediate node given the set of labels (y values) of the data points contained in the node. Your code should be analogous to
```
def intermediate_node_num_mistakes(labels_in_node):
    """Return the number of mistakes a majority-class prediction would make.

    With majority voting, every label in the minority class is a mistake,
    so the answer is simply the smaller of the two class counts.
    """
    if len(labels_in_node) == 0:  # corner case: an empty node makes no mistakes
        return 0
    class_counts = [(labels_in_node == label).sum() for label in (1, -1)]
    return min(class_counts)
```
8. Because there are several steps in this assignment, we have introduced some stopping points where you can check your code and make sure it is correct before proceeding. To test your intermediate_node_num_mistakes function, run the following code until you get a Test passed!, then you should proceed. Otherwise, you should spend some time figuring out where things went wrong. Again, remember that this code is specific to SFrame, but using your software of choice, you can construct similar tests.
```
# Sanity checks for intermediate_node_num_mistakes: each case has exactly
# two minority labels, so the expected answer is always 2.
test_cases = [
    np.array([-1, -1, 1, 1, 1]),
    np.array([-1, -1, 1, 1, 1, 1, 1]),
    np.array([-1, -1, -1, -1, -1, 1, 1]),
]
for test_number, example_labels in enumerate(test_cases, start=1):
    if intermediate_node_num_mistakes(example_labels) == 2:
        print ('Test %d passed!' % test_number)
    else:
        print ('Test %d failed... try again!' % test_number)
```
## Function to pick best feature to split on
The function best_splitting_feature takes 3 arguments:
- The data
- The features to consider for splits (a list of strings of column names to consider for splits)
- The name of the target/label column (string)
The function will loop through the list of possible features, and consider splitting on each of them. It will calculate the classification error of each split and return the feature that had the smallest classification error when split on.
Recall that the classification error is defined as follows:
### 9. Follow these steps to implement best_splitting_feature:
- Step 1: Loop over each feature in the feature list
- Step 2: Within the loop, split the data into two groups: one group where all of the data has feature value 0 or False (we will call this the left split), and one group where all of the data has feature value 1 or True (we will call this the right split). Make sure the left split corresponds with 0 and the right split corresponds with 1 to ensure your implementation fits with our implementation of the tree building process.
- Step 3: Calculate the number of misclassified examples in both groups of data and use the above formula to compute the classification error.
- Step 4: If the computed error is smaller than the best error found so far, store this feature and its error.
Note: Remember that since we are only dealing with binary features, we do not have to consider thresholds for real-valued features. This makes the implementation of this function much easier.
Your code should be analogous to
```
def best_splitting_feature(data, features, target):
    """Return the feature whose binary split has the lowest classification error.

    data:     DataFrame of 0/1 feature columns plus the target column.
    features: candidate feature names to try splitting on.
    target:   name of the +1/-1 label column.
    """
    num_data_points = float(len(data))
    # Classification error is always <= 1, so 2 acts as "infinity" here.
    best_feature, best_error = None, 2

    for candidate in features:
        # Left split takes rows with feature value 0, right split value 1.
        left_split = data[data[candidate] == 0]
        right_split = data[data[candidate] == 1]
        # Majority-vote mistakes on each side give the split's total error:
        # error = (# mistakes left + # mistakes right) / (# data points).
        total_mistakes = (intermediate_node_num_mistakes(left_split[target])
                          + intermediate_node_num_mistakes(right_split[target]))
        error = total_mistakes / num_data_points
        if error < best_error:
            best_feature, best_error = candidate, error

    return best_feature
```
## Building the tree
With the above functions implemented correctly, we are now ready to build our decision tree. Each node in the decision tree is represented as a dictionary which contains the following keys and possible values:
### 10. First, we will write a function that creates a leaf node given a set of target values.
Your code should be analogous to
```
def create_leaf(target_values):
    """Build a leaf node predicting the majority class of *target_values*."""
    num_positive = len(target_values[target_values == +1])
    num_negative = len(target_values[target_values == -1])
    # Ties go to -1, matching the assignment's reference behavior.
    majority_class = 1 if num_positive > num_negative else -1
    return {
        'splitting_feature': None,
        'left': None,
        'right': None,
        'is_leaf': True,
        'prediction': majority_class,
    }
```
11. Now, we will provide a Python skeleton of the learning algorithm. Note that this code is not complete; it needs to be completed by you if you are using Python. Otherwise, your code should be analogous to
1. Stopping condition 1: All data points in a node are from the same class.
1. Stopping condition 2: No more features to split on.
1. Additional stopping condition: In addition to the above two stopping conditions covered in lecture, in this assignment we will also consider a stopping condition based on the max_depth of the tree. By not letting the tree grow too deep, we will save computational effort in the learning process.
```
def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10):
    """Recursively grow a binary decision tree over one-hot (0/1) features.

    data:          DataFrame containing the feature columns and the target.
    features:      candidate feature names; each feature is used at most once
                   along any root-to-leaf path.
    target:        name of the +1/-1 label column.
    current_depth: depth of the node being built (root is 0).
    max_depth:     depth at which growth stops and a leaf is emitted.

    Returns a nested dict: leaves have is_leaf=True and a 'prediction';
    internal nodes carry 'splitting_feature', 'left' and 'right' subtrees.
    """
    remaining_features = features[:] # Make a copy of the features.
    target_values = data[target]
    print ("--------------------------------------------------------------------")
    print ("Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)))
    # Stopping condition 1: the node is already pure (zero majority-vote mistakes).
    if intermediate_node_num_mistakes(target_values) == 0:
        print ("No classification error in the node. Stopping for now." )
        return create_leaf(target_values)
    # Stopping condition 2: no features left to split on.
    if remaining_features == []:
        print ("No remaining features. Stopping for now.")
        return create_leaf(target_values)
    # Additional stopping condition: maximum tree depth reached.
    if current_depth >= max_depth:
        print ("Reached maximum depth. Stopping for now.")
        return create_leaf(target_values)
    # Pick the feature whose split minimizes classification error.
    splitting_feature = best_splitting_feature(data, remaining_features, target)
    # Partition the data on the chosen binary feature (0 -> left, 1 -> right).
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1]
    # A feature is used at most once per path, so drop it from the candidates.
    remaining_features.remove(splitting_feature)
    print ("Split on feature %s. (%s, %s)" % (\
                      splitting_feature, len(left_split), len(right_split)))
    # Create a leaf node if the split is "perfect" (all rows fell on one side).
    if len(left_split) == len(data):
        print ("Creating leaf node.")
        return create_leaf(left_split[target])
    if len(right_split) == len(data):
        print ("Creating leaf node.")
        return create_leaf(right_split[target])
    # Recurse on both partitions, one level deeper.
    left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth)
    right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth)
    return {'is_leaf' : False,
            'prediction' : None,
            'splitting_feature': splitting_feature,
            'left' : left_tree,
            'right' : right_tree}
```
12. Train a tree model on the train_data. Limit the depth to 6 (max_depth = 6) to make sure the algorithm doesn't run for too long. Call this tree my_decision_tree. Warning: The tree may take 1-2 minutes to learn.
```
# Train on all one-hot feature columns (everything except the target).
input_features = train_data.columns
print (list(input_features))
feature_list = list(train_data.columns)
feature_list.remove('safe_loans')
# Depth capped at 6 so the algorithm doesn't run for too long (1-2 minutes).
my_decision_tree = decision_tree_create(train_data, feature_list, 'safe_loans', current_depth = 0, max_depth = 6)
```
#### Making predictions with a decision tree
13. As discussed in the lecture, we can make predictions from the decision tree with a simple recursive function. Write a function called classify, which takes in a learned tree and a test point x to classify. Include an option annotate that describes the prediction path when set to True. Your code should be analogous to
```
def classify(tree, x, annotate = False):
    """Predict the class of data point *x* by walking the tree recursively.

    Set annotate=True to print the decision path as it is followed.
    """
    if tree['is_leaf']:
        # Leaf node: return its stored majority-class prediction.
        if annotate:
            print ("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']
    # Internal node: descend left for feature value 0, right otherwise.
    split_feature_value = x[tree['splitting_feature']]
    if annotate:
        print ("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
    branch = 'left' if split_feature_value == 0 else 'right'
    return classify(tree[branch], x, annotate)
```
### 14. Now, let's consider the first example of the test set and see what my_decision_tree model predicts for this data point.
```
# Classify the first test point and show the prediction.
print (test_data.iloc[0])
print ('Predicted class: %s ' % classify(my_decision_tree, test_data.iloc[0]))
```
### 15. Let's add some annotations to our prediction to see what the prediction path was that led to this predicted class:
```
# Re-run the classification with the decision path printed out.
classify(my_decision_tree, test_data.iloc[0], annotate=True)
```
## Quiz question:
What was the feature that my_decision_tree first split on while making the prediction for test_data[0]?
## Quiz question:
What was the first feature that lead to a right split of test_data[0]?
## Quiz question:
What was the last feature split on before reaching a leaf node for test_data[0]?
## Answer:
term_36 months
## Answer:
grade_D
## Answer:
grade_D
## Evaluating your decision tree
### 16. Now, we will write a function to evaluate a decision tree by computing the classification error of the tree on the given dataset. Write a function called evaluate_classification_error that takes in as input:
- tree (as described above)
- data (a data frame of data points)
This function should return a prediction (class label) for each row in data using the decision tree. Your code should be analogous to
```
def evaluate_classification_error(tree, data):
    """Return the fraction of rows in *data* that *tree* misclassifies."""
    # Classify every row with the learned tree.
    prediction = data.apply(lambda x: classify(tree, x), axis=1)
    # Count disagreements with the true labels and normalize.
    num_mistakes = (data['safe_loans'] != np.array(prediction)).values.sum()
    return num_mistakes * 1. / len(data)
```
### 17. Now, use this function to evaluate the classification error on the test set.
```
# Classification error of the learned tree on the held-out test set.
evaluate_classification_error(my_decision_tree, test_data)
```
## Quiz Question:
Rounded to 2nd decimal point, what is the classification error of my_decision_tree on the test_data?
## Answer:
0.38
## Printing out a decision stump
### 18. As discussed in the lecture, we can print out a single decision stump (printing out the entire tree is left as an exercise to the curious reader). Here we provide Python code to visualize a decision stump. If you are using different software, make sure your code is analogous to:
```
def print_stump(tree, name = 'root'):
    """Print an ASCII diagram of the split at the root of *tree*.

    name: label shown above the stump (typically the parent's split feature).
    Prints "(leaf, label: ...)" and returns None when *tree* is a leaf.
    """
    split_name = tree['splitting_feature'] # split_name is something like 'term. 36 months'
    if split_name is None:
        print ("(leaf, label: %s)" % tree['prediction'])
        return None
    # One-hot column names look like 'feature_value'; split only on the first '_'.
    split_feature, split_value = split_name.split('_',1)
    print (' %s' % name)
    print( ' |---------------|----------------|')
    print (' | |')
    print (' | |')
    print (' | |')
    print (' [{0} == 0] [{0} == 1] '.format(split_name))
    print (' | |')
    print (' | |')
    print (' | |')
    # Each child is rendered either as its leaf label or as '(subtree)'.
    print (' (%s) (%s)' \
        % (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'),
           ('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree')))
```
### 19. Using this function, we can print out the root of our decision tree:
```
# Visualize the split at the root of the learned tree.
print_stump(my_decision_tree)
```
## Quiz Question:
What is the feature that is used for the split at the root node?
## Answer:
term_ 36 months
## Exploring the intermediate left subtree
The tree is a recursive dictionary, so we do have access to all the nodes! We can use
- my_decision_tree['left'] to go left
- my_decision_tree['right'] to go right
### 20. We can print out the left subtree by running the code
```
# Walk down the left-most and right-most branches, printing each stump.
print_stump(my_decision_tree['left'], my_decision_tree['splitting_feature'])
print_stump(my_decision_tree['left']['left'], my_decision_tree['left']['splitting_feature'])
print_stump(my_decision_tree['right'], my_decision_tree['splitting_feature'])
print_stump(my_decision_tree['right']['right'], my_decision_tree['right']['splitting_feature'])
```
## Quiz question:
What is the path of the first 3 feature splits considered along the left-most branch of my_decision_tree?
## Quiz question:
What is the path of the first 3 feature splits considered along the right-most branch of my_decision_tree?
## Answer
- term_ 36 months
- grade_A
- grade_B
## Answer
- term_ 36 months
- grade_D
- leaf
| github_jupyter |
# Exercise 6 - Statistical Reasoning - ‘k’ Nearest Neighbour
### AIM:
To write a python program to implement the 'k' Nearest Neighbour algorithm.
### ALGORITHM :
```
Algorithm euclidian_dist(p1,p2)
Input : p1,p2 - points as Tuple()s
Output : euclidian distance between the two points
return sqrt(
sum(
List([(p1[i]-p2[i])^2 for i <- 0 to p1.length])
)
)
end Algorithm
Algorithm KNN_classify(dataset,k,p)
Input : dataset – Dict() with class labels as keys
and data_points for the class as values.
p - test point p(x,y),
k - number of nearest neighbour.
Output : predicted class of the test point
dist=List([
Tuple(euclidian_dist(test_point,data_point),class)
for class in dataset
for data_point in class
])
dist = first k elements of sorted(dist,ascending)
freqs = Dict(class:(frequency of class in dist) for class in data_set)
return (class with max value in freqs)
end Algorithm
```
### SOURCE CODE :
```
from math import sqrt
def euclidian_dist(p1, p2):
    """Return the Euclidean distance between two equal-length points."""
    squared_diffs = ((a - b) ** 2 for a, b in zip(p1, p2))
    return sqrt(sum(squared_diffs))
class KNNClassifier:
    """k-nearest-neighbour classifier over a {label: points} data set."""

    def __init__(self, data_set, k=3, dist=euclidian_dist):
        # data_set maps each class label to an iterable of points.
        self.data_set = data_set
        self.k = k
        self.dist = dist

    def classify(self, test_point):
        """Predict the label of *test_point* by majority vote of the k closest points."""
        scored = []
        for label, points in self.data_set.items():
            for point in points:
                scored.append((self.dist(point, test_point), label))
        scored.sort()
        votes = {label: 0 for label in self.data_set}
        for _, label in scored[:self.k]:
            votes[label] += 1
        return max(votes, key=votes.get)
if __name__ == "__main__":
    # Demo: two hand-made 2-D clusters and two query points.
    training_data = {
        "Class 1": {(1, 12), (2, 5), (3, 6), (3, 10), (3.5, 8), (2, 11), (2, 9), (1, 7)},
        "Class 2": {(5, 3), (3, 2), (1.5, 9), (7, 2), (6, 1), (3.8, 1), (5.6, 4), (4, 2), (2, 5)},
    }
    queries = [(2.5, 7), (7, 2.5)]
    knn = KNNClassifier(training_data, 3)
    for test_point in queries:
        predicted = knn.classify(test_point)
        print(
            f"The given test point {test_point} is classified to:",
            predicted
        )
```
### Alternative method using numpy:
```
import numpy as np
def euclidian_dist_np(p1, p2):
    """Vectorised Euclidean distance, reducing over the last axis."""
    diff = p1 - p2
    return np.sqrt((diff ** 2).sum(axis=-1))
class KNNClassifier:
    """k-nearest-neighbour classifier over NumPy arrays.

    Parameters
    ----------
    train_x : ndarray, shape (n_samples, n_features) — training points.
    train_y : ndarray of integer class labels, shape (n_samples,).
    k : number of neighbours that vote (default 3).
    dist : callable(train_x, point) -> per-row distances;
           defaults to euclidian_dist_np.
    """

    def __init__(self, train_x, train_y, k=3, dist=None):
        self.train_x = train_x
        # np.int was removed in NumPy 1.24; accept any integer dtype instead.
        assert np.issubdtype(train_y.dtype, np.integer), "Class labels should be integers"
        self.train_y = train_y
        self.k = k
        # Resolve the default lazily; behaviour is unchanged for callers that
        # omit `dist`, but the class no longer needs the helper at def-time.
        self.dist = euclidian_dist_np if dist is None else dist

    def classify(self, test_point):
        """Return the most frequent class among the k nearest training points."""
        # indexes of k nearest neighbours
        nearest_idx = np.argsort(self.dist(self.train_x, test_point))[:self.k]
        k_nearest_classes = self.train_y[nearest_idx]
        # maximum occurring class (labels must be small non-negative ints)
        return np.bincount(k_nearest_classes).argmax()
if __name__ == "__main__":
    # np.float / np.int were removed in NumPy 1.24; use the builtin types.
    dataset = np.loadtxt("knn_dataset.csv", dtype=float, delimiter=",")
    # Last CSV column is the integer class label; the rest are features.
    train_x, train_y = dataset[:, :-1], dataset[:, -1].astype(int)
    test_x = np.array([[2.5, 7], [7, 2.5]])
    k = 3
    classifier = KNNClassifier(train_x, train_y, k=k)
    for test_vector in test_x:
        print(
            f"The given test point {test_vector} is classified to Class :",
            classifier.classify(test_vector)
        )
```
---
| github_jupyter |
# Find the comparables: extra_features.txt
The file `extra_features.txt` contains important property information like number and quality of pools, detached garages, outbuildings, canopies, and more. Let's load this file and grab a subset with the important columns to continue our study.
```
%load_ext autoreload
%autoreload 2
from pathlib import Path
import pickle
import pandas as pd
from src.definitions import ROOT_DIR
from src.data.utils import Table, save_pickle
extra_features_fn = ROOT_DIR / 'data/external/2016/Real_building_land/extra_features.txt'
assert extra_features_fn.exists()
extra_features = Table(extra_features_fn, '2016')
extra_features.get_header()
```
# Load accounts of interest
Let's remove the account numbers that don't meet free-standing single-family home criteria that we found while processing the `building_res.txt` file.
```
skiprows = extra_features.get_skiprows()
extra_features_df = extra_features.get_df(skiprows=skiprows)
extra_features_df.head()
extra_features_df.dscr.value_counts()
```
# Grab slice of the extra features of interest
With the value counts on the extra feature description performed above we can see that the majority of the features land in the top 6 categories. Let's filter out the rest of the columns.
```
cols = extra_features_df.dscr.value_counts().head(6).index
cond0 = extra_features_df['dscr'].isin(cols)
extra_features_df = extra_features_df.loc[cond0, :]
```
# Build pivot tables for count and grade
There appear to be two important values related to each extra feature: count and grade. Let's build individual pivot tables for each and merge them before saving them out.
```
extra_features_pivot_count = extra_features_df.pivot_table(index='acct',
columns='dscr',
values='count',
fill_value=0)
extra_features_pivot_count.head()
extra_features_pivot_grade = extra_features_df.pivot_table(index='acct',
columns='dscr',
values='grade')
extra_features_pivot_grade.head()
extra_features_count_grade = extra_features_pivot_count.merge(extra_features_pivot_grade,
how='left',
left_index=True,
right_index=True,
suffixes=('_count', '_grade'),
validate='one_to_one')
extra_features_count_grade.head()
assert extra_features_count_grade.index.is_unique
```
add `acct` column to make easier the merging process ahead
```
extra_features_count_grade.reset_index(inplace=True)
```
# Export real_acct
```
save_fn = ROOT_DIR / 'data/raw/2016/extra_features_count_grade_comps.pickle'
save_pickle(extra_features_count_grade, save_fn)
```
| github_jupyter |
# Python good practices
## Environment setup
```
!pip install papermill
import platform
print(f"Python version: {platform.python_version()}")
assert platform.python_version_tuple() >= ("3", "6")
import os
import papermill as pm
from IPython.display import YouTubeVideo
```
## Writing pythonic code
```
import this
```
### What does "Pythonic" mean?
- Python code is considered _pythonic_ if it:
- conforms to the Python philosophy;
- takes advantage of the language's specific features.
- Pythonic code is nothing more than **idiomatic Python code** that strives to be clean, concise and readable.
### Example: swapping two variables
```
a = 3
b = 2
# Non-pythonic
tmp = a
a = b
b = tmp
# Pythonic
a, b = b, a
```
### Example: iterating on a list
```
my_list = ["a", "b", "c"]
def do_something(item):
# print(item)
pass
# Non-pythonic
i = 0
while i < len(my_list):
do_something(my_list[i])
i += 1
# Still non-pythonic
for i in range(len(my_list)):
do_something(my_list[i])
# Pythonic
for item in my_list:
do_something(item)
```
### Example: indexed traversal
```
my_list = ["a", "b", "c"]
# Non-pythonic
for i in range(len(my_list)):
print(i, "->", my_list[i])
# Pythonic
for i, item in enumerate(my_list):
print(i, "->", item)
```
### Example: searching in a list
```
fruits = ["apples", "oranges", "bananas", "grapes"]
fruit = "cherries"
# Non-pythonic
found = False
size = len(fruits)
for i in range(0, size):
if fruits[i] == fruit:
found = True
# Pythonic
found = fruit in fruits
```
### Example: generating a list
This feature is called [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
```
numbers = [1, 2, 3, 4, 5, 6]
# Non-pythonic
doubles = []
for i in range(len(numbers)):
if numbers[i] % 2 == 0:
doubles.append(numbers[i] * 2)
else:
doubles.append(numbers[i])
# Pythonic
doubles = [x * 2 if x % 2 == 0 else x for x in numbers]
```
### Code style
- [PEP8](https://www.python.org/dev/peps/pep-0008/) is the official style guide for Python:
- use 4 spaces for indentation;
- define a maximum value for line length (around 80 characters);
- organize imports at beginning of file;
- surround binary operators with a single space on each side;
- ...
- Code style should be enforced upon creation by a tool like [black](https://github.com/psf/black).
### Beyond PEP8
Focusing on style and PEP8-compliance might make you miss more fundamental code imperfections.
```
YouTubeVideo("wf-BqAjZb8M")
```
### Docstrings
A [docstring](https://www.python.org/dev/peps/pep-0257/) is a string literal that occurs as the first statement in a module, function, class, or method definition to document it.
All modules, classes, public methods and exported functions should include a docstring.
```
# Docstring demo adapted from PEP 257; illustrative only.
# NOTE(review): this shadows the builtin `complex`, and `complex_zero` is
# not defined anywhere in this notebook — do not run as real code.
def complex(real=0.0, imag=0.0):
    """Form a complex number.
    Keyword arguments:
    real -- the real part (default 0.0)
    imag -- the imaginary part (default 0.0)
    """
    if imag == 0.0 and real == 0.0:
        return complex_zero
```
### Code linting
- _Linting_ is the process of checking code for syntactical and stylistic problems before execution.
- It is useful to catch errors and improve code quality in dynamically typed, interpreted languages, where there is no compiler.
- Several linters exist in the Python ecosystem. The most commonly used is [pylint](https://pylint.org/).
### Type annotations
- Added in Python 3.5, [type annotations](https://www.python.org/dev/peps/pep-0484/) allow to add type hints to code entities like variables or functions, bringing a statically typed flavour to the language.
- [mypy](http://mypy-lang.org/) can automatically check the code for annotation correctness.
```
def greeting(name: str) -> str:
    """Return a greeting for *name* (annotated so mypy can check callers)."""
    prefix = "Hello "
    return prefix + name
# greeting('Alice') # OK
# greeting(3) # mypy error: incompatible type "int"; expected "str"
```
### Unit tests
Unit tests automate the testing of individual code elements like functions or methods, thus decreasing the risk of bugs and regressions.
They can be implemented in Python using tools like [unittest](https://docs.python.org/3/library/unittest.html) or [pytest](https://docs.pytest.org).
```
def inc(x):
    """Return *x* incremented by one."""
    return 1 + x
# Deliberately failing test: demonstrates how pytest reports an AssertionError.
def test_answer():
    assert inc(3) == 5 # AssertionError: assert 4 == 5
```
## Packaging and dependency management
### Managing dependencies in Python
- Most Python apps depend on third-party libraries and frameworks (NumPy, Flask, Requests...).
- These tools may also have external dependencies, and so on.
- **Dependency management** is necessary to prevent version conflicts and incompatibilities. it involves two things:
- a way for the app to declare its dependencies;
- a tool to resolve these dependencies and install compatible versions.
### Semantic versioning
- Software versioning convention used in many ecosystems.
- A version number comes as a suite of three digits `X.Y.Z`.
- X = major version (potentially including breaking changes).
- Y = minor version (only non-breaking changes).
- Z = patch.
- Digits are incremented as new versions are shipped.
### pip and requirements.txt
A `requirements.txt` file is the most basic way of declaring dependencies in Python.
```text
certifi>=2020.11.0
chardet==4.0.0
click>=6.5.0, <7.1
download==0.3.5
Flask>=1.1.0
```
The [pip](https://pypi.org/project/pip/) package installer can read this file and act accordingly, downloading dependencies from [PyPI](https://pypi.org/).
```bash
pip install -r requirements.txt
```
### Virtual environments
- A **virtual environment** is an isolated Python environment where a project's dependencies are installed.
- Using them prevents the risk of mixing dependencies required by different projects on the same machine.
- Several tools exist to manage virtual environments in Python, for example [virtualenv](https://virtualenv.pypa.io) and [conda](https://docs.conda.io).
### conda and environment.yml
Installed as part of the [Anaconda](https://www.anaconda.com/) distribution, the [conda](https://docs.conda.io) package manager reads an `environment.yml` file to install the dependencies associated to a specific virtual environment.
```yaml
name: example-env
channels:
- conda-forge
- defaults
dependencies:
- python=3.7
- matplotlib
- numpy
```
### Poetry
[Poetry](https://python-poetry.org) is a recent packaging and dependency management tool for Python. It downloads packages from [PyPI](https://pypi.org/) by default.
```bash
# Create a new poetry-compliant project
poetry new <project name>
# Initialize an already existing project for Poetry
poetry init
# Install defined dependencies
poetry install
# Add a package to project dependencies and install it
poetry add <package name>
# Update dependencies to sync them with configuration file
poetry update
```
### Poetry and virtual environments
By default, Poetry creates a virtual environment for the configured project in a user-specific folder. A standard practice is to store it in the project's folder.
```bash
# Tell Poetry to store the environment in the local project folder
poetry config virtualenvs.in-project true
# Activate the environment
poetry shell
```
### The pyproject.toml file
Poetry configuration file, soon-to-be standard for Python projects.
```toml
[tool.poetry]
name = "poetry example"
version = "0.1.0"
description = ""
[tool.poetry.dependencies]
python = ">=3.7.1,<3.10"
jupyter = "^1.0.0"
matplotlib = "^3.3.2"
sklearn = "^0.0"
pandas = "^1.1.3"
ipython = "^7.0.0"
[tool.poetry.dev-dependencies]
pytest = "^6.1.1"
```
### Caret requirements
Offers a way to precisely define dependency versions.
| Requirement | Versions allowed |
| :---------: | :--------------: |
| ^1.2.3 | >=1.2.3 <2.0.0 |
| ^1.2 | >=1.2.0 <2.0.0 |
| ~1.2.3 | >=1.2.3 <1.3.0 |
| ~1.2 | >=1.2.0 <1.3.0 |
| 1.2.3 | 1.2.3 only |
### The poetry.lock file
- The first time Poetry install dependencies, it creates a `poetry.lock` file that contains the exact versions of all installed packages.
- Subsequent installs will use these exact versions to ensure consistency.
- Removing this file and running another Poetry install will fetch the latest matching versions.
## Working with notebooks
### Advantages of Jupyter notebooks
- Standard format for mixing text, images and (executable) code.
- Open source and platform-independent.
- Useful for experimenting and prototyping.
- Growing ecosystem of [extensions](https://tljh.jupyter.org/en/latest/howto/admin/enable-extensions.html) for various purposes and cloud hosting solutions ([Colaboratory](https://colab.research.google.com/), [AI notebooks](https://www.ovhcloud.com/en/public-cloud/ai-notebook/)...).
- Integration with tools like [Visual Studio Code](https://code.visualstudio.com/docs/datascience/jupyter-notebooks).
### Drawbacks of Jupyter notebooks
- Arbitrary execution order of cells can cause confusing errors.
- Notebooks don't encourage good programming habits like modularization, linting and tests.
- Being JSON-based, their versioning is more difficult than for plain text files.
- Dependency management is also difficult, thus hindering reproducibility.
### Collaborating with notebooks
A common solution for sharing notebooks between a team is to use [Jupytext](https://jupytext.readthedocs.io). This tool can associate an `.ipynb` file with a Python file to facilitate collaboration and version control.
[](https://jupytext.readthedocs.io/en/latest/examples.html)
### Code organization
Monolithic notebooks can grow over time and become hard to understand and maintain.
Just like in a traditional software project, it is possible to split them into separate parts, thus following the [separation of concerns](https://en.wikipedia.org/wiki/Separation_of_concerns) design principle.
Code can be split into several sub-notebooks and/or external Python files. The latter facilitates unit testing and version control.
### Notebook workflow
Tools like [papermill](https://papermill.readthedocs.io) can orchestrate the execution of several notebooks in a row. External parameters can be passed to notebooks, and the runtime flow can depend on the execution results of each notebook.
```
# Doesn't work on Google Colaboratory. Workaround here:
# https://colab.research.google.com/github/rjdoubleu/Colab-Papermill-Patch/blob/master/Colab-Papermill-Driver.ipynb
notebook_dir = "./papermill"
result = pm.execute_notebook(
os.path.join(notebook_dir, "simple_input.ipynb"),
os.path.join(notebook_dir, "simple_output.ipynb"),
parameters={"msg": "Hello"},
)
```
| github_jupyter |
# Module
```
import numpy as np
import pandas as pd
import warnings
import gc
from tqdm import tqdm_notebook as tqdm
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.metrics import roc_auc_score
warnings.filterwarnings("ignore")
gc.enable()
pd.set_option('max_rows', 500)
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
```
# Load Data
```
# Load the raw competition data.
train_raw = pd.read_csv('./data/train.csv')
test_raw = pd.read_csv('./data/test.csv')
train_raw.shape, test_raw.shape
# Free memory from a previous run, if any; a bare `del train, test, clf, data`
# raises NameError the first time this cell executes.
for _name in ('train', 'test', 'clf', 'data'):
    if _name in globals():
        del globals()[_name]
gc.collect()
train = train_raw.copy()
test = test_raw.copy()
col_list = train.columns[2:]
train_0 = train[train.target == 0]
train_1 = train[train.target == 1]
# Row indices of the test rows that actually count towards the public/private LB.
pb_idx = np.load('./data_temp/public_LB.npy')
pv_idx = np.load('./data_temp/private_LB.npy')
test_pb = test.iloc[pb_idx].sort_index().copy()
test_pv = test.iloc[pv_idx].sort_index().copy()
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
test_real = pd.concat([test_pb, test_pv])
data = pd.concat([train, test_real])[['ID_code', 'target'] + col_list.tolist()]
```
# Extract Unique Value in All Data
## filter
```
# Indicator frames keyed by ID_code: for each feature column, flag whether a
# value's occurrence count in the combined train+test data equals 1..8
# (con1_df..con8_df) or is greater than one (con_df).
# NOTE(review): `unique_df` is commented out here, but a later cell still
# references it when building the '_unique_order' columns — that cell will
# raise NameError unless this line is restored. Confirm intent.
# unique_df = data[['ID_code']]
con_df = data[['ID_code']]
con1_df = data[['ID_code']]
con2_df = data[['ID_code']]
con3_df = data[['ID_code']]
con4_df = data[['ID_code']]
con5_df = data[['ID_code']]
con6_df = data[['ID_code']]
con7_df = data[['ID_code']]
con8_df = data[['ID_code']]
for col in tqdm(col_list):
    # Map each value to a 0/1 flag derived from its global occurrence count.
    # unique_df[col] = data[col].map(((data[col].value_counts() == 1) * 1).to_dict())
    con_df[col] = data[col].map((~(data[col].value_counts() == 1) * 1).to_dict())
    con1_df[col] = data[col].map(((data[col].value_counts() == 1) * 1).to_dict())
    con2_df[col] = data[col].map(((data[col].value_counts() == 2) * 1).to_dict())
    con3_df[col] = data[col].map(((data[col].value_counts() == 3) * 1).to_dict())
    con4_df[col] = data[col].map(((data[col].value_counts() == 4) * 1).to_dict())
    con5_df[col] = data[col].map(((data[col].value_counts() == 5) * 1).to_dict())
    con6_df[col] = data[col].map(((data[col].value_counts() == 6) * 1).to_dict())
    con7_df[col] = data[col].map(((data[col].value_counts() == 7) * 1).to_dict())
    con8_df[col] = data[col].map(((data[col].value_counts() == 8) * 1).to_dict())
# Cumulative start offset of each distinct value when the column is sorted:
# a value maps to the number of rows holding a strictly smaller value.
order_df = data[['ID_code']]
for col in tqdm(col_list):
    temp = data[col].value_counts().sort_index().to_frame()
    order = [0]
    for v in temp.iterrows():
        # running total of counts gives the rank offset of the next value
        order.append(order[-1] + v[1].values[0])
    temp[col] = order[:-1]
    temp = temp.to_dict()[col]
    order_df[col] = data[col].map(temp)
```
## make data
```
for col in tqdm(col_list):
# data[col + '_unique'] = data[col] * unique_df[col]
data[col + '_con'] = data[col] * con_df[col]
data[col + '_con1'] = data[col] * con1_df[col]
data[col + '_con2'] = data[col] * con2_df[col]
data[col + '_con3'] = data[col] * con3_df[col]
data[col + '_con4'] = data[col] * con4_df[col]
data[col + '_con5'] = data[col] * con5_df[col]
data[col + '_con6'] = data[col] * con6_df[col]
data[col + '_con7'] = data[col] * con7_df[col]
data[col + '_con8'] = data[col] * con8_df[col]
for col in tqdm(col_list):
# data.loc[data[col + '_unique']==0, col + '_unique'] = np.nan
data.loc[data[col + '_con']==0, col + '_con'] = np.nan
data.loc[data[col + '_con1']==0, col + '_con1'] = np.nan
data.loc[data[col + '_con2']==0, col + '_con2'] = np.nan
data.loc[data[col + '_con3']==0, col + '_con3'] = np.nan
data.loc[data[col + '_con4']==0, col + '_con4'] = np.nan
data.loc[data[col + '_con5']==0, col + '_con5'] = np.nan
data.loc[data[col + '_con6']==0, col + '_con6'] = np.nan
data.loc[data[col + '_con7']==0, col + '_con7'] = np.nan
data.loc[data[col + '_con8']==0, col + '_con8'] = np.nan
for col in tqdm(col_list):
data[col + '_con_multi_counts'] = data[col + '_con'] * data[col].map(data[col].value_counts().to_dict())
for col in tqdm(col_list):
data[col + '_con_order'] = con_df[col] * order_df[col]
for col in tqdm(col_list):
data.loc[data[col + '_con_order']==0, col + '_con_order'] = np.nan
# `unique_df` was commented out in the filter cell above; rebuild the
# uniqueness indicator inline (1 where the value occurs exactly once in the
# combined data, else 0) so this cell runs without a NameError.
for col in tqdm(col_list):
    unique_flags = data[col].map(((data[col].value_counts() == 1) * 1).to_dict())
    data[col + '_unique_order'] = unique_flags * order_df[col]
for col in tqdm(col_list):
    data.loc[data[col + '_unique_order']==0, col + '_unique_order'] = np.nan
data.head()
```
# Model
```
train = data[~data.target.isna()]
test = data[data.target.isna()]
target = train['target']
param = {
'bagging_freq': 5,
'bagging_fraction': 0.335,
'boost_from_average': False,
'boost': 'gbdt',
'feature_fraction_seed': 47,
'feature_fraction': 0.041,
'learning_rate': 0.01,
'max_depth': -1,
'metric':'auc',
'min_data_in_leaf': 80,
'min_sum_hessian_in_leaf': 10.0,
'num_leaves': 2,
'num_threads': 8,
'tree_learner': 'serial',
'objective': 'binary',
'verbosity': -1,
'num_threads': 8
}
```
* 0.92288
* 0.92308
```
# 5-fold stratified CV for LightGBM with out-of-fold (OOF) predictions.
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
oof_lgb = np.zeros(len(train))            # OOF predictions on train rows
predictions_lgb = np.zeros(len(test))     # fold-averaged test predictions
feature_importance = pd.DataFrame()
train_columns = [c for c in train.columns if c not in ['ID_code', 'target']]
for fold_, (trn_idx, val_idx) in enumerate(folds.split(train, target.values)):
    print("fold n°{}".format(fold_))
    trn_data = lgb.Dataset(train.iloc[trn_idx][train_columns], label=target.iloc[trn_idx])
    val_data = lgb.Dataset(train.iloc[val_idx][train_columns], label=target.iloc[val_idx])
    # Very large round budget; early stopping on the validation fold decides.
    num_round = 500000
    clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=5000, early_stopping_rounds = 3500)
    oof_lgb[val_idx] = clf.predict(train.iloc[val_idx][train_columns], num_iteration=clf.best_iteration)
    # Average each fold's test prediction into the final estimate.
    predictions_lgb += clf.predict(test[train_columns], num_iteration=clf.best_iteration) / folds.n_splits
    # Record per-fold feature importances for later inspection.
    fold_importance = pd.DataFrame()
    fold_importance["Feature"] = train_columns
    fold_importance["importance"] = clf.feature_importance()
    fold_importance["fold"] = fold_ + 1
    feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
    print("CV score: {:<8.5f}".format(roc_auc_score(target.values[val_idx], oof_lgb[val_idx])))
print("CV score: {:<8.5f}".format(roc_auc_score(target.values, oof_lgb)))
best_features = (feature_importance[["Feature", "importance"]]
.groupby("Feature")
.mean()
.sort_values(by="importance", ascending=False)[400:])
plt.figure(figsize=(14,112))
sns.barplot(x="importance", y="Feature", data=best_features.reset_index())
plt.title('Features importance (averaged/folds)')
plt.tight_layout()
test['target'] = predictions_lgb
sub = pd.read_csv('./data/sample_submission.csv')
unchange = sub[~sub.ID_code.isin(test.ID_code)]
sub = test[['ID_code', 'target']].append(unchange).sort_index()
sample = pd.read_csv('./data/sub_lgb_5fold5aug_concategory_cv_0.9242224159538349.csv')
sample['new_target'] = sub.target
sample[sample.new_target != 0].corr()
sub.to_csv('./data/sub_lgb_noAug_cv_0.923.csv', index=False)
# Round the duplicate-value ('_con') columns to build coarse categorical bins.
for col in tqdm(col_list):
    data[col + '_con_category'] = np.around(data[col + '_con'], 0)
    # data[col + '_unique_category'] = np.around(data[col + '_unique'], 0)
# Label-encode the rounded bins (NaNs mapped to the 0 bucket first).
for col in tqdm(col_list):
    le = LabelEncoder()
    le.fit(data[col + '_con_category'].fillna(0))
    data[col + '_con_category'] = le.transform(data[col + '_con_category'].fillna(0))
# NOTE(review): the '_unique_category' columns are never created (their
# creation is commented out above), so the next two loops will raise a
# KeyError if executed — confirm whether they should be removed or the
# commented line restored.
for col in tqdm(col_list):
    le = LabelEncoder()
    le.fit(data[col + '_unique_category'].fillna(0))
    data[col + '_unique_category'] = le.transform(data[col + '_unique_category'].fillna(0))
for col in tqdm(col_list):
    data[col + '_unique_category'] = data[col + '_unique_category'].astype('category')
# One-hot encode the con-category bins.
data = pd.get_dummies(data, columns=[col + '_con_category' for col in col_list])
| github_jupyter |
This is the second day of the 5-Day Regression Challenge. You can find the first day's challenge [here](https://www.kaggle.com/rtatman/regression-challenge-day-1). Today, we’re going to learn how to fit a model to data and how to make sure we haven’t violated any of the underlying assumptions. First, though, you need a tiny bit of background:
____
**Regression formulas in R**
In R, regression is expressed using a specific type of object called a formula. This means that the syntax for expressing a regression relationship is the same across packages that use formula objects. The general syntax for a formula looks like this:
Output ~ input
If you think that more than one input might be affecting your output (for example that both the amount of time spent exercising and the number of calories consumed might affect changes in someone’s weight) you can represent that with this notation:
Output ~ input1 + input2
We'll talk about how to know which inputs you should include later on: for now, let's just stick to picking inputs based on questions that are interesting to you. (Figuring out how to turn a question into a regression formula takes practice; we'll get to it later.)
**What are these “residuals” everyone keeps talking about?**
A residual is just how far off a model is for a single point. So if our model predicts that a 20 pound cantaloupe should sell for eight dollars and it actually sells for ten dollars, the residual for that data point would be two dollars. Most models will be off by at least a little bit for pretty much all points, but you want to make sure that there’s not a strong pattern in your residuals because that suggests that your model is failing to capture some underlying trend in your dataset.
____
Today, we're going to practice fitting a regression model to our data and examining the residuals to see if our model is a good representation of our data.
___
<center>
[**You can check out a video that goes with this notebook by clicking here.**](https://www.youtube.com/embed/3C8SxyD8C7I)
## Example: Kaggle data science survey
___
For our example today, we’re going to use the 2017 Kaggle ML and Data Science Survey. I’m interested in seeing if we can predict the salary of data scientists based on their age. My intuition is that older data scientists, who are probably more experienced, will have higher salaries.
Because salary is a count value (you're usually paid in integer increments of a unit of currency, and hopefully you shouldn't be being paid a negative amount), we're going to model this with a Poisson regression.
Before we train a model, however, we need to set up our environment. I'm going to read in two datasets: the Kaggle Data Science Survey for the example and the Stack Overflow Developer Survey for you to work with.
```
# libraries
library(tidyverse)
library(boot) #for diagnostic plots
# read in data
kaggle <- read_csv("../input/kaggle-survey-2017/multipleChoiceResponses.csv")
stackOverflow <- read_csv("../input/so-survey-2017/survey_results_public.csv")
```
Now that we've got our environment set up, I'm going to do a tiny bit of data cleaning. First, I only want to look at rows where we have people who have reported having compensation of more than 0 units of currency. (There are many different currencies in the dataset, but for simplicity I'm going to ignore them.)
```
# do some data cleaning
has_compensation <- kaggle %>%
    filter(CompensationAmount > 0) %>% # only get salaries of > 0
    mutate(CleanedCompensationAmount = str_replace_all(CompensationAmount,"[[:punct:]]", "")) %>%
    mutate(CleanedCompensationAmount = as.numeric(CleanedCompensationAmount))
# the last two lines remove punctuation (some of the salaries have commas in them)
# and make sure that salary is numeric
```
Alright, now we're ready to fit our model! To do this, we need to pass the function glm() a formula with the columns we're interested in, the name of the dataframe (so it knows where the columns are from) and the family for our model. Remember from earlier that our formula should look like this:
Output ~ input
We're also predicting a count value, as discussed above, so we want to make sure the family is Poisson.
```
# poisson model to predict salary by age
model <- glm(CleanedCompensationAmount ~ Age, data = has_compensation, family = poisson)
```
We'll talk about how to examine and interpret a model tomorrow. For now, we want to make sure that it's a good fit for our data and problem. To do this, let's use some diagnostic plots.
```
# diagnostic plots
glm.diag.plots(model)
```
All of these diagnostic plots are plotting residuals, or how much our model is off for a specific prediction. Spoiler alert: all of these plots are showing us big warning signs for this model! Here's what they should look like:
* **Residuals vs Linear predictor**: You want this to look like a shapeless cloud. If there are outliers it means you've gotten some things very wrong, and if there's a clear pattern it usually means you've picked the wrong type of model. (For logistic regression, you can just ignore this plot. It's checking if the residuals are normally distributed, and logistic regression doesn't assume that they will be.)
* **Quantiles of standard normal vs. ordered deviance residuals**: For this plot you want to see the residuals lined up along the a diagonal line that goes from the bottom left to top right. If they're strongly off that line, especially in one corner, it means you have a strong skew in your data. (For logistic regression you can ignore this plot too.)
* **Cook's distance vs. h/(1-h)**: Here, you want your data points to be clustered near zero. If you have a data point that is far from zero (on either axis) it means that it's very influential and that one point is dramatically changing your analysis.
* **Cook's distance vs. case**: In this plot, you want your data to be mostly around zero on the y axis. The x axis just tells you what row in your dataframe the observation is taken from. Points that are outliers on the y axis are changing your model a lot and should probably be removed (unless you have a good reason to include them).
Based on these diagnostic plots, we should definitely not trust this model. There are a small handful of very influential points that are drastically changing our model. Remember, we didn't convert all the currencies to the same currency, so we're probably seeing some weirdnesses due to including a currency like the Yen, which is worth roughly one one-hundredth of a dollar.
With that in mind, let's see how the plots change when we remove any salaries above 150,000.
```
# remove compensation values above 150,000
has_compensation <- has_compensation %>%
filter(CleanedCompensationAmount < 150000)
# linear model to predict salary by age
model <- glm(CleanedCompensationAmount ~ Age, data = has_compensation, family = poisson)
# diagnostic plots
glm.diag.plots(model)
```
Now our plots looks much better! Our residuals are more-or-less randomly distributed (which is what the first two plots tell us) and while we still have one outstanding influential point, we can tell by comparing the Cook statistics from the first and second set of plots that it's waaaaaaaayyy less influential than the outliers we got rid of.
Our first model would probably not have been very informative for a new set of observations. Our second model is more likely to be helpful.
As a final step, we can fit & plot a model to our data, like we did yesterday to see if our hunch about age and salary was correct.
```
# plot & add a regression line
ggplot(has_compensation, aes(x = Age, y = CleanedCompensationAmount)) + # draw a plot of age vs. compensation
    geom_point() + # add points
    geom_smooth(method = "glm", # plot a regression...
                method.args = list(family = "poisson")) # ...from the poisson family
```
It looks like we were right about older data scientists making more. It does look like there are some outliers in terms of age, which we could remove with further data cleaning (which you're free to do if you like). First, however, why don't you try your hand at fitting a model and using diagnostic plots to check it out?
## Your turn!
___
Now it's your turn to come up with a model and check it out using diagnostic plots!
1. Pick a question to answer to using the Stack Overflow dataset. (You may want to check out the "survey_results_schema.csv" file to learn more about the data.) Pick a variable to predict and one variable to use to predict it.
2. Fit a GLM model of the appropriate family. (Check out [yesterday's challenge](https://www.kaggle.com/rtatman/regression-challenge-day-1) if you need a refresher.
3. Plot diagnostic plots for your model. Does it seem like your model is a good fit for your data? Are the residuals normally distributed (no patterns in the first plot and the points in the second plot are all in a line)? Are there any influential outliers?
4. Plot your two variables & use "geom_smooth" and the appropriate family to fit and plot a model
5. Optional: If you want to share your analysis with friends or to ask for help, you’ll need to make it public so that other people can see it.
* Publish your kernel by hitting the big blue “publish” button. (This may take a second.)
* Change the visibility to “public” by clicking on the blue “Make Public” text (right above the “Fork Notebook” button).
* Tag your notebook with 5daychallenge
```
# summary of the dataset
summary(stackOverflow)
# convert YearsProgram to int and filter NAs and Non US Dollar entries
stackOverflow <- stackOverflow %>%
mutate(YearsProgram = as.integer(str_match(YearsProgram, "[0-9]+"))) %>%
filter(!is.na(Salary) & !is.na(YearsProgram) & Currency == "U.S. dollars ($)")
table(stackOverflow$YearsProgram)
stackOverflow %>%
ggplot(aes(Salary)) +
geom_histogram()
# train the model
model <- glm(Salary ~ YearsProgram, data = stackOverflow, family = "gaussian")
# diagnostic plots
glm.diag.plots(model)
# plot the model
stackOverflow %>%
ggplot(aes(YearsProgram, Salary)) +
geom_point() +
geom_smooth(method="glm", method.args=list(family = "gaussian")) +
ggtitle("Salary Vs YearsProgram")
```
| github_jupyter |
```
import torch
import torch.nn as nn
import torch.nn.functional as F
# 4x4 tensor of ones: the demo input reused by all the conv examples below
x = torch.ones((4,4))
x
```
Pytorch 입력의 형태
* input type: torch.Tensor
* input shape : `(batch_size, channel, height, width)`
```
# Reshape to NCHW, the layout nn.Conv2d expects: (batch=1, channels=1, 4, 4)
x = x.view(-1, 1, 4, 4)
x
x.shape
```
## Conv2d
```
# 1-in/1-out channel 3x3 convolution; padding=1 keeps the 4x4 spatial size
out = nn.Conv2d(1, 1, kernel_size = 3, stride = 1, padding = 1, bias = False)
nn.init.constant_(out.weight.data, 1) # all-ones weights, so each output is a neighbourhood sum
```
### Convolution output size
$$ Output\;Size = floor(\frac{Input\;Size - Kernel\;Size + (2*padding)}{Stride})+1 $$
input size= 4, filter size= 3, padding= 1, Stride= 1
```
out(x)
out(x).shape
```
input size= 4, filter size= 3, padding= 2, Stride= 1
```
# NOTE(review): mutating attributes on a constructed module is hacky.
# Setting kernel_size here is cosmetic (the 3x3 weight tensor is unchanged);
# padding is re-read by the functional conv call in forward, so padding=2
# does take effect -- confirm against the installed torch version.
out.kernel_size = 3
out.padding = 2
out(x)
out(x).shape
```
## ConvTranspose2d
- Refer to https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
`padding` controls the amount of implicit zero-paddings on both sides for `dilation * (kernel_size - 1) - padding` number of points. See note below for details.
위에서의 `padding`은 Transpose Conv를 위해 input에 추가로 들어가는 padding 수를 의미. <br>
따라서 아래 nn.ConvTranspose2d와 같이 padding = 0로 정할 경우, <br>
padding = 1*(3-1)-0 = 2. Input Size(4X4->6X6)
결국 ConvTrnspose2d 또한 <br>
padding을 붙이는 방식만 위와 같이 한 Conv2d 계산을 함(dilation=1일 때)
## Padding Check
```
# 2x2 transposed conv, stride 1, padding 0: 4x4 input -> 5x5 output
# ((in-1)*stride - 2*padding + kernel = 3 - 0 + 2 = 5)
transpose = nn.ConvTranspose2d(in_channels=1, out_channels=1, kernel_size=2, stride=1, padding=0, output_padding=0, bias=False)
nn.init.constant_(transpose.weight.data,1)
# equivalent implicit zero padding applied to the input, shown for comparison
nn.ZeroPad2d((1, 1, 1, 1))(x)
transpose(x)
transpose(x).shape
# padding=1 trims one ring from the output: 4x4 input -> 3x3 output
transpose = nn.ConvTranspose2d(in_channels=1, out_channels=1, kernel_size=2, stride=1, padding=1, output_padding=0, bias=False)
nn.init.constant_(transpose.weight.data,1)
nn.ZeroPad2d((0, 0, 0, 0))(x)
transpose(x)
transpose(x).shape
```
refer to https://medium.com/apache-mxnet/transposed-convolutions-explained-with-ms-excel-52d13030c7e8
- <Figure 9>참고
```
# 3x3 transposed conv, padding=0: implicit input padding is
# dilation*(kernel_size-1)-padding = 2, matching the ZeroPad2d(2) below
transpose = nn.ConvTranspose2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=0, output_padding=0, bias=False)
nn.init.constant_(transpose.weight.data,1)
nn.ZeroPad2d((2, 2, 2, 2))(x)
```
input size= 4, filter size= 3, padding= 0, Stride= 1, output_padding = 0
```
transpose(x)
transpose(x).shape
```
input size= 4, filter size= 3, padding= 2(padding is zero), Stride= 1, output_padding = 0
```
nn.ZeroPad2d((0, 0, 0, 0))(x)
# NOTE(review): attributes are mutated on the existing module instead of
# constructing a new one; this works because forward re-reads them, but it
# is fragile -- confirm on the installed torch version.
transpose.output_padding = 0
transpose.padding = 2
transpose.stride = 1
transpose(x)
transpose(x).shape
```
input size= 4, filter size= 3, padding= 0, Stride= 2, output_padding = 0
```
# NOTE(review): the heading above says padding=0, but padding is left at 2
# here (possibly a leftover from the previous cell) -- confirm the intended
# values before relying on the printed shapes.
nn.ZeroPad2d((1, 1, 1, 1))(x)
transpose.output_padding = 0
transpose.padding = 2
transpose.stride = 2
transpose(x)
transpose(x).shape
```
## Transpose Prac
```
# Tiny 1x2x1x1 input for the upsample-then-deconv exercise
tx = torch.ones((1,2,1,1))
tx
tx.shape
# Leaky ReLU followed by nearest-neighbour upsampling: 1x1 -> 2x2
deconv1 = F.interpolate(F.leaky_relu(tx), scale_factor=2)
deconv1.shape
# Transposed conv 2 -> 4 channels, 7x7 kernel: 2x2 input -> 8x8 output
deconv1 = nn.ConvTranspose2d(2, 4, 7, bias=False, padding=0)(deconv1)
deconv1.shape
```
| github_jupyter |
```
from IPython.display import display, Javascript
# Notebook automation helper: trigger execution of every cell below this one
display(Javascript('IPython.notebook.execute_cells_below()'))
```
# Al Dhafra optimization study
# inputs
-----
```
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on("kernel_ready.Kernel", function () {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
%%html
<style>.button1 {font-size:18px;}</style>
```
#### Battery legend choices
```
from ipywidgets import widgets, Layout, ButtonStyle
import json
# Battery-size choices the user can tick; the selection is persisted to a
# JSON file so the plotting cell below can read it back.
desired_battery_range = []
battery_range = range(300, 2100 + 1, 300)
battery_options = {}
hbox_list = []
def checkbox_event_handler(**kwargs):
    """Collect the ticked battery sizes and persist them to disk."""
    # NOTE(review): this local list shadows the module-level
    # desired_battery_range, which therefore always stays empty.
    desired_battery_range = []
    for i in battery_options:
        # description is e.g. "600 kWh"; keep only the numeric prefix
        name = str(battery_options[i].description).split(" ")[0]
        value = battery_options[i].value
        if value is True:
            desired_battery_range.append(name)
    # NOTE(review): the file handle is never closed explicitly; prefer a
    # `with open(...)` block.
    json.dump(desired_battery_range, open("desired_battery_range.json","w"))
# One checkbox per battery size, 300..2100 kWh in 300 kWh steps
for batt in battery_range:
    option = '{} kWh'.format(batt)
    battery_options[option] = widgets.Checkbox(
        value=False,
        description=option,
        disabled=False,
        indent=False
    )
    display(battery_options[option])
# Re-run the handler whenever any checkbox changes
widgets.interactive_output(checkbox_event_handler,battery_options)
# Button that re-executes the cells below to regenerate the payback chart
button = widgets.Button(description="Generate payback chart",
                        layout=Layout(width='300px', height='40px'),
                        button_style='warning',)
button.add_class('button1')
box_layout = widgets.Layout(display='flex',
                            flex_flow='column',
                            align_items='center',
                            width='100%')
box = widgets.HBox(children=[button],layout=box_layout)
display(box)
def on_button_clicked(b):
    """Re-run all cells below so the chart reflects the current selection."""
    display(Javascript('IPython.notebook.execute_cells_below()'))
button.on_click(on_button_clicked)
import pandas as pd
import numpy as np
import json
from itertools import combinations
import math
import calendar
import os
import matplotlib.pyplot as plt
%matplotlib notebook
# When True, battery cases are plotted as marginal payback against the
# solar-only case of the same solar capacity.
marginal_payback = True
# Battery sizes the user ticked in the widget cell above
with open('desired_battery_range.json') as file:
    desired_battery_range = json.loads(file.read())
# key inverter, cost reduction on DG
# (per-month saving on the diesel generator, keyed by inverter kVA)
DG_cost_reduction_per_month = {
    '600': 15000 / 3.672,
    '900': 15000 / 3.672,
    '300': 2850,
}
fig = plt.figure(figsize=(10, 8))
# Optimisation results: one entry per (solar, battery, inverter) case
with open('opt_results5.json') as file:
    opt_cases = json.loads(file.read())
# Solar-only cases, used as the baseline for marginal payback
temp_opt_cases = {}
for item in opt_cases:
    if opt_cases[item]['solar'] > 0 and opt_cases[item]['battery'] == 0:
        temp_opt_cases[item] = opt_cases[item]
case_range = []
optimization_result = []
battery_legend = {}
solar_only_legend = []
battery_case_name = []
for key in opt_cases:
    solar = opt_cases[key]['solar']
    payback = opt_cases[key]['payback']
    case_range.append(solar)
    optimization_result.append(payback)
    battery = opt_cases[key]['battery']
    c_r = opt_cases[key]['c_r']
    inv = opt_cases[key]['inverter']
    investment = opt_cases[key]['investment']
    # Fold the DG cost reduction into the yearly margin, then recompute payback
    if (str(inv) in DG_cost_reduction_per_month) and payback > 0:
        margin = round(investment / payback, 0)
        adjusted_margin = margin + (DG_cost_reduction_per_month[str(inv)] * 12)
        payback = round(investment / adjusted_margin, 1)
    # For battery cases, express payback relative to the matching solar-only case
    if marginal_payback is True:
        for item in temp_opt_cases:
            if temp_opt_cases[item]['solar'] == solar and temp_opt_cases[item]['battery'] == 0 and battery > 0:
                marginal_payback_value = (investment - temp_opt_cases[item]['investment']) / ((investment / payback) - (temp_opt_cases[item]['investment'] / temp_opt_cases[item]['payback']))
                payback = marginal_payback_value
            else:
                continue
    case_name = '{} kWh / {} C / {} kVA'.format(battery, c_r, inv)
    # Bucket the case into one of three series: battery, solar-only, or DG-only
    if battery > 0 and str(battery) in desired_battery_range:
        if case_name in battery_legend:
            pass
        else:
            battery_legend[str(case_name)] = []
        battery_legend[case_name].append([solar, payback, key])
    elif battery == 0 and solar > 0:
        solar_only_legend.append([solar, payback, key])
    elif solar == 0 and battery == 0:
        label = "{:.2f}".format(payback)
        plt.annotate(label,
                     (solar, payback),
                     textcoords="offset points",
                     xytext=(0,10),
                     ha='center')
        plt.plot(solar, payback, 'o-', label='DG only')
    else:
        pass
# One line per battery configuration, annotated with the payback value
for group in battery_legend:
    x = []
    y = []
    for case in battery_legend[group]:
        x.append(case[0])
        y.append(case[1])
        label = "{:.2f}".format(case[1])
        plt.annotate(label,
                     (case[0],case[1]),
                     textcoords="offset points",
                     xytext=(0,10),
                     ha='center')
    plt.plot(x, y, 'o-', label=group)
# Solar-only baseline series
solar_only_x_range = []
solar_only_y_range = []
for case in solar_only_legend:
    solar_only_x_range.append(case[0])
    solar_only_y_range.append(case[1])
    label = "{:.2f}".format(case[1])
    plt.annotate(label,
                 (case[0],case[1]),
                 textcoords="offset points",
                 xytext=(0,10),
                 ha='center')
plt.plot(solar_only_x_range, solar_only_y_range,'o-', label='Solar only')
plt.xticks(np.arange(min(case_range), max(case_range)+1, 115))
plt.yticks(np.arange(0, 10+1, 1))
plt.xlabel("Solar capacity (kWp)", fontdict=None, labelpad=5)
plt.ylabel("Payback (year)", fontdict=None, labelpad=5, rotation=0)
axPres = fig.add_subplot(111)
axPres.yaxis.set_label_coords(0,1.03)
fig.suptitle('Solar / Battery installed vs payback (Battery cases = marginal payback against solar only)', fontsize=16)
plt.grid()
plt.legend(loc='best')
plt.show()
```
| github_jupyter |
## Brute Force Attack Analysis - Standardized Vaults
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from pylab import *
import itertools
from sklearn.metrics import confusion_matrix
from PlotUtils import *
import pathlib
import scipy.io
# Para cambiar el mapa de color por defecto
plt.rcParams["image.cmap"] = "Set2"
# Para cambiar el ciclo de color por defecto en Matplotlib
# plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Set2.colors)
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#8C6D31', '#ffdd6b', '#e9e2c9', '#dcae52', '#af7132', '#8C9363', '#637939', '#AD494A', '#E7969C', '#C4CBB9'])
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#081d58', '#253494', '#225ea8', '#1d91c0', '#41b6c4', '#7fcdbb', '#c7e9b4', '#edf8b1', '#ffffd9'])
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#084081', '#0868ac', '#2b8cbe', '#4eb3d3', '#7bccc4', '#a8ddb5', '#ccebc5', '#e0f3db', '#f7fcf0'])
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#67001f', '#b2182b', '#d6604d', '#f4a582', '#fddbc7', '#d1e5f0', '#92c5de', '#4393c3', '#2166ac','#053061'])
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#67001f', '#053061', '#b2182b', '#2166ac', '#d6604d', '#4393c3', '#f4a582', '#92c5de', '#fddbc7','#d1e5f0'])
plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#67001f', '#053061', '#b2182b', '#2166ac', '#d6604d', '#4393c3', '#f4a582', '#92c5de', '#fddbc7','#d1e5f0'][::-1])
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3', '#fdb462', '#b3de69', '#fccde5', '#d9d9d9','#bc80bd','#ccebc5','#ffed6f'])
# plt.rcParams["axes.prop_cycle"] = plt.cycler('color', ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3', '#fdb462', '#b3de69', '#fccde5', '#d9d9d9','#bc80bd','#ccebc5','#ffed6f'][::-1])
# Brute-force statistics for the standardized vaults: iterations and
# wall-clock minutes needed to breach each vault, plotted in groups of 8.
bf = pd.read_excel('ExpOctubre/StatisticsBruteForce/ResultsComplete_17-10-21.xlsx', engine='openpyxl')
bf[48:]
help(groupedBarPlot)
g1 = bf[:8]
groupedBarPlot({'IpV':g1['Iteraciones']}, g1['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g1['Tiempo (s)']/60}, g1['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g2 = bf[8:16]
groupedBarPlot({'IpV':g2['Iteraciones']}, g2['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g2['Tiempo (s)']/60}, g2['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g3 = bf[16:24]
groupedBarPlot({'IpV':g3['Iteraciones']}, g3['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g3['Tiempo (s)']/60}, g3['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), adBL = 2, width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g4 = bf[24:32]
groupedBarPlot({'IpV':g4['Iteraciones']}, g4['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g4['Tiempo (s)']/60}, g4['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g5 = bf[32:40]
groupedBarPlot({'IpV':g5['Iteraciones']}, g5['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g5['Tiempo (s)']/60}, g5['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g6 = bf[40:48]
groupedBarPlot({'IpV':g6['Iteraciones']}, g6['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g6['Tiempo (s)']/60}, g6['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g7 = bf[48:56]
groupedBarPlot({'IpV':g7['Iteraciones']}, g7['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g7['Tiempo (s)']/60}, g7['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2,fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g8 = bf[56:64]
groupedBarPlot({'IpV':g8['Iteraciones']}, g8['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g8['Tiempo (s)']/60}, g8['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g9 = bf[64:72]
groupedBarPlot({'IpV':g9['Iteraciones']}, g9['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g9['Tiempo (s)']/60}, g9['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 3, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g10 = bf[72:80]
groupedBarPlot({'IpV':g10['Iteraciones']}, g10['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g10['Tiempo (s)']/60}, g10['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
# Genuine-point statistics for the encrypted (ciphered) vaults; NCV2 keeps
# only the vaults where at least one genuine point was recovered.
NCV = pd.read_csv('ExpOctubre/CiphVaultsBFStats/VaultsCiphResultsComplete_26-10-21.csv')
NCV2 = NCV[NCV['Count'] != 0]
NCV
groupedBarPlot({'IpV':NCV['Count']}, NCV['Vaults'],'Genuine points produced from encrypted vault', axislabels = ['Vaults','# of Genuine Points'], figsize = (15,6), width= 0.6, legend = False, xtick_rot = 90, fsizes={'axes':15,'xtick':10, 'ytick':11, 'font':11})
groupedBarPlot({'IpV':NCV2['Count']}, NCV2['Vaults'],'Genuine points produced from encrypted vault', axislabels = ['Vaults','# of Genuine Points'], figsize = (12,6.5), width= 0.6, legend = False, xtick_rot = 0, fsizes={'axes':15,'xtick':10, 'ytick':11, 'font':11})
groupedBarPlot({'IpV':NCV2['Total Combinations']}, NCV2['Vaults'],'Total posible combinations to breach encrypted vaults with genuine points found', axislabels = ['Vaults','# of Combinations'], figsize = (12,6.5), width= 0.6, legend = False, xtick_rot = 0, fsizes={'axes':15,'xtick':10, 'ytick':11, 'font':11})
groupedBarPlot({'IpV':NCV2['Mean Combinations']}, NCV2['Vaults'],'Mean expected combinations to breach encrypted vaults with genuine points found', axislabels = ['Vaults','# of Combinations'], figsize = (12,6.5), width= 0.6, legend = False, xtick_rot = 0, fsizes={'axes':15,'xtick':10, 'ytick':11, 'font':11})
# NOTE(review): this list only has entries for vaults below the threshold,
# so its length can differ from len(NCV['Vaults']) passed to the plot below
# -- confirm groupedBarPlot tolerates the mismatch.
brokenvaults = [0 for i in NCV['Count'] if i < (8+1)*2]
groupedBarPlot({'IpV':brokenvaults}, NCV['Vaults'],'Encrypted vaults to be breached', axislabels = ['Vaults','Breached/Not Breached'], figsize = (12,5), width= 0.6, legend = False, xtick_rot = 90, axisLim = {'ylim':[0,1]}, fsizes={'axes':15,'xtick':8, 'ytick':11, 'font':11})
```
## Brute Force Attack Analysis - Non-Standardized Vaults
```
# Brute-force results for the non-standardized vaults.
bf = pd.read_excel('Pruebas/ResultsComplete_17-10-21.xlsx', engine='openpyxl')
bf
# Keep only vaults that required at least one iteration...
bfNC = bf[bf['Iteraciones'] != 0]
# ...and a non-zero time, then renumber the rows 0..n-1.
# FIX: the original `.reset_index().drop('index', 1)` passed `axis`
# positionally, which was deprecated in pandas 1.0 and removed in 2.0;
# reset_index(drop=True) produces the identical frame in one step.
bfNC2 = bfNC[bfNC['Tiempo (s)'] != 0].reset_index(drop=True)
bfNC2
# Per-group bar charts (iterations and minutes to breach) over slices of the
# cleaned non-standardized results.
g1 = bfNC2[:3]
groupedBarPlot({'IpV':g1['Iteraciones']}, g1['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g1['Tiempo (s)']/60}, g1['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g2 = bfNC2[3:6]
groupedBarPlot({'IpV':g2['Iteraciones']}, g2['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g2['Tiempo (s)']/60}, g2['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g3 = bfNC2[6:10]
groupedBarPlot({'IpV':g3['Iteraciones']}, g3['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g3['Tiempo (s)']/60}, g3['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g4 = bfNC2[10:17]
groupedBarPlot({'IpV':g4['Iteraciones']}, g4['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g4['Tiempo (s)']/60}, g4['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g5 = bfNC2[17:22]
groupedBarPlot({'IpV':g5['Iteraciones']}, g5['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g5['Tiempo (s)']/60}, g5['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g6 = bfNC2[22:30]
groupedBarPlot({'IpV':g6['Iteraciones']}, g6['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g6['Tiempo (s)']/60}, g6['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g7 = bfNC2[30:36]
groupedBarPlot({'IpV':g7['Iteraciones']}, g7['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g7['Tiempo (s)']/60}, g7['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g8 = bfNC2[36:44]
groupedBarPlot({'IpV':g8['Iteraciones']}, g8['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g8['Tiempo (s)']/60}, g8['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g9 = bfNC2[44:47]
groupedBarPlot({'IpV':g9['Iteraciones']}, g9['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g9['Tiempo (s)']/60}, g9['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
g10 = bfNC2[47:]
groupedBarPlot({'IpV':g10['Iteraciones']}, g10['Vault'],'Iterations to breach the vault', axislabels = ['Vaults','Iterations'], figsize = (10,6), width= 0.6, legend = False, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
groupedBarPlot({'TpV':g10['Tiempo (s)']/60}, g10['Vault'],'Time tooked to breach the vault', axislabels = ['Vaults', 'Minutes'], figsize = (10,6), width= 0.6, legend = False, adBL = 2, fsizes={'axes':15,'xtick':11, 'ytick':11, 'font':11})
# Vaults where no Lagrange interpolation was possible (zero iterations)
bfNGP = list(bf[bf['Iteraciones'] == 0]['Vault'])
print('Vaults without enough points to calculate Lagrange interpolation:\n',bfNGP)
print('\n\n')
# Vaults that iterated but never finished within the time budget
bfI = bf[bf['Tiempo (s)'] == 0]
bfI = list(bfI[bfI['Iteraciones'] != 0]['Vault'])
print('Vaults that take more than 1 million of iterations to be broken:\n',bfI)
```
| github_jupyter |
```
import os
import random
import torch
import torchvision.transforms as standard_transforms
import scipy.io as sio
import matplotlib
import pandas as pd
import misc.transforms as own_transforms
import warnings
from torch.autograd import Variable
from torch.utils.data import DataLoader
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
from tqdm import trange, tqdm
from misc.utils import *
from models.CC import CrowdCounter
from config import cfg
import CCAugmentation as cca
from datasets.SHHB.setting import cfg_data
from load_data import CustomDataset
# Runtime setup: pin GPU 0 and let cuDNN benchmark the fastest conv kernels.
torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True
warnings.filterwarnings('ignore')
# Per-channel mean/std for (de)normalisation -- presumably the SHHB training
# statistics; confirm against datasets/SHHB/setting.py.
mean_std = ([0.452016860247, 0.447249650955, 0.431981861591],[0.23242045939, 0.224925786257, 0.221840232611])
img_transform = standard_transforms.Compose([
    standard_transforms.ToTensor(),
    standard_transforms.Normalize(*mean_std)
])
# Inverse transform: normalised tensor back to a viewable PIL image.
restore = standard_transforms.Compose([
    own_transforms.DeNormalize(*mean_std),
    standard_transforms.ToPILImage()
])
pil_to_tensor = standard_transforms.ToTensor()
# NOTE(review): only the last un-commented assignment takes effect; the
# earlier model_path values are immediately overwritten.
model_path = './exp/11-26_06-00_SHHB_MCNN_0.0001_[noAug]/all_ep_146_mae_23.91_mse_35.70.pth'
model_path = './exp/11-26_06-57_SHHB_MCNN_0.0001_[noAug]/all_ep_175_mae_17.92_mse_26.94.pth'
model_path = './exp/11-26_07-42_SHHB_MCNN_0.0001_[noAug]/all_ep_171_mae_18.16_mse_29.66.pth'
model_path = './exp/11-27_09-59_SHHB_MCNN_0.0001_[flipLR]/all_ep_180_mae_18.34_mse_30.49.pth'
model_path = './exp/11-27_10-44_SHHB_MCNN_0.0001_[flipLR]/all_ep_181_mae_19.11_mse_33.26.pth'
# model_path = './exp/11-27_11-30_SHHB_MCNN_0.0001_[flipLR]/all_ep_180_mae_18.16_mse_30.61.pth'
# Build the crowd counter, load the checkpoint, move to GPU, inference mode.
net = CrowdCounter(cfg.GPU_ID,cfg.NET)
net.load_state_dict(torch.load(model_path))
net.cuda()
net.eval()
# Load the ShanghaiTech part-B test split through the CCAugmentation pipeline
# (no augmentations for evaluation).
val_pipeline = cca.Pipeline(
    cca.examples.loading.SHHLoader("/dataset/ShanghaiTech", "test", "B"), []
).execute_generate()
val_loader = DataLoader(CustomDataset(val_pipeline), batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=1, drop_last=False)
# Materialise the loader once so the batches can be iterated repeatedly below.
val_img = list(val_loader)
# Visualise the first N validation samples: input image, ground-truth density
# map, and predicted density map side by side, with the counts in the title.
start = 0
N = 3
for vi, data in enumerate(val_img[start:start+N], 0):
    img, gt_map = data
    with torch.no_grad():
        img = Variable(img).cuda()
        pred_map = net.test_forward(img)
    pred_map = pred_map.data.cpu().numpy()
    new_img = img.data.cpu().numpy()
    # NCHW -> NHWC, then reverse channel order for display (presumably the
    # loader yields BGR -- confirm against CustomDataset)
    new_img = np.moveaxis(new_img, 1, 2)
    new_img = np.moveaxis(new_img, 2, 3)
    new_img = np.squeeze(new_img)[:,:,::-1]
    # counts = density-map sums / 100 (assumes maps are scaled by 100 during
    # training -- confirm against cfg_data)
    pred_cnt = np.sum(pred_map[0])/100.0
    gt_count = np.sum(gt_map.data.cpu().numpy())/100.0
    fg, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(16, 5))
    plt.suptitle(' '.join([
        'count_label:', str(round(gt_count, 3)),
        'count_prediction:', str(round(pred_cnt, 3))
    ]))
    ax0.imshow(np.uint8(new_img))
    ax1.imshow(np.squeeze(gt_map), cmap='jet')
    ax2.imshow(np.squeeze(pred_map), cmap='jet')
    plt.show()
# Full-set evaluation: per-sample absolute and squared count errors, then the
# aggregate MAE and RMSE (printed as MSE, following the training logs).
mae = np.empty(len(val_img))
mse = np.empty(len(val_img))
for vi, data in enumerate(tqdm(val_img), 0):
    img, gt_map = data
    with torch.no_grad():
        img = Variable(img).cuda()
        pred_map = net.test_forward(img)
    pred_map = pred_map.data.cpu().numpy()
    # counts = density-map sums / 100 (same scaling assumption as above)
    pred_cnt = np.sum(pred_map[0])/100.0
    gt_count = np.sum(gt_map.data.cpu().numpy())/100.0
    mae[vi] = np.abs(gt_count-pred_cnt)
    mse[vi] = (gt_count-pred_cnt)**2
print('MAE:', round(mae.mean(),2))
print('MSE:', round(np.sqrt(mse.mean()),2))
```
| github_jupyter |
### At least three questions related to business or real-world applications of how the data could be used.
## Preparing Data
```
# import necessary libraries
# pandas for tabular data handling
import pandas as pd
# numpy for numerical operations
import numpy as np
# read the raw Stack Overflow survey responses (one row per respondent)
stackoverflow=pd.read_csv('C:/Users/anura/Downloads/survey_results_public.csv/survey_results_public.csv')
# inspect the first 5 rows
stackoverflow.head()
```
## Handling Missing Values
```
# Count missing values per column.
stackoverflow.isna().sum()
# Fill missing values with forward then backward fill: many columns are
# categorical, so mean imputation is not applicable, and dropping rows would
# discard a large share of the data set. Forward/backward fill copies values
# from the previous/next row.
# BUG FIX: fillna() returns a new DataFrame -- the original two calls
# discarded their results, so `stackoverflow` was never actually filled.
# Assign the result back; ffill() first, then bfill() to cover any leading
# NaNs that forward fill cannot reach. (.ffill()/.bfill() also replace the
# fillna(method=...) form, which is deprecated in modern pandas.)
stackoverflow = stackoverflow.ffill().bfill()
```
## Data Understanding
```
# All the columns in the dataframe
for colname in stackoverflow:
    print(colname)
# dimension of the dataframe: 51392 rows and 154 columns
stackoverflow.shape
# read the companion csv that explains each survey column in detail
schema=pd.read_csv('C:/Users/anura/Downloads/survey_results_schema.csv')
schema.head()
```
## Evaluating Results and Business Understanding
### Question 1
### What is the average salary of particular UnderGrad Major and what is the average salary of particular Formal Education?
#### Average salary of UnderGrad Major and Average salary according to Formal Education
```
# Respondent counts per undergrad major
stackoverflow.MajorUndergrad.value_counts()
# Average salary per undergrad major, highest first
MajorAvgSalary=stackoverflow.groupby('MajorUndergrad')['Salary'].mean().sort_values(ascending=False)
MajorAvgSalary
# Horizontal bar chart of the per-major averages
MajorAvgSalary.plot(kind='barh')
# Respondent counts per formal-education level
stackoverflow['FormalEducation'].value_counts()
# Average salary per formal-education level, highest first
FormalAvgSalary=stackoverflow.groupby('FormalEducation')['Salary'].mean().sort_values(ascending=False)
FormalAvgSalary
# Horizontal bar chart of the per-education averages
FormalAvgSalary.plot(kind='barh')
```
##### Result :
Psychology have the highest average salary among Majors in undergrad.
Doctoral degree is the highest average salary among Formal Education.
## Question 2
### Which company size and company type have highest salary?
```
# Respondent counts per company size
stackoverflow['CompanySize'].value_counts()
# Respondent counts per company type
stackoverflow['CompanyType'].value_counts()
# Average salary grouped by (company size, company type), highest first
stackoverflow.groupby(['CompanySize','CompanyType'])['Salary'].mean().sort_values(ascending=False)
# Same averages grouped by (company type, company size) for the other view
stackoverflow.groupby(['CompanyType','CompanySize'])['Salary'].mean().sort_values(ascending=False)
```
#### Result :
Pre-series A startups have the highest average salary among company types.
Within venture-funded startups, companies with 5,000 to 9,999 employees have the highest average salary among company sizes.
### Question 3
###### Which combination of years of programming and formal education has the highest average salary?
```
# Respondent counts per YearsProgram bucket
stackoverflow['YearsProgram'].value_counts()
# Average salary per (years of programming, formal education) pair, highest first
AvgYearEdu=stackoverflow.groupby(['YearsProgram','FormalEducation'])['Salary'].mean().sort_values(ascending=False)
AvgYearEdu
```
#### Result:
17-18 years of programming combined with a Professional degree has the highest average salary.
## Question 4
#### Which country has the highest average salary for WorkPayCare?
```
# Respondent counts per WorkPayCare answer
stackoverflow['WorkPayCare'].value_counts()
# Average salary per WorkPayCare answer, highest first
stackoverflow.groupby('WorkPayCare')['Salary'].mean().sort_values(ascending=False)
# Average salary per (country, WorkPayCare) pair, highest first
stackoverflow.groupby(['Country','WorkPayCare'])['Salary'].mean().sort_values(ascending=False)
```
### Result:
Respondents in Puerto Rico have the highest average salary, and they agree that they were given work pay care.
| github_jupyter |
# Pandas and Scikit-learn
Pandas is a Python library that contains high-level data structures and manipulation tools designed for data analysis. Think of Pandas as a Python version of Excel. Scikit-learn, on the other hand, is an open-source machine learning library for Python.
While Scikit-learn does a lot of the heavy lifting, what's equally important is ensuring that raw data is processed in such a way that we are able to 'feed' it to Scikit-learn. Hence, the ability to manipulate raw data with Pandas makes it an indispensible part of our toolkit.
# Kaggle
Kaggle is the leading platform for data science competitions. Participants compete for cash prizes by submitting the best predictive model to problems posted on the competition website.
https://www.kaggle.com/competitions
Learning machine learning via Kaggle problems allows us to take a highly-directed approach because:
1. The problems are well-defined and the data is provided, allowing us to immediately focus on manipulating the data, and
2. The leaderboard allows us to keep track of how well we're doing.
In the following set of exercises, we will be reviewing the data from the Kaggle Titanic competition. Our aim is to make predictions on whether or not specific passengers on the Titanic survived, based on characteristics such as age, sex and class.
# Section 1-0 - First Cut
We will start by processing the training data, after which we will be able to use to 'train' (or 'fit') our model. With the trained model, we apply it to the test data to make the predictions. Finally, we output our predictions into a .csv file to make a submission to Kaggle and see how well they perform.
It is very common to encounter missing values in a data set. In this section, we will take the simplest (or perhaps, simplistic) approach of ignoring the whole row if any part of it contains an NA value. We will build on this approach in later sections.
## Pandas - Extracting data
First, we load the training data from a .csv file. This is the similar to the data found on the Kaggle website:
https://www.kaggle.com/c/titanic-gettingStarted/data
```
import pandas as pd
import numpy as np
df = pd.read_csv('../data/train.csv')
```
## Pandas - Cleaning data
We then review a selection of the data.
```
df.head(10)
```
We notice that the columns describe features of the Titanic passengers, such as age, sex, and class. Of particular interest is the column Survived, which describes whether or not the passenger survived. When training our model, what we are essentially doing is assessing how each feature impacts whether or not the passenger survived (or if the feature makes an impact at all).
**Exercise**:
- Write the code to review the tail-end section of the data.
We observe that the columns Name and Cabin are, for our current purposes, irrelevant. We proceed to remove them from our data set.
```
df = df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
```
Next, we review the type of data in the columns, and their respective counts.
```
df.info()
```
We notice that the columns Age and Embarked have NAs or missing values. As previously discussed, we take the approach of simply removing the rows with missing values.
```
df = df.dropna()
```
**Question**
- If you were to fill in the missing values, with what values would you fill them with? Why?
Scikit-learn only takes numerical arrays as inputs. As such, we would need to convert the categorical columns Sex and Embarked into numerical ones. We first review the range of values for the column Sex, and create a new column that represents the data as numbers.
```
df['Sex'].unique()
df['Gender'] = df['Sex'].map({'female': 0, 'male':1}).astype(int)
```
Similarly for Embarked, we review the range of values and create a new column called Port that represents, as a numerical value, where each passenger embarks from.
```
df['Embarked'].unique()
df['Port'] = df['Embarked'].map({'C':1, 'S':2, 'Q':3}).astype(int)
```
**Question**
- What problems might we encounter by mapping C, S, and Q in the column Embarked to the values 1, 2, and 3? In other words, what does the ordering imply? Does the same problem exist for the column Sex?
Now that we have numerical columns that encapsulate the information provided by the columns Sex and Embarked, we can proceed to drop them from our data set.
```
df = df.drop(['Sex', 'Embarked'], axis=1)
```
We review the columns of our final, processed data set.
```
cols = df.columns.tolist()
print(cols)
```
For convenience, we move the column Survived to the left-most column. We note that the left-most column is indexed as 0.
```
cols = [cols[1]] + cols[0:1] + cols[2:]
df = df[cols]
```
In our final review of our training data, we check that (1) the column Survived is the left-most column (2) there are no NA values, and (3) all the values are in numerical form.
```
df.head(10)
df.info()
```
Finally, we convert the processed training data from a Pandas dataframe into a numerical (Numpy) array.
```
train_data = df.values
```
## Scikit-learn - Training the model
In this section, we'll simply use the model as a black box. We'll review more sophisticated techniques in later sections.
Here we'll be using the Random Forest model. The intuition is as follows: each feature is reviewed to see how much impact it makes to the outcome. The most prominent feature is segmented into a 'branch'. A collection of branches is a 'tree'. The Random Forest model, broadly speaking, creates a 'forest' of trees and aggregates the results.
http://en.wikipedia.org/wiki/Random_forest
```
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators = 100)
```
We use the processed training data to 'train' (or 'fit') our model. The column Survived will be our first input, and the set of other features (with the column PassengerId omitted) as the second.
```
model = model.fit(train_data[0:,2:],train_data[0:,0])
```
## Scikit-learn - Making predictions
We first load the test data.
```
df_test = pd.read_csv('../data/test.csv')
```
We then review a selection of the data.
```
df_test.head(10)
```
We notice that test data has columns similar to our training data, but not the column Survived. We'll use our trained model to predict values for the column Survived.
As before, we process the test data in a similar fashion to what we did to the training data.
```
# Mirror the training-data preparation on the test set: drop irrelevant
# columns, remove rows with missing values, and encode categoricals.
df_test = df_test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
# Dropping NA rows shrinks the test set below the 418 rows Kaggle expects;
# this problem is discussed at the end of the section.
df_test = df_test.dropna()
# NOTE(review): unlike the training step, these maps omit .astype(int);
# after dropna() no NaNs remain so the values are integral, but confirm
# the resulting dtypes before relying on them.
df_test['Gender'] = df_test['Sex'].map({'female': 0, 'male':1})
df_test['Port'] = df_test['Embarked'].map({'C':1, 'S':2, 'Q':3})
# Remove the original categorical columns now that numeric versions exist.
df_test = df_test.drop(['Sex', 'Embarked'], axis=1)
# Convert the processed dataframe into a NumPy array for scikit-learn.
test_data = df_test.values
```
We now apply the trained model to the test data (omitting the column PassengerId) to produce an output of predictions.
```
output = model.predict(test_data[:,1:])
```
## Pandas - Preparing submission
We simply create a Pandas dataframe by combining the index from the test data with the output of predictions.
```
result = np.c_[test_data[:,0].astype(int), output.astype(int)]
df_result = pd.DataFrame(result[:,0:2], columns=['PassengerId', 'Survived'])
```
We briefly review our predictions.
```
df_result.head(10)
```
Finally, we output our results to a .csv file.
```
df_result.to_csv('../results/titanic_1-0.csv', index=False)
```
However, it appears that we have a problem. The Kaggle submission website expects "the solution file to have 418 predictions."
https://www.kaggle.com/c/titanic-gettingStarted/submissions/attach
We compare this to our result.
```
df_result.shape
```
Since we eliminated the rows containing NAs, we end up with a set of predictions with a smaller number of rows compared to the test data. As Kaggle requires all 418 predictions, we are unable to make a submission.
In this section, we took the simplest approach of ignoring missing values, but fail to produce a complete set of predictions. We look to build on this approach in Section 1-1.
| github_jupyter |
###### Content under Creative Commons Attribution license CC-BY 4.0, code under BSD 3-Clause License © 2017 L.A. Barba, N.C. Clementi
# Life expectancy and wealth
Welcome to **Lesson 4** of the second module in _Engineering Computations_. This module gives you hands-on data analysis experience with Python, using real-life applications. The first three lessons provide a foundation in data analysis using a computational approach. They are:
1. [Lesson 1](http://go.gwu.edu/engcomp2lesson1): Cheers! Stats with beers.
2. [Lesson 2](http://go.gwu.edu/engcomp2lesson2): Seeing stats in a new light.
3. [Lesson 3](http://go.gwu.edu/engcomp2lesson3): Lead in lipstick.
You learned to do exploratory data analysis with data in the form of arrays: NumPy has built-in functions for many descriptive statistics, making it easy! And you also learned to make data visualizations that are both good-looking and effective in communicating and getting insights from data.
But NumPy can't do everything. So we introduced you to `pandas`, a Python library written _especially_ for data analysis. It offers a very powerful new data type: the _DataFrame_—you can think of it as a spreadsheet, conveniently stored in one Python variable.
In this lesson, you'll dive deeper into `pandas`, using data for life expectancy and per-capita income over time, across the world.
## The best stats you've ever seen
[Hans Rosling](https://en.wikipedia.org/wiki/Hans_Rosling) was a professor of international health in Sweden, until his death in February of this year. He came to fame with the thrilling TED Talk he gave in 2006: ["The best stats you've ever seen"](https://www.ted.com/talks/hans_rosling_shows_the_best_stats_you_ve_ever_seen) (also on [YouTube](https://youtu.be/RUwS1uAdUcI), with ads). We highly recommend that you watch it!
In that first TED Talk, and in many other talks and even a BBC documentary (see the [trailer](https://youtu.be/jbkSRLYSojo) on YouTube), Rosling uses data visualizations to tell stories about the world's health, wealth, inequality and development. Using software, he and his team created amazing animated graphics with data from the United Nations and World Bank.
According to a [blog post](https://www.gatesnotes.com/About-Bill-Gates/Remembering-Hans-Rosling) by Bill and Melinda Gates after Prof. Rosling's death, his message was simple: _"that the world is making progress, and that policy decisions should be grounded in data."_
In this lesson, we'll use data about life expectancy and per-capita income (in terms of the gross domestic product, GDP) around the world. Visualizing and analyzing the data will be our gateway to learning more about the world we live in.
Let's begin! As always, we start by importing the Python libraries for data analysis (and setting some plot parameters).
```
import numpy
import pandas
from matplotlib import pyplot
%matplotlib inline
#Import rcParams to set font styles
from matplotlib import rcParams
#Set font style and size
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
```
## Load and inspect the data
We found a website called [The Python Graph Gallery](https://python-graph-gallery.com), which has a lot of data visualization examples.
Among them is a [Gapminder Animation](https://python-graph-gallery.com/341-python-gapminder-animation/), an animated GIF of bubble charts in the style of Hans Rosling.
We're not going to repeat the same example, but we do get some ideas from it and re-use their data set.
The data file is hosted on their website, and we can read it directly from there into a `pandas` dataframe, using the URL.
```
# Read a dataset for life expectancy from a CSV file hosted online
url = 'https://python-graph-gallery.com/wp-content/uploads/gapminderData.csv'
life_expect = pandas.read_csv(url)
```
The first thing to do always is to take a peek at the data.
Using the `shape` attribute of the dataframe, we find out how many rows and columns it has. In this case, it's kind of big to print it all out, so to save space we'll print a small portion of `life_expect`.
You can use a slice to do this, or you can use the [`DataFrame.head()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html) method, which returns by default the first 5 rows.
```
life_expect.shape
life_expect.head()
```
You can see that the columns hold six types of data: the country, the year, the population, the continent, the life expectancy, and the per-capita gross domestic product (GDP).
Rows are indexed from 0, and the columns each have a **label** (also called an index). Using labels to access data is one of the most powerful features of `pandas`.
In the first five rows, we see that the country repeats (Afghanistan), while the year jumps by five. We guess that the data is arranged in blocks of rows for each country.
We can get a useful summary of the dataframe with the [`DataFrame.info()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.info.html) method: it tells us the number of rows and the number of columns (matching the output of the `shape` attribute) and then for each column, it tells us the number of rows that are populated (have non-null entries) and the type of the entries; finally it gives a breakdown of the types of data and an estimate of the memory used by the dataframe.
```
life_expect.info()
```
The dataframe has 1704 rows, and every column has 1704 non-null entries, so there is no missing data. Let's find out how many entries of the same year appear in the data.
In [Lesson 1](http://go.gwu.edu/engcomp2lesson1) of this module, you already learned to extract a column from a data frame, and use the [`series.value_counts()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.value_counts.html) method to answer our question.
```
life_expect['year'].value_counts()
```
We have an even 142 occurrences of each year in the dataframe. The distinct entries must correspond to each country. It also is clear that we have data every five years, starting 1952 and ending 2007. We think we have a pretty clear picture of what is contained in this data set. What next?
## Grouping data for analysis
We have a dataframe with a `country` column, where countries repeat in blocks of rows, and a `year` column, where sets of 12 years (increasing by 5) repeat for every country. Tabled data commonly has this interleaved structure. And data analysis often involves grouping the data in various ways, to transform it, compute statistics, and visualize it.
With the life expectancy data, it's natural to want to analyze it by year (and look at geographical differences), and by country (and look at historical differences).
In [Lesson 2](http://go.gwu.edu/engcomp2lesson2) of this module, we already learned how useful it was to group the beer data by style, and calculate means within each style. Let's get better acquainted with the powerful `groupby()` method for dataframes. First, grouping by the values in the `year` column:
```
by_year = life_expect.groupby('year')
type(by_year)
```
Notice that the type of the new variable `by_year` is different: it's a _GroupBy_ object, which—without making a copy of the data—is able to apply operations on each of the groups.
The [`GroupBy.first()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.core.groupby.GroupBy.first.html) method, for example, returns the first row in each group—applied to our grouping `by_year`, it shows the list of years (as a label), with the first country that appears in each year-group.
```
by_year.first()
```
All the year-groups have the same first country, Afghanistan, so what we see is the population, life expectancy and per-capita income in Afghanistan for all the available years.
Let's save that into a new dataframe, and make a line plot of the population and life expectancy over the years.
```
Afghanistan = by_year.first()
Afghanistan['pop'].plot(figsize=(8,4),
title='Population of Afghanistan');
Afghanistan['lifeExp'].plot(figsize=(8,4),
title='Life expectancy of Afghanistan');
```
Do you notice something interesting? It's curious to see that the population of Afghanistan took a fall after 1977. We have data every 5 years, so we don't know exactly when this fall began, but it's not hard to find the answer online. The USSR invaded Afghanistan in 1979, starting a conflict that lasted 9 years and resulted in an estimated death toll of one million civilians and 100,000 fighters [1]. Millions fled the war to neighboring countries, which may explain why we see a dip in population, but not a dip in life expectancy.
We can also get some descriptive statistics in one go with the [`DataFrame.describe()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.describe.html) method of `pandas`.
```
Afghanistan.describe()
```
Let's now group our data by country, and use the `GroupBy.first()` method again to get the first row of each group-by-country. We know that the first year for which we have data is 1952, so let's immediately save that into a new variable named `year1952`, and keep playing with it. Below, we double-check the type of `year1952`, print the first five rows using the `head()` method, and get the minimum value of the population column.
```
by_country = life_expect.groupby('country')
```
The first year for all groups-by-country is 1952. Let's save that first group into a new dataframe, and keep playing with it.
```
year1952 = by_country.first()
type(year1952)
year1952.head()
year1952['pop'].min()
```
## Visualizing the data
In [Lesson 3](http://go.gwu.edu/engcomp2lesson3) of this module, you learned to make bubble charts, allowing you to show at least three features of the data in one plot. We'd like to make a bubble chart of life expectancy vs. per-capita GDP, with the size of the bubble proportional to the population. To do that, we'll need to extract the population values into a NumPy array.
```
populations = year1952['pop'].values
```
If you use the `populations` array unmodified as the size of the bubbles, they come out _huge_ and you get one solid color covering the figure (we tried it!). To make the bubble sizes reasonable, we divide by 60,000—an approximation to the minimum population—so the smallest bubble size is about 1 pt. Finally, we choose a logarithmic scale in the abscissa (the GDP). Check it out!
```
year1952.plot.scatter(figsize=(12,8),
x='gdpPercap', y='lifeExp', s=populations/60000,
title='Life expectancy in the year 1952',
edgecolors="white")
pyplot.xscale('log');
```
That's neat! But the Rosling bubble charts include one more feature in the data: the continent of each country, using a color scheme. Can we do that?
Matplotlib [colormaps](https://matplotlib.org/examples/color/colormaps_reference.html) offer several options for _qualitative_ data, using discrete colors mapped to a sequence of numbers. We'd like to use the `Accent` colormap to code countries by continent. But we need a numeric code to assign to each continent, so it can be mapped to a color.
The [Gapminder Animation](https://python-graph-gallery.com/341-python-gapminder-animation/) example at The Python Graph Gallery has a good tip: using the `pandas` _Categorical_ data type, which associates a numerical value for each category in a column containing qualitative (categorical) data.
Let's see what we get if we apply `pandas.Categorical()` to the `continent` column:
```
pandas.Categorical(year1952['continent'])
```
Right. We see that the `continent` column has repeated entries of 5 distinct categories, one for each continent. In order, they are: Africa, Americas, Asia, Europe, Oceania.
Applying [`pandas.Categorical()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Categorical.html) to the `continent` column will create an integer value—the _code_ of the category—associated to each entry. We can then use these integer values to map to the colors in a colormap. The trick will be to extract the `codes` attribute of the _Categorical_ data and save that into a new variable named `colors` (a NumPy array).
```
colors = pandas.Categorical(year1952['continent']).codes
type(colors)
len(colors)
print(colors)
```
You see that `colors` is a NumPy array of 142 integers that can take the values: ` 0, 1, 2, 3, 4`. They are the codes to `continent` categories: `Africa, Americas, Asia, Europe, Oceania`. For example, the first entry is `2`, corresponding to Asia, the continent of Afghanistan.
Now we're ready to re-do our bubble chart, using the array `colors` to set the color of the bubble (according to the continent for the given country).
```
year1952.plot.scatter(figsize=(12,8),
x='gdpPercap', y='lifeExp', s=populations/60000,
c=colors, cmap='Accent',
title='Life expectancy vs. per-capita GDP in the year 1952,\n color-coded by continent',
logx = 'True',
ylim = (25,85),
xlim = (1e2, 1e5),
edgecolors="white",
alpha=0.6);
```
##### Note:
We encountered a bug in `pandas` scatter plots! The labels of the $x$-axis disappeared when we added the colors to the bubbles. We tried several things to fix it, like adding the line `pyplot.xlabel("GDP per Capita")` at the end of the cell, but nothing worked. Searching online, we found an open [issue report](https://github.com/pandas-dev/pandas/issues/10611) for this problem.
##### Discuss with your neighbor:
What do you see in the colored bubble chart, in regards to 1952 conditions in different countries and different continents?
Can you guess some countries? Can you figure out which color corresponds to which continent?
### Spaghetti plot of life expectancy
The bubble plot shows us that 1952 life expectancies varied quite a lot from country to country: from a minimum of under 30 years, to a maximum under 75 years. The first part of Prof. Rosling's dying message is _"that the world is making progress_." Is it the case that countries around the world _all_ make progress in life expectancy over the years?
We have an idea: what if we plot a line of life expectancy over time, for every country in the data set? It could be a bit messy, but it may give an _overall view_ of the world-wide progress in life expectancy.
Below, we'll make such a plot, with 142 lines: one for each country. This type of graphic is called a **spaghetti plot** …for obvious reasons!
To add a line for each country on the same plot, we'll use a `for`-statement and the `by_country` groups. For each country-group, the line plot takes the series `year` and `lifeExp` as $(x,y)$ coordinates. Since the spaghetti plot is quite busy, we also took off the box around the plot. Study this code carefully.
```
# Spaghetti plot: one semi-transparent line per country (142 in total).
pyplot.figure(figsize=(12,8))
for key,group in by_country:
    # each group is the dataframe of rows belonging to a single country
    pyplot.plot(group['year'], group['lifeExp'], alpha=0.4)
pyplot.title('Life expectancy in the years 1952–2007, across 142 countries')
# remove the bounding box to reduce visual clutter in the busy plot
pyplot.box(on=None);
```
## Dig deeper and get insights from the data
The spaghetti plot shows a general upwards tendency, but clearly not all countries have a monotonically increasing life expectancy. Some show a one-year sharp drop (but remember, this data jumps every 5 years), while others drop over several years.
And something catastrophic happened to one country in 1977, and to another country in 1992.
Let's investigate this!
We'd like to explore the data for a particular year: first 1977, then 1992. For those years, we can get the minimum life expectancy, and then find out which country experienced it.
To access a particular group in _GroupBy_ data, `pandas` has a `get_group(key)` method, where `key` is the label of the group.
For example, we can access yearly data from the `by_year` groups using the year as key. The return type will be a dataframe, containing the same columns as the original data.
```
type(by_year.get_group(1977))
type(by_year['lifeExp'].get_group(1977))
```
Now we can find the minimum value of life expectancy at the specific years of interest, using the `Series.min()` method. Let's do this for 1977 and 1992, and save the values in new Python variables, to reuse later.
```
min_lifeExp1977 = by_year['lifeExp'].get_group(1977).min()
min_lifeExp1977
min_lifeExp1992 = by_year['lifeExp'].get_group(1992).min()
min_lifeExp1992
```
Those values of life expectancy are just terrible! Are you curious to know what countries experienced the dramatic drops in life expectancy?
We can find the row _index_ of the minimum value, thanks to the [`pandas.Series.idxmin()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.idxmin.html) method. The row indices are preserved from the original dataframe `life_expect` to its groupings, so the index will help us identify the country. Check it out.
```
by_year['lifeExp'].get_group(1977).idxmin()
life_expect['country'][221]
by_country.get_group('Cambodia')
```
We searched online to learn what was happening in Cambodia to cause such a drop in life expectancy in the 1970s. Indeed, Cambodia experienced a _mortality crisis_ due to several factors that combined into a perfect storm: war, ethnic cleansing and migration, collapse of the health system, and cruel famine [2].
It's hard for a country to keep vital statistics under such circumstances, and certainly there are uncertainties in the data for Cambodia in the 1970s.
However, various sources report a life expectancy there in 1977 that was _under 20 years_.
See, for example, the World Bank's interactive web page on [Cambodia](https://data.worldbank.org/country/cambodia).
There is something strange with the data from The Python Graph Gallery. Is it wrong?
Maybe they are giving us _average_ life expectancy in a five-year period.
Let's look at the other dip in life expectancy, in 1992.
```
by_year['lifeExp'].get_group(1992).idxmin()
life_expect['country'][1292]
by_country.get_group('Rwanda')
```
The World Bank's interactive web page on [Rwanda](https://data.worldbank.org/country/rwanda) gives a life expectancy of 28.1 in 1992, and even lower in 1993, at 27.6 years.
This doesn't match the value from the data set we sourced from The Python Graph Gallery, which gives 23.6—and since this value is _lower_ than the minimum value given by the World Bank, we conclude that the discrepancy is not caused by 5-year averaging.
## Checking data quality
All our work here started with loading a data set we found online. What if this data set has _quality_ problems?
Well, nothing better than asking the author of the web source for the data. We used Twitter to communicate with the author of The Python Graph Gallery, and he replied with a link to _his source_: a data package used for teaching a course in Exploratory Data Analysis at the University of British Columbia.
```
%%html
<blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">Hi. Didn't receive your email... Gapminder comes from this R library: <a href="https://t.co/BU1IFIGSxm">https://t.co/BU1IFIGSxm</a>. I will add citation asap.</p>— R+Py Graph Galleries (@R_Graph_Gallery) <a href="https://twitter.com/R_Graph_Gallery/status/920074231269941248?ref_src=twsrc%5Etfw">October 16, 2017</a></blockquote> <script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
```
Note one immediate outcome of our reaching out to the author of The Python Graph Gallery: he realized he was not citing the source of his data [3], and promised to add proper credit. _It's always good form to credit your sources!_
We visited the online repository of the data source, and posted an [issue report](https://github.com/jennybc/gapminder/issues/18) there, with our questions about data quality. The author promptly responded, saying that _her_ source was the [Gapminder.org website](http://www.gapminder.org/data/)—**Gapminder** is the non-profit founded by Hans Rosling to host public data and visualizations. She also said: _" I don't doubt there could be data quality problems! It should definitely NOT be used as an authoritative source for life expectancy"_
So it turns out that the data we're using comes from a set of tools meant for teaching, and is not up-to-date with the latest vital statistics. The author ended up [adding a warning](https://github.com/jennybc/gapminder/commit/7b3ac7f477c78f21865fa7defea20e72cb9e2b8a) to make this clear to visitors of the repository on GitHub.
#### This is a wonderful example of how people collaborate online via the open-source model.
##### Note:
For the most accurate data, you can visit the website of the [World Bank](https://data.worldbank.org).
## Using widgets to visualize interactively
One more thing! This whole exploration began with our viewing the 2006 TED Talk by Hans Rosling: ["The best stats you've ever seen"](https://www.ted.com/talks/hans_rosling_shows_the_best_stats_you_ve_ever_seen). One of the most effective parts of the presentation is seeing the _animated_ bubble chart, illustrating how countries became healthier and richer over time. Do you want to make something like that?
You can! Introducing [Jupyter Widgets](https://ipywidgets.readthedocs.io/en/latest/user_guide.html). The magic of interactive widgets is that they tie together the running Python code in a Jupyter notebook with Javascript and HTML running in the browser. You can use widgets to build interactive controls on data visualizations, with buttons, sliders, and more.
To use widgets, the first step is to import the `widgets` module.
```
from ipywidgets import widgets
```
After importing `widgets`, you have available several UI (User Interaction) elements. One of our favorites is a _Slider_: an interactive sliding button. Here is a default slider that takes integer values, from 0 to 100 (but does nothing):
```
widgets.IntSlider()
```
What we'd like to do is make an interactive visualization of bubble charts, with the year in a slider, so that we can run forwards and backwards in time by sliding the button, watching our plot update the bubbles in real time. Sound like magic? It almost is.
The magic happens when you program what should happen when the value in the slider changes. A typical scenario is having a function that is executed with the value in the slider, interactively. To create that, we need two things:
1. A function that will be called with the slider values, and
2. A call to an _interaction_ function from the `ipywidgets` package.
Several interaction functions are available, for different actions you expect from the user: a click, a text entered in a box, or sliding the button on a slider.
You will need to explore the Jupyter Widgets documentation [4] to learn more.
For this example, we'll be using a slider, a plotting function that makes our bubble chart, and the [`.interact()`](http://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html#) function to call our plotting function with each value of the slider.
We do everything in one cell below. The first line creates an integer-value slider with our known years—from a minimum 1952, to a maximum 2007, stepping by 5—and assigns it to the variable name `slider`.
Next, we define the function `roslingplot()`, which re-calculates the array of population values, gets the year-group we need from the `by_year` _GroupBy_ object, and makes a scatter plot of life expectancy vs. per-capita income, like we did above. The `populations` array (divided by 60,000) sets the size of the bubble, and the previously defined `colors` array sets the color coding by continent.
We also removed the colorbar (which added little information), and added the option `sharex=False` following the workaround suggested by someone on the open [issue report](https://github.com/pandas-dev/pandas/issues/10611) for the plotting bug we mentioned above.
The last line in the cell below is a call to `.interact()`, passing our plotting function and the slider value assigned to its argument, `year`. Watch the magic happen!
```
# Integer slider over the available years: 1952-2007, stepping by 5.
slider = widgets.IntSlider(min=1952, max=2007, step=5)

def roslingplot(year):
    # Bubble chart of life expectancy vs. per-capita GDP for one year;
    # called by the widget machinery with the current slider value.
    # Bubble size is population (scaled), color encodes the continent.
    populations = by_year.get_group(year)['pop'].values
    by_year.get_group(year).plot.scatter(figsize=(12,8),
        x='gdpPercap', y='lifeExp', s=populations/60000,
        c=colors, cmap='Accent',
        title='Life expectancy vs per-capita GDP in the year '+ str(year)+'\n',
        logx = 'True',
        ylim = (25,85),
        xlim = (1e2, 1e5),
        edgecolors="white",
        alpha=0.6,
        colorbar=False,  # the colorbar added little information
        sharex=False)  # workaround for pandas issue #10611 (missing x-axis labels)
    pyplot.show();

# Re-run roslingplot() with the slider's value each time it changes.
widgets.interact(roslingplot, year=slider);
```
## References
1. [The Soviet War in Afghanistan, 1979-1989](https://www.theatlantic.com/photo/2014/08/the-soviet-war-in-afghanistan-1979-1989/100786/), The Atlantic (2014), by Alan Taylor.
2. US National Research Council Roundtable on the Demography of Forced Migration; H.E. Reed, C.B. Keely, editors. Forced Migration & Mortality (2001), National Academies Press, Washington DC; Chapter 5: The Demographic Analysis of Mortality Crises: The Case of Cambodia, 1970-1979, Patrick Heuveline. Available at: https://www.ncbi.nlm.nih.gov/books/NBK223346/
3. gapminder R data package. Licensed CC-BY 3.0 by Jennifer (Jenny) Bryan (2015) https://github.com/jennybc/gapminder
4. [Jupyter Widgets User Guide](https://ipywidgets.readthedocs.io/en/latest/user_guide.html)
```
# Execute this cell to load the notebook's style sheet, then ignore it
from IPython.core.display import HTML
css_file = '../../style/custom.css'
HTML(open(css_file, "r").read())
```
| github_jupyter |
```
%matplotlib inline
```
# Generating an input file
This examples shows how to generate an input file in HDF5-format, which can
then be processed by the `py-fmas` library code.
This is useful when the project-specific code is separate from the `py-fmas`
library code.
.. codeauthor:: Oliver Melchert <melchert@iqo.uni-hannover.de>
We start by importing the required `py-fmas` functionality. Since the
file-input for `py-fmas` is required to be provided in HDF5-format, we need
some python package that offers the possibility to read and write this
format. Here we opted for the python module h5py which is listed as one of
the dependencies of the `py-fmas` package.
```
import h5py
import numpy as np
import numpy.fft as nfft
```
We then define the desired propagation constant
```
def beta_fun_detuning(w):
    r'''Propagation constant as a function of angular frequency detuning.

    Implements group-velocity dispersion with the expansion coefficients
    listed in Tab. I of Ref. [1]. The coefficients are valid for
    :math:`\lambda = 835\,\mathrm{nm}`, i.e. for :math:`\omega_0 \approx
    2.56\,\mathrm{rad/fs}`.

    References:
        [1] J. M. Dudley, G. Genty, S. Coen,
        Supercontinuum generation in photonic crystal fiber,
        Rev. Mod. Phys. 78 (2006) 1135,
        http://dx.doi.org/10.1103/RevModPhys.78.1135

    Note:
        A corresponding propagation constant is implemented as function
        `define_beta_fun_PCF_Ranka2000` in the `py-fmas` module
        `propagation_constant`.

    Args:
        w (:obj:`numpy.ndarray`): Angular frequency detuning.

    Returns:
        :obj:`numpy.ndarray`: Propagation constant as function of
        frequency detuning.
    '''
    # Dispersion expansion coefficients b_n (fs^n/micron), n = 2..10,
    # taken from Tab. I of Ref. [1].
    c2 = -1.1830e-2
    c3 = 8.1038e-2
    c4 = -0.95205e-1
    c5 = 2.0737e-1
    c6 = -5.3943e-1
    c7 = 1.3486
    c8 = -2.5495
    c9 = 3.0524
    c10 = -1.7140
    # Polynomial coefficients b_n/n!, highest order first; the constant and
    # linear terms vanish for a propagation-constant detuning expansion.
    poly_coeffs = [c10 / 3628800, c9 / 362880, c8 / 40320, c7 / 5040,
                   c6 / 720, c5 / 120, c4 / 24, c3 / 6, c2 / 2, 0., 0.]
    return np.polyval(poly_coeffs, w)
```
Next, we define all parameters needed to specify a simulation run
```
# -- DEFINE SIMULATION PARAMETERS
# ... COMPUTATIONAL DOMAIN
t_max = 3500. # (fs)
t_num = 2**14 # (-)
z_max = 0.1*1e6 # (micron)
z_num = 4000 # (-)
z_skip = 20 # (-)
t = np.linspace(-t_max, t_max, t_num, endpoint=False)
w = nfft.fftfreq(t.size, d=t[1]-t[0])*2*np.pi
# ... MODEL SPECIFIC PARAMETERS
# ... PROPAGATION CONSTANT
c = 0.29979 # (fs/micron)
lam0 = 0.835 # (micron)
w0 = 2*np.pi*c/lam0 # (rad/fs)
beta_w = beta_fun_detuning(w-w0)
gam0 = 0.11e-6 # (1/W/micron)
n2 = gam0*c/w0 # (micron^2/W)
# ... PARAMETERS FOR RAMAN RESPONSE
fR = 0.18 # (-)
tau1= 12.2 # (fs)
tau2= 32.0 # (fs)
# ... INITIAL CONDITION
t0 = 28.4 # (fs)
P0 = 1e4 # (W)
E_0t_fun = lambda t: np.real(np.sqrt(P0)/np.cosh(t/t0)*np.exp(-1j*w0*t))
E_0t = E_0t_fun(t)
```
The subsequent code will store the simulation parameters defined above to the
file `input_file.h5` in the current working directory.
```
def save_data_hdf5(file_path, data_dict):
    """Store every entry of *data_dict* as a dataset in an HDF5 file.

    Args:
        file_path (str): Destination path of the HDF5 file (overwritten).
        data_dict (dict): Mapping of dataset names to the values to store.
    """
    h5_file = h5py.File(file_path, 'w')
    try:
        for name, value in data_dict.items():
            h5_file.create_dataset(name, data=value)
    finally:
        # Close explicitly even if a dataset write fails.
        h5_file.close()
data_dict = {
't_max': t_max,
't_num': t_num,
'z_min': 0.0,
'z_max': z_max,
'z_num': z_num,
'z_skip': z_skip,
'E_0t': E_0t,
'beta_w': beta_w,
'n2': n2,
'fR': fR,
'tau1': tau1,
'tau2': tau2,
'out_file_path': 'out_file.h5'
}
save_data_hdf5('input_file.h5', data_dict)
```
An example, showing how to use `py-fmas` as a black-box simulation tool that
performs a simulation run for the propagation scenario stored under the file
`input_file.h5` is available under the link below:
`sphx_glr_auto_tutorials_basics_g_app.py`
| github_jupyter |
# VacationPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
from pprint import pprint as pp
# Import API key
from api_keys import g_key
```
### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame
```
city_data = '../WeatherPy/city_data_REW.csv'
city_data_df = pd.read_csv(city_data)
city_data_df = city_data_df.dropna()
city_data_df
```
### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.
```
#configure gmaps
gmaps.configure(api_key=g_key)
#defining locations using Lat/Lng and humidity
locations = city_data_df[["Latitude", "Longitude"]].astype(float)
humidity = city_data_df["Humidity (%)"].astype(float)
#printing map and defining weights using Humidity
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=100,
point_radius = 1)
fig.add_layer(heat_layer)
fig
```
### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows will null values.
```
#constructing new data frame with vacation parameters
vacation_city_df = city_data_df.loc[city_data_df["Wind Speed (MPH)"] < 10]
vacation_city_df = vacation_city_df.loc[(vacation_city_df["Temperature (F)"] < 80) | (vacation_city_df["Temperature (F)"] > 70) ]
vacation_city_df = vacation_city_df.loc[vacation_city_df["Cloudiness (%)"] == 0]
vacation_city_df = vacation_city_df.loc[vacation_city_df["Humidity (%)"] < 30]
if len(vacation_city_df) > 10:
vacation_city_df = vacation_city_df[:-(len(vacation_city_df)-10)]
vacation_city_df
```
### Hotel Map
* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels with 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.
```
#defining new data frame and adding hotel column
hotel_df = vacation_city_df
hotel_df["Hotel Name"] = ""
hotel_df
#hitting google places API for hotel data
hotels = []
for i, row in hotel_df.iterrows():
lat = row["Latitude"]
lng = row["Longitude"]
target_radius = 5000
# set up a parameters dictionary
params = {
"location": f"{lat}, {lng}",
"type": "lodging",
"key": g_key,
"radius": target_radius,
}
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
response = requests.get(base_url, params=params).json()
#row["Hotel Name"] = response["results"][0]["name"]
try:
hotels.append(response["results"][0]["name"])
except(IndexError):
hotels.append("No Hotels in Target Radius")
hotel_df["Hotel Name"] = hotels
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Latitude", "Longitude"]]
# Add marker layer ontop of heat map
#Plot the hotels on top of the humidity heatmap with each pin containing the Hotel Name, City, and Country.
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(marker_layer)
# Display figure
fig
```
| github_jupyter |
# Train Telecom Customer Churn Prediction with XGBoost
This tutorial is based on [this](https://www.kaggle.com/pavanraj159/telecom-customer-churn-prediction/comments#6.-Model-Performances) Kaggle notebook and [this](https://github.com/gojek/feast/tree/master/examples/feast-xgboost-churn-prediction-tutorial) Feast notebook
```
import numpy as np
import pandas as pd
from hops import featurestore, hdfs
from hops import numpy_helper as numpy
from hops import pandas_helper as pandas
import os
import itertools
import warnings
warnings.filterwarnings("ignore")
import io
import statsmodels, yellowbrick
import sklearn # Tested with 0.22.1
import imblearn
from slugify import slugify
```
### 1.1 Data
```
telecom_df = featurestore.get_featuregroup("telcom_featuregroup", dataframe_type="pandas")
telecom_df.head()
```
### 1.6 Data Preparation for Training
```
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix,accuracy_score,classification_report
from sklearn.metrics import roc_auc_score,roc_curve,scorer
from sklearn.metrics import f1_score
import statsmodels.api as sm
from sklearn.metrics import precision_score,recall_score
from yellowbrick.classifier import DiscriminationThreshold
Id_col = ['customer_id']
target_col = ["churn"]
# Split into a train and test set
train, test = train_test_split(telecom_df,test_size = .25 ,random_state = 111)
# Separating dependent and independent variables
cols = [i for i in telecom_df.columns if i not in Id_col + target_col]
training_x = train[cols]
training_y = train[target_col]
testing_x = test[cols]
testing_y = test[target_col]
```
### 1.7 Training
```
from xgboost import XGBClassifier
xgb_model = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bytree=1, gamma=0, learning_rate=0.9, max_delta_step=0,
max_depth=7, min_child_weight=1, missing=None, n_estimators=100,
n_jobs=1, nthread=None, objective='binary:logistic', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=True, subsample=1)
# Train model
xgb_model.fit(training_x, training_y)
predictions = xgb_model.predict(testing_x)
probabilities = xgb_model.predict_proba(testing_x)
```
### 1.8 Analysis
```
coefficients = pd.DataFrame(xgb_model.feature_importances_)
column_df = pd.DataFrame(cols)
coef_sumry = (pd.merge(coefficients, column_df, left_index=True,
right_index=True, how="left"))
coef_sumry.columns = ["coefficients", "features"]
coef_sumry = coef_sumry.sort_values(by="coefficients", ascending=False)
acc = accuracy_score(testing_y, predictions)
print(xgb_model)
print("\n Classification report : \n", classification_report(testing_y, predictions))
print("Accuracy Score : ", acc)
from hops import model
import pickle
MODEL_NAME = "XGBoost_Churn_Classifier"
file_name = "xgb_reg.pkl"
hdfs_path = "Resources/xgboost_model"
pickle.dump(xgb_model, open(file_name, "wb"))
hdfs.mkdir(hdfs_path)
hdfs.copy_to_hdfs(file_name, hdfs_path, overwrite=True)
# test that we can load and use the model
xgb_model_loaded = pickle.load(open(file_name, "rb"))
xgb_model_loaded.predict(testing_x)[0] == xgb_model.predict(testing_x)[0]
# save to the model registry
model.export(hdfs_path, MODEL_NAME, metrics={'accuracy': acc})
```
| github_jupyter |
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
import numpy as np
import scanpy.api as sc
from anndata import read_h5ad
from anndata import AnnData
import scipy as sp
import scipy.stats
from gprofiler import GProfiler
import pickle
# Other specific functions
from itertools import product
from statsmodels.stats.multitest import multipletests
import util
# R related packages
import rpy2.rinterface_lib.callbacks
import logging
from rpy2.robjects import pandas2ri
import anndata2ri
# Ignore R warning messages
#Note: this can be commented out to get more verbose R output
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR)
# Automatically convert rpy2 outputs to pandas dataframes
pandas2ri.activate()
anndata2ri.activate()
%load_ext rpy2.ipython
# autoreload
%load_ext autoreload
%autoreload 2
# logging
sc.logging.print_versions()
%%R
library(MAST)
```
## Load data
```
# Data path
data_path = '/data3/martin/tms_gene_data'
output_folder = data_path + '/DE_result'
# Load the data
adata_combine = util.load_normalized_data(data_path)
temp_facs = adata_combine[adata_combine.obs['b_method']=='facs',]
temp_droplet = adata_combine[adata_combine.obs['b_method']=='droplet',]
```
## Generate a list of tissues for DE testing
```
tissue_list = list(set(temp_droplet.obs['tissue']))
min_cell_number = 1
analysis_list = []
analysis_info = {}
# for cell_type in cell_type_list:
for tissue in tissue_list:
analyte = tissue
ind_select = (temp_droplet.obs['tissue'] == tissue)
n_young = (temp_droplet.obs['age'][ind_select].isin(['1m', '3m'])).sum()
n_old = (temp_droplet.obs['age'][ind_select].isin(['18m', '21m',
'24m', '30m'])).sum()
analysis_info[analyte] = {}
analysis_info[analyte]['n_young'] = n_young
analysis_info[analyte]['n_old'] = n_old
if (n_young>min_cell_number) & (n_old>min_cell_number):
print('%s, n_young=%d, n_old=%d'%(analyte, n_young, n_old))
analysis_list.append(analyte)
```
### DE using R package MAST
```
## DE testing
gene_name_list = np.array(temp_droplet.var_names)
DE_result_MAST = {}
for i_analyte,analyte in enumerate(analysis_list):
print(analyte, '%d/%d'%(i_analyte, len(analysis_list)))
tissue = analyte
ind_select = (temp_droplet.obs['tissue'] == tissue)
adata_temp = temp_droplet[ind_select,]
# reformatting
adata_temp.X = np.array(adata_temp.X.todense())
adata_temp.obs['condition'] = [int(x[:-1]) for x in adata_temp.obs['age']]
adata_temp.obs = adata_temp.obs[['condition', 'sex']]
if len(set(adata_temp.obs['sex'])) <2:
covariate = ''
else:
covariate = '+sex'
# # toy example
# covariate = ''
# np.random.seed(0)
# ind_select = np.random.permutation(adata_temp.shape[0])[0:100]
# ind_select = np.sort(ind_select)
# adata_temp = adata_temp[ind_select, 0:3]
# adata_temp.X[:,0] = (adata_temp.obs['sex'] == 'male')*3
# adata_temp.X[:,1] = (adata_temp.obs['condition'])*3
# DE using MAST
R_cmd = util.call_MAST_age()
get_ipython().run_cell_magic(u'R', u'-i adata_temp -i covariate -o de_res', R_cmd)
de_res.columns = ['gene', 'raw-p', 'coef', 'bh-p']
de_res.index = de_res['gene']
DE_result_MAST[analyte] = pd.DataFrame(index = gene_name_list)
DE_result_MAST[analyte] = DE_result_MAST[analyte].join(de_res)
# fc between yound and old
X = adata_temp.X
y = (adata_temp.obs['condition']>10)
DE_result_MAST[analyte]['fc'] = X[y,:].mean(axis=0) - X[~y,:].mean(axis=0)
# break
```
### Save DE results
```
with open(output_folder+'/DE_tissue_droplet.pickle', 'wb') as handle:
pickle.dump(DE_result_MAST, handle)
pickle.dump(analysis_list, handle)
pickle.dump(analysis_info, handle)
```
| github_jupyter |
# Using `bw2waterbalancer`
Notebook showing typical usage of `bw2waterbalancer`
## Generating the samples
`bw2waterbalancer` works with Brightway2. You only need to set as current a project in which the database whose water exchanges you want to balance is imported.
```
import brightway2 as bw
import numpy as np
bw.projects.set_current('ei36cutoff')
```
The only Class you need is the `DatabaseWaterBalancer`:
```
from bw2waterbalancer import DatabaseWaterBalancer
```
Instantiating the DatabaseWaterBalancer will automatically identify activities that are associated with water exchanges.
```
dwb = DatabaseWaterBalancer(
ecoinvent_version="3.6", # used to identify activities with water production exchanges
database_name="ei36_cutoff", #name the LCI db in the brightway2 project
)
```
Generating presamples for the whole database is a lengthy process. Thankfully, it only ever needs to be done once per database:
```
dwb.add_samples_for_all_acts(iterations=1000)
```
The samples and associated indices are stored as attributes:
```
dwb.matrix_samples
dwb.matrix_samples.shape
dwb.matrix_indices[0:10] # First ten indices
len(dwb.matrix_indices)
```
These can directly be used to generate [`presamples`](https://presamples.readthedocs.io/):
```
presamples_id, presamples_fp = dwb.create_presamples(
name=None, #Could have specified a string as name, not passing anything will use automatically generated random name
dirpath=None, #Could have specified a directory path to save presamples somewhere specific
id_=None, #Could have specified a string as id, not passing anything will use automatically generated random id
seed='sequential', #or None, or int.
)
```
## Using the samples
The samples are formatted for use in brighway2 via the presamples package.
The following function calculates:
- Deterministic results, using `bw.LCA`
- Stochastic results, using `bw.MonteCarloLCA`
- Stochastic results using presamples, using `bw.MonteCarloLCA` and passing `presamples=[presamples_fp]`
The ratio of stochastic results to deterministic results are then plotted for Monte Carlo results with and without presamples.
Ratios for Monte Carlo with presamples are on the order of 1.
Ratios for Monte Carlo without presamples are much greater — for the randomly selected activities, by up to two orders of magnitude.
```
def check_presamples_act(act_key, ps_fp, lcia_method, iterations=1000):
    """Plot histograms of Monte Carlo sample / deterministic-score ratios.

    Runs a deterministic LCA as a reference, then two Monte Carlo runs
    (without and with the presamples package) and overlays histograms of
    each Monte Carlo score divided by the deterministic score.

    Args:
        act_key: Brightway2 activity key, e.g. ``('db_name', 'code')``.
        ps_fp: Filepath of the presamples package to use.
        lcia_method: LCIA method tuple passed to the LCA constructors.
        iterations (int): Number of Monte Carlo iterations for each case.
    """
    # Deterministic reference score.
    # BUGFIX: the original ignored the `lcia_method` parameter and used the
    # notebook-global `m` in all three constructors below.
    lca = bw.LCA({act_key: 1}, method=lcia_method)
    lca.lci()
    lca.lcia()
    # Monte Carlo without presamples.
    mc_arr_wo = np.empty(shape=iterations)
    mc = bw.MonteCarloLCA({act_key: 1}, method=lcia_method)
    for i in range(iterations):
        mc_arr_wo[i] = next(mc) / lca.score
    # Monte Carlo with the water-balanced presamples.
    mc_arr_w = np.empty(shape=iterations)
    mc_w = bw.MonteCarloLCA({act_key: 1}, method=lcia_method, presamples=[ps_fp])
    for i in range(iterations):
        mc_arr_w[i] = next(mc_w) / lca.score
    plt.hist(mc_arr_wo, histtype="step", color='orange', label="without presamples")
    plt.hist(mc_arr_w, histtype="step", color='green', label="with presamples")
    plt.legend()
Let's run this on a couple of random ecoinvent products with the ImpactWorld+ water scarcity LCIA method:
```
m=('IMPACTWorld+ (Default_Recommended_Midpoint 1.23)', 'Midpoint', 'Water scarcity')
import matplotlib.pyplot as plt
%matplotlib inline
act = bw.Database('ei36_cutoff').random()
print("Randomly working on ", act)
check_presamples_act(act.key, presamples_fp, m)
act = bw.Database('ei36_cutoff').random()
print("Randomly working on ", act)
check_presamples_act(act.key, presamples_fp, m)
act = bw.Database('ei36_cutoff').random()
print("Randomly working on ", act)
check_presamples_act(act.key, presamples_fp, m)
act = bw.Database('ei36_cutoff').random()
print("Randomly working on ", act)
check_presamples_act(act.key, presamples_fp, m)
act = bw.Database('ei36_cutoff').random()
print("Randomly working on ", act)
check_presamples_act(act.key, presamples_fp, m)
```
| github_jupyter |
# Brian Larsen - 28 April 2016 balarsen@lanl.gov
# Overview
This is meant to be a proof of concept example of the SWx to Mission instrument prediction planned for AFTAC funding.
## Abstract
There is currently no accepted method within SNDD for the time-dependent quantification of environmental background in the mission instruments; this work aims to provide that capability. This work provides a preliminary proof of concept of the techniques in order to both show utility and provide a baseline for improvement. The methods incorporated here are Bayesian MCMC curve fitting and Bayesian posterior sampling. The approach is to utilize data from SWx and mission instruments to find (linear) correlation coefficients using all the data. Then, utilizing the relations discovered in the correlation step, the predicted response of the mission instruments is created.
## Goals
1. Provide a quantifiable prediction of count rate in the mission instruments
1. Need to study which data are required to provide the best prediction
1. First step is beginning with data moving toward prediction, second step is a first principle assessment of background based on SAGE
1. Provide an automatic method to determine a quantifiable deviation from the prediction in order to determine Events of Interest (EOI)
1. Provide an update and future capability into the method for update and reassessment of accuracy and addition of additional data
1. Provide mechanisms for the inclusion of the model and prediction rules into DIORAMA without DIORAMA having to run MCMC.
### The Steps are
1. Perform a correlation analysis between the two data sets figuring out which variables make the best prediction
1. Use the results of this correlation to predict the response for the Mission instrument from the SWx
1. Utilize the predicted response to determine quantifiable count rate prediction limits
1. Develop a technique to quantify excursions of the observed data from the predicted limits to flag as EOI with an interest score
```
!date
# Standard Library Python Modules
# Common Python Modules
import matplotlib.pyplot as plt
import numpy as np
import spacepy.plot as spp
import spacepy.toolbox as tb
import pandas as pd
import pymc # this is the MCMC tool
# put plots into this document
%matplotlib inline
```
Create simulated data that can be used in this proof of concept
```
# observed data
from scipy.signal import savgol_filter
# make a time dependent x
t_x = np.random.uniform(0, 1, 500)
t_x = savgol_filter(t_x, 95, 2)[95//2:-95//2]
t_x -= t_x.min()
t_x /= (t_x.max() - t_x.min())
plt.plot(t_x)
plt.xlabel('time')
plt.ylabel('SWx data value')
a = 6
b = 2
sigma = 2.0
y_obs = a*t_x + b + np.random.normal(0, sigma, len(t_x))
data = pd.DataFrame(np.array([t_x, y_obs]).T, columns=['x', 'y'])
x = t_x
data.plot(x='x', y='y', kind='scatter', s=50)
plt.xlabel('SWx inst')
plt.ylabel('Mission Inst')
# define priors
a = pymc.Normal('slope', mu=0, tau=1.0/10**2)
b = pymc.Normal('intercept', mu=0, tau=1.0/10**2)
tau = pymc.Gamma("tau", alpha=0.1, beta=0.1)
# define likelihood
# Deterministic pymc node: mean of the Normal likelihood, linear in x.
@pymc.deterministic
def mu(a=a, b=b, x=x):
    """Return the linear predictor mu = a*x + b (slope a, intercept b)."""
    return a*x + b
y = pymc.Normal('y', mu=mu, tau=tau, value=y_obs, observed=True)
# inference
m = pymc.Model([a, b, tau, x, y])
mc = pymc.MCMC(m)
# run 6 chains
for i in range(6):
mc.sample(iter=90000, burn=10000)
# plot up the data and overplot the possible fit lines
data.plot(x='x', y='y', kind='scatter', s=50)
xx = np.linspace(data.x.min(), data.x.max(), 10)
for ii in range(0, len(mc.trace('slope', chain=None)[:]),
len(mc.trace('slope', chain=None)[:])//400):
yy = (xx*mc.trace('slope', chain=None)[:][ii] +
mc.trace('intercept', chain=None)[:][ii])
plt.plot(xx,yy, c='r')
pymc.Matplot.plot(mc)
pymc.Matplot.summary_plot(mc)
```
Now based on the results above we can use this as a prediction of the Y-data from the X-data into the future
```
```
Now that we have time dependent data use the results from the above correlation to predict what the Y-instrument would have seen
```
int_vals = mc.stats()['intercept']['95% HPD interval']
slope_vals = mc.stats()['slope']['95% HPD interval']
print(int_vals, slope_vals)
y_inst = np.tile(t_x, (2,1)).T * slope_vals + int_vals
plt.plot(y_inst, c='r')
plt.xlabel('time')
plt.ylabel('Mission inst value')
plt.title('Major upper limit of spread')
```
Or do this smarter by sampling the posterior
```
pred = []
for v in t_x:
pred.append(np.percentile(v * mc.trace('slope',
chain=None)[:] +
mc.trace('intercept',
chain=None)[:],
[2.5, 97.5]))
plt.plot(pred, c='r')
plt.xlabel('time')
plt.ylabel('Predicted Mission inst value')
```
# Next steps
1. Statistically validate and/or modify this proof of concept.
1. Determine how to quantify the prediction capability
1. Determine methods to identify EOI outside of prediction
# Issues
* The overall spread in the data are not captured (enveloped) by this proof of concept
# Another possible method
For each value of X (or small range of X) fit a distribution to the Y (what kind?) and use that as a prediction. This is perhaps not as good, as the resulting prediction is likely not to be continuous.
| github_jupyter |
```
import numpy as np
import pandas as pd
%matplotlib inline
import math
from xgboost.sklearn import XGBClassifier
from sklearn.cross_validation import cross_val_score
from sklearn import cross_validation
from sklearn.metrics import roc_auc_score
from matplotlib import pyplot
train = pd.read_csv("xtrain.csv")
target = pd.read_csv("ytrain.csv")
test = pd.read_csv("xtest.csv")
train.head()
train.describe()
target.head()
for column in train:
print column, ": ", len(train[column].unique())
cat_features = []
real_features = []
for column in train:
if len(train[column].unique()) > 21:
real_features.append(column)
else:
cat_features.append(column)
# построим гистограммы для первых 50к значений для категориальных признаков
train[cat_features].head(50000).plot.hist(bins = 100, figsize=(20, 20))
test[cat_features].head(50000).plot.hist(bins = 100, figsize=(20, 20))
# построим гистограммы для первых 50к значений для остальных признаков
train[real_features].head(50000).plot.hist(bins = 100, figsize=(20, 20))
test[real_features].head(50000).plot.hist(bins = 100, figsize=(20, 20))
#гистограммы для теста и обучающей выборки совпадают
import seaborn
seaborn.heatmap(train[real_features].corr(), square=True)
#числовые признаки не коррелируеют между собой
# в данных есть nan values в каждом столбце
train.isnull().sum()
#для категориальных признаков, nan значения заменим -1
#Для действительных признаков - заменим средним значнием
train[cat_features] = train[cat_features].fillna(-1)
for column in train[real_features]:
mean_val = train[column].mean()
train[column] = train[column].fillna(mean_val)
target.mean() #класса 0 больше чем 1
import xgboost as xgb
from sklearn.cross_validation import train_test_split
X_fit, X_eval, y_fit, y_eval= train_test_split(
train, target, test_size=0.20, random_state=1
)
clf = xgb.XGBClassifier(missing=np.nan, max_depth=3,
n_estimators=550, learning_rate=0.05, gamma =0.3, min_child_weight = 3,
subsample=0.9, colsample_bytree=0.8, seed=2000,objective= 'binary:logistic')
clf.fit(X_fit, y_fit, early_stopping_rounds=40, eval_metric="auc", eval_set=[(X_eval, y_eval)])
auc_train = roc_auc_score(y_fit.x, clf.predict(X_fit))
auc_val = roc_auc_score(y_eval.x, clf.predict(X_eval))
print 'auc_train: ', auc_train
print 'auc_val: ', auc_val
#имеет место быть переобучение
eps = 1e-5
dropped_columns = set()
C = train.columns
#Определим константные признаки
for c in C:
if train[c].var() < eps:
print '.. %-30s: too low variance ... column ignored'%(c)
dropped_columns.add(c)
#таких не обнаружено
for i, c1 in enumerate(C):
f1 = train[c1].values
for j, c2 in enumerate(C[i+1:]):
f2 = train[c2].values
if np.all(f1 == f2):
dropped_columns.add(c2)
print c2
# одинаковых полей также нет
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt
forest = ExtraTreesClassifier(n_estimators=150,
random_state=0)
forest.fit(train.head(100000), target.head(100000).x)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Попробуем посмотреть какие признаки значимы с помощью деревьев
print("Feature ranking:")
for f in range(train.head(100000).shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Построим графики
plt.figure()
plt.title("Feature importances")
plt.bar(range(train.head(100000).shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(train.head(100000).shape[1]), indices)
plt.xlim([-1, train.head(100000).shape[1]])
plt.show()
# Явных лидеров как и аутсайдеров среди признаков не видно. Признаки анонимны,
# еще раз обучим модель с более сложными вычислительно гиперпараметрами
from sklearn.cross_validation import train_test_split
import xgboost as xgb
X_fit, X_eval, y_fit, y_eval= train_test_split(
train, target, test_size=0.20, random_state=1
)
clf = xgb.XGBClassifier(missing=np.nan, max_depth=3,
n_estimators=1200, learning_rate=0.05, gamma =0.3, min_child_weight = 3,
subsample=0.9, colsample_bytree=0.8, seed=2000,objective= 'binary:logistic')
clf.fit(X_fit, y_fit, early_stopping_rounds=40, eval_metric="auc", eval_set=[(X_eval, y_eval)])
# формирование результатов
test_target = clf.predict(test)
submission = pd.DataFrame(test_target)
submission.to_csv("test_target.csv", index=False)
```
| github_jupyter |
# Insight into AirBNB Boston Data
A quick glance at [AirBnB Boston data](https://www.kaggle.com/airbnb/boston) arouse curiosity to see if following questions can be convincingly answered using data analysis.
- What are hot locations?
- What are peak seasons?
- Does number of properties in neighbourhood affect the occupancy?
- What are the factors affecting overall occupancy and review ratings?
Load the necessary libraries and list the files in dataset
```
conda install -c conda-forge textblob
import os
print(os.listdir("input"))
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import datetime
from textblob import TextBlob
import nltk
nltk.download('punkt')
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Input data files are available in the "input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("input"))
```
## Data Understanding
### Listings
Take a look at the data set describing all the different properties that are being offered at airBNB in Boston.
```
df_listings = pd.read_csv('input/listings.csv', parse_dates=['host_since'])
print(df_listings.shape)
df_listings.head(n=5)
```
Take a look at missing values in dataset.
```
(100*(df_listings.shape[0] - df_listings.count())/df_listings.shape[0]).plot(kind="bar", title="Percentage Missing Data", figsize=(20, 8));
```
**Following columns have high percentage of missing data**
- neighbourhood_group_cleansed
- square_feet
- has_availability
- license
- jurisdiction_names
### Calendar/Occupancy Data
Load and understand structure of calendar/occupancy data
```
df_calendar = pd.read_csv('input/calendar.csv')
print(df_calendar.shape)
df_calendar.head()
```
- available is a boolean column with string values 'f' and 't' representing occupied and available status
- price values are available only for dates when property is available and missing when property is occupied
### Customer Reviews Data
Glance into the customer reviews to understand the data
```
df_reviews = pd.read_csv('input/reviews.csv')
print(df_reviews.shape)
df_reviews.head()
```
## Data Preparation
### Define some utility functions
```
def amt_str_to_float(text):
    '''
    INPUT:
    text - formatted amount text to convert to float value

    OUTPUT:
    amount - A parsed float value

    Parses amount values specified as "$2,332" into the float value 2332.0.
    Non-string inputs (e.g. NaN or already-numeric values) are returned
    unchanged.
    '''
    if not isinstance(text, str):
        return text
    # Strip an optional leading "$" and thousands separators.
    # (The original sliced off the first character unconditionally, which
    # silently corrupted amounts that lacked the "$" prefix.)
    return float(text.strip().lstrip('$').replace(',', ''))
def pct_str_to_float(text):
    '''
    INPUT:
    text - formatted percentage text to convert to float value

    OUTPUT:
    percentage - A parsed float value

    Parses text percentage values specified as "98%" into the float value
    98.0. Non-string inputs (e.g. NaN) are passed through unchanged.
    '''
    if not isinstance(text, str):
        return text
    # Drop the trailing "%" sign and convert the remainder.
    return float(text[:-1])
def get_sentiment_score(text):
    '''
    INPUT:
    text - input text for sentiment analysis

    OUTPUT:
    score - sentiment score as a float value

    Calculates the mean per-sentence polarity of *text* using the textblob
    library and returns that score.
    '''
    polarities = [sentence.sentiment.polarity
                  for sentence in TextBlob(text).sentences]
    # np.mean of an empty list yields NaN (with a warning), matching the
    # original behavior for text with no sentences.
    return np.mean(polarities)
```
### Cleanup and Prepare Listings Data
- Of various features describing the property we use *description* to calculate the sentiment score and store it as *description_score*. This is done to see if property description has any effect on occupancy and review ratings.
- Drop irrelevant columns
- Apply other data wrangling and imputation techniques to fill the missing values.
```
def prepare_listings_data(df):
    '''
    INPUT:
    df - raw listings dataframe as read from input/listings.csv
    OUTPUT:
    dfc - cleaned copy of the listings data, ready for modeling
    Cleans the listings data: derives sentiment / occupancy / listing-age
    features, drops irrelevant columns, converts binary and formatted-string
    columns to numeric, and imputes missing values.
    '''
    dfc = df.copy()
    # 1. Defined calculated/derived columns
    # Extract sentiment score of the property description
    dfc['description_score'] = dfc.description.apply(get_sentiment_score)
    # Extract annual proportional occupancy for the property
    dfc['occupancy'] = (1 - dfc.availability_365/365)
    # Extract age of property listing (days between host_since and 2017-01-01)
    # NOTE(review): assumes host_since was parsed to datetime upstream — confirm.
    dfc['listing_age'] = (datetime.datetime(2017, 1, 1) - dfc.host_since).apply(lambda col: col.days)
    # 2. Drop irrelevant columns
    columns_having_same_values = ['country', 'country_code', 'state', 'experiences_offered']
    # Drop Columns that are
    # a. descriptive,
    # b. image urls
    # c. High NaN values
    # d. that have been mapped to other calculated/provided columns
    #    - e.g. coarse 'neighbourhood' is considered in analysis instead of geo locations, street etc.
    #    - description is mapped to corresponding sentiment score and availability_365 is mapped to occupancy
    irrelevant_columns = ['listing_url', 'scrape_id', 'last_scraped', 'notes', 'transit',
                          'access', 'interaction', 'house_rules', 'thumbnail_url', 'medium_url',
                          'picture_url', 'xl_picture_url', 'host_url', 'host_name',
                          'host_thumbnail_url', 'host_picture_url', 'host_neighbourhood', 'host_listings_count', 'host_total_listings_count', 'host_verifications',
                          'calculated_host_listings_count', 'reviews_per_month', 'requires_license', 'license', 'jurisdiction_names',
                          'host_id', 'host_location', 'host_about', 'neighbourhood_group_cleansed', 'latitude', 'longitude',
                          'market', 'smart_location', 'street', 'square_feet', 'amenities',
                          'maximum_nights', 'calendar_updated', 'has_availability', 'availability_30', 'availability_60', 'availability_90',
                          'calendar_last_scraped', 'first_review', 'last_review', 'neighbourhood', 'neighborhood_overview',
                          'name', 'summary', 'space', 'description', 'city', 'zipcode', 'availability_365', 'host_since'
                          ]
    # Concatenate instead of extend(): the original extend() mutated
    # columns_having_same_values in place via the shared list reference.
    columns_to_drop = columns_having_same_values + irrelevant_columns
    dfc.drop(columns_to_drop, axis=1, inplace=True)
    # 3. Convert binary columns into 0,1 ('t' -> 1, anything else -> 0)
    binary_columns = ['host_is_superhost', 'host_has_profile_pic', 'host_identity_verified',
                      'is_location_exact', 'instant_bookable', 'require_guest_profile_picture', 'require_guest_phone_verification'
                      ]
    for col in binary_columns:
        dfc[col] = dfc[col].apply(lambda c: 1 if c == 't' else 0)
    # 4. Prepare numeric columns
    # Convert Amount columns to number from string
    dfc['price'] = dfc['price'].apply(amt_str_to_float)
    dfc['weekly_price'] = dfc['weekly_price'].apply(amt_str_to_float)
    dfc['monthly_price'] = dfc['monthly_price'].apply(amt_str_to_float)
    dfc['security_deposit'] = dfc['security_deposit'].apply(amt_str_to_float)
    dfc['cleaning_fee'] = dfc['cleaning_fee'].apply(amt_str_to_float)
    dfc['extra_people'] = dfc['extra_people'].apply(amt_str_to_float)
    # Convert String Percentage values to numeric
    dfc['host_response_rate'] = dfc['host_response_rate'].apply(pct_str_to_float)
    dfc['host_acceptance_rate'] = dfc['host_acceptance_rate'].apply(pct_str_to_float)
    # 5. Apply Imputation to fill missing values
    # Column reassignment instead of chained fillna(inplace=True): the chained
    # inplace form is deprecated in pandas 2.x and stops working under
    # Copy-on-Write (pandas 3.0).
    # security deposit and cleaning fee can be marked 0 if not specified
    dfc['security_deposit'] = dfc['security_deposit'].fillna(0)
    dfc['cleaning_fee'] = dfc['cleaning_fee'].fillna(0)
    # Weekly and Monthly prices can be filled with simple multiplication.
    dfc['weekly_price'] = np.where(np.isnan(dfc['weekly_price']), dfc['price']*7, dfc['weekly_price'])
    dfc['monthly_price'] = np.where(np.isnan(dfc['monthly_price']), dfc['price']*30, dfc['monthly_price'])
    # Missing Number of Bathrooms: We can assume 1 bathroom per bedroom (if bedrooms are specified)
    # Vice-versa Missing Number of Bedrooms: We can assume to be same as number of bathrooms (if specified)
    dfc['bathrooms'] = np.where(np.isnan(dfc['bathrooms']), dfc['bedrooms'], dfc['bathrooms'])
    dfc['bedrooms'] = np.where(np.isnan(dfc['bedrooms']), dfc['bathrooms'], dfc['bedrooms'])
    # Missing number of beds - Fill with average number of beds per bedroom * number_of_bedrooms
    average_beds_per_bedroom = (dfc[dfc.bedrooms > 0].beds/dfc[dfc.bedrooms > 0].bedrooms).mean()
    dfc['beds'] = np.where(np.isnan(dfc['beds']), average_beds_per_bedroom*dfc['bedrooms'], dfc['beds'])
    # Fill host_response_rate and host_acceptance_rate to corresponding mean values
    dfc['host_response_rate'] = dfc['host_response_rate'].fillna(dfc['host_response_rate'].mean())
    dfc['host_acceptance_rate'] = dfc['host_acceptance_rate'].fillna(dfc['host_acceptance_rate'].mean())
    # Fill Categorical variables using mode()
    dfc['host_response_time'] = dfc['host_response_time'].fillna(dfc['host_response_time'].mode()[0])
    dfc['property_type'] = dfc['property_type'].fillna(dfc['property_type'].mode()[0])
    dfc.rename({'neighbourhood_cleansed': 'neighbourhood'}, axis=1, inplace=True)
    return dfc
dfc_listings = prepare_listings_data(df_listings)
dfc_listings.head(n=5)
```
### Cleanup and Prepare Calendar Data
```
def prepare_calendar_data(df):
    '''
    INPUT:
    df - raw calendar dataframe (listing_id, date, available, price)
    OUTPUT:
    prepared - cleaned copy with a status label, numeric price, and month columns
    Cleans the calendar data: maps the 't'/'f' availability flag to a readable
    status label, parses price text into floats, and derives month number and
    month name from the date column.
    '''
    month_abbreviations = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                           'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    prepared = df.copy()
    # Map available=(t,f) => status=(available/occupied)
    prepared['status'] = prepared['available'].apply(
        lambda flag: 'available' if flag == 't' else 'occupied')
    prepared = prepared.drop('available', axis=1)
    # Parse "$1,234" style price text into a float value
    prepared['price'] = prepared['price'].apply(amt_str_to_float)
    # Characters 5:7 of the date string are the month — assumes 'YYYY-MM-DD'
    # formatted dates (TODO confirm against the source CSV).
    month_numbers = prepared['date'].apply(lambda day: int(day[5:7]))
    prepared['month'] = month_numbers
    prepared['month_name'] = month_numbers.apply(lambda m: month_abbreviations[m - 1])
    return prepared
dfc_calendar = prepare_calendar_data(df_calendar)
dfc_calendar.head()
```
Pivot on Month and calculate occupancy percentage per property per month
```
df_occupancy = pd.pivot_table(dfc_calendar.groupby(['listing_id', 'status', 'month', 'month_name']).count().reset_index(), index=["listing_id", "month", "month_name"], columns='status', values='date').reset_index().rename_axis(None, axis=1)
# If property is fully occupied then 'available' remains NaN
# conversely if property is fully available then 'occupied' remains NaN
# it is apt to fill these values as 0
df_occupancy.fillna(0, inplace=True)
df_occupancy['occupancy']=100*df_occupancy['occupied']/(df_occupancy['occupied']+df_occupancy['available'])
df_occupancy.head()
```
### Cleanup and Prepare Review Data
- Calculate the sentiment score from the review comments
- Keep only property id and sentiment score and drop other columns
```
def prepare_reviews_data(df):
    '''
    INPUT:
    df - raw reviews dataframe
    OUTPUT:
    cleaned - dataframe with the listing id and a derived review_score column
    Scores each review comment for sentiment, then drops everything except the
    listing reference and the derived score.
    '''
    cleaned = df.copy()
    # Missing comments (NaN) become empty strings so sentiment scoring
    # always receives a string
    cleaned['comments'] = cleaned['comments'].apply(
        lambda value: value if type(value) == str else '')
    # Extract sentiment score from the review comments
    cleaned['review_score'] = cleaned['comments'].apply(get_sentiment_score)
    # Only the listing id and review_score are needed downstream
    for obsolete in ('reviewer_id', 'reviewer_name', 'id', 'date', 'comments'):
        cleaned = cleaned.drop(obsolete, axis=1)
    return cleaned
dfc_reviews = prepare_reviews_data(df_reviews)
dfc_reviews.head()
```
## Data Modeling
At this point we still have some missing values in ratings columns, primarily for the properties where no reviews were given.
```
# Drop the Listing-Id column from the regression analysis and drop other missing values
df_regression = pd.get_dummies(dfc_listings.drop('id', axis=1).dropna())
```
Fit a linear regression model for predicting occupancy rate and customer review rating
```
def coef_weights(model, columns):
    '''
    INPUT:
    model - a fitted linear model exposing a coef_ attribute
    columns - the feature names corresponding to the model coefficients
    OUTPUT:
    coefs_df - a dataframe holding the variable name ('est_int'), coefficient
               estimate ('coefs'), and abs(estimate) ('abs_coefs'), sorted by
               absolute coefficient in descending order
    Provides a dataframe that can be used to understand the most influential
    coefficients in a linear model by providing the coefficient estimates
    along with the name of the variable attached to the coefficient.
    (Docstring fixed: the previous version documented parameters
    "coefficients"/"X_train" that do not match the actual signature.)
    '''
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = columns
    coefs_df['coefs'] = model.coef_
    coefs_df['abs_coefs'] = np.abs(model.coef_)
    # Largest absolute coefficients first — most influential features on top.
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df
def get_lr_model(df, target):
    '''
    INPUT:
    df - fully numeric dataframe (dummies created, missing values dropped)
    target - name of the column to predict
    OUTPUT:
    model - the fitted LinearRegression model
    train_score - r-squared on the training split
    test_score - r-squared on the held-out test split
    features - dataframe of coefficients sorted by absolute magnitude
    Fits a linear regression for the given target using a 70/30 train/test
    split and reports fit quality plus the most influential features.
    '''
    X = df.drop(target, axis=1)
    y = df[target]
    # Create training and test sets of data (fixed seed for reproducibility)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=42)
    # Instantiate a LinearRegression model with normalized data
    # NOTE(review): the normalize= keyword was deprecated in scikit-learn 1.0
    # and removed in 1.2 — confirm the pinned sklearn version, or switch to a
    # StandardScaler + LinearRegression pipeline.
    model = LinearRegression(normalize=True)
    # Fit your model to the training data
    model = model.fit(X_train, y_train)
    # Predict the response for the training data and the test data
    y_pred = model.predict(X_test)
    # Obtain an rsquared value for both the training and test data
    train_score = r2_score(y_train, model.predict(X_train))
    test_score = r2_score(y_test, y_pred)
    features = coef_weights(model, X_train.columns)
    return model, train_score, test_score, features
occupancy_results = get_lr_model(df_regression, 'occupancy')
reviewscores_results = get_lr_model(df_regression, 'review_scores_rating')
```
## Evaluation
### Hot locations
```
plt_data = dfc_listings[['neighbourhood', 'occupancy']].groupby('neighbourhood').mean()
ax = plt_data.plot(kind="bar", title="Occupancy Percentage by Neighbourhood", label="Neighbourhood", figsize=(18,8));
for p in ax.patches:
ax.annotate('{:.2f}'.format(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005))
```
- Allston, Mission Hill and the Leather District have high average occupancy
- Mattapan and Roslindale have least occupancy rates
### Peak and lean seasons
```
df = df_occupancy[['month', 'occupancy']].groupby(['month']).mean()
ax = df.plot(kind="bar", title="Occupancy Percentage by Month", figsize=(18,8));
for p in ax.patches:
ax.annotate('{:.1f}%'.format(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005))
```
- September and October show high occupancy
- The occupancy is around 50% for most of the remaining period.
**These observations may not be conclusive as they look at data for only one year**
## Does number of listings in neighbourhood affect the occupancy?
Let us see if there are some neighbourhoods showing low occupancy rates coupled with high number of listings.
```
d1 = pd.DataFrame(dfc_listings.neighbourhood.value_counts()).rename(columns={'neighbourhood':'listing_count'})
d2 = dfc_listings[['neighbourhood', 'occupancy']].groupby('neighbourhood').mean()
data1 = pd.merge(d1, d2, left_index=True, right_index=True)
fig, ax1 = plt.subplots(figsize=(18,8))
color = 'tab:blue'
ax1.set_xlabel('Neighbourhood')
ax1.set_ylabel('Number of Listing', color=color)
ax1.bar(data1.index, data1.listing_count, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax1.tick_params(axis='x',rotation=90)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:green'
ax2.set_ylabel('Occupancy', color=color) # we already handled the x-label with ax1
ax2.scatter(data1.index, data1.occupancy, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
```
- Leather District has very less number of listings with high occupancy
- Highest number of listings are in Jamaica Plain and South End. However, these neighbourhoods still have higher occupancy rates compared to many other neighbourhoods.
In summary, the plot is all over the place and there is clearly no sign of over crowding of listings in any neighbourhood.
-
### Features affecting the occupancy and customer ratings
```
# Top 10 features affecting occupancy
occupancy_results[3].head(10)
# Top 10 features affecting review ratings
reviewscores_results[3].head(10)
```
# Conclusion
- There are few neighbourhoods that show higher occupancy compared to others. However, there are no standouts by great margin
- September and October appear to be peak seasons. However, any conclusion on this needs analysis of data over years.
- Poor correlation between number of properties in a neighbourhoood and occupancy rate. **No conclusive evidence to suggest over supply of properties in any neighbourhood**
- Prominent features deciding occupancy
- **Property type seems to be the most important feature that renters look for, with special preference for villas and apartments**
- **This is followed by bed type with general dislike for properties with air-beds and couches**
- Prominent features deciding review ratings
- **Host Response Time showed up as most important feature deciding the review ratings. Possibly due to the first impression effect**
- **This was followed by property type and cancellation policy**
```
!jupyter nbconvert --to html airbnb-boston.ipynb
!mv airbnb-boston.html ../airbnb-boston-report.html
```
| github_jupyter |
<center>
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Module%203/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
# Data Analysis with Python
Estimated time needed: **30** minutes
## Objectives
After completing this lab you will be able to:
* Explore features or characteristics to predict the price of a car
<h2>Table of Contents</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ol>
<li><a href="https://#import_data">Import Data from Module</a></li>
<li><a href="https://#pattern_visualization">Analyzing Individual Feature Patterns using Visualization</a></li>
<li><a href="https://#discriptive_statistics">Descriptive Statistical Analysis</a></li>
<li><a href="https://#basic_grouping">Basics of Grouping</a></li>
<li><a href="https://#correlation_causation">Correlation and Causation</a></li>
<li><a href="https://#anova">ANOVA</a></li>
</ol>
</div>
<hr>
<h3>What are the main characteristics that have the most impact on the car price?</h3>
<h2 id="import_data">1. Import Data from Module 2</h2>
<h4>Setup</h4>
Import libraries:
```
#install specific version of libraries used in lab
#! mamba install pandas==1.3.3
#! mamba install numpy=1.21.2
#! mamba install scipy=1.7.1-y
#! mamba install seaborn=0.9.0-y
import pandas as pd
import numpy as np
```
Load the data and store it in dataframe `df`:
This dataset was hosted on IBM Cloud object. Click <a href="https://cocl.us/DA101EN_object_storage?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01">HERE</a> for free storage.
```
path='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/automobileEDA.csv'
df = pd.read_csv(path)
df.head()
```
<h2 id="pattern_visualization">2. Analyzing Individual Feature Patterns Using Visualization</h2>
To install Seaborn we use pip, the Python package manager.
Import visualization packages "Matplotlib" and "Seaborn". Don't forget about "%matplotlib inline" to plot in a Jupyter notebook.
```
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
<h4>How to choose the right visualization method?</h4>
<p>When visualizing individual variables, it is important to first understand what type of variable you are dealing with. This will help us find the right visualization method for that variable.</p>
```
# list the data types for each column
print(df.dtypes)
```
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h3>Question #1:</h3>
<b>What is the data type of the column "peak-rpm"? </b>
</div>
```
# Write your code below and press Shift+Enter to execute
print(df['peak-rpm'].dtypes)
```
<details><summary>Click here for the solution</summary>
```python
float64
```
</details>
For example, we can calculate the correlation between variables of type "int64" or "float64" using the method "corr":
```
df.corr()
```
The diagonal elements are always one; we will study correlation more precisely Pearson correlation in-depth at the end of the notebook.
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h3> Question #2: </h3>
<p>Find the correlation between the following columns: bore, stroke, compression-ratio, and horsepower.</p>
<p>Hint: if you would like to select those columns, use the following syntax: df[['bore','stroke','compression-ratio','horsepower']]</p>
</div>
```
# Write your code below and press Shift+Enter to execute
df[['bore', 'stroke', 'compression-ratio', 'horsepower']].corr()
```
<details><summary>Click here for the solution</summary>
```python
df[['bore', 'stroke', 'compression-ratio', 'horsepower']].corr()
```
</details>
<h2>Continuous Numerical Variables:</h2>
<p>Continuous numerical variables are variables that may contain any value within some range. They can be of type "int64" or "float64". A great way to visualize these variables is by using scatterplots with fitted lines.</p>
<p>In order to start understanding the (linear) relationship between an individual variable and the price, we can use "regplot" which plots the scatterplot plus the fitted regression line for the data.</p>
Let's see several examples of different linear relationships:
<h3>Positive Linear Relationship</h3>
Let's find the scatterplot of "engine-size" and "price".
```
# Engine size as potential predictor variable of price
sns.regplot(x="engine-size", y="price", data=df)
plt.ylim(0,)
plt.show()
```
<p>As the engine-size goes up, the price goes up: this indicates a positive direct correlation between these two variables. Engine size seems like a pretty good predictor of price since the regression line is almost a perfect diagonal line.</p>
We can examine the correlation between 'engine-size' and 'price' and see that it's approximately 0.87.
```
df[["engine-size", "price"]].corr()
```
Highway mpg is a potential predictor variable of price. Let's find the scatterplot of "highway-mpg" and "price".
```
sns.regplot(x="highway-mpg", y="price", data=df)
plt.ylim(0,)
```
<p>As highway-mpg goes up, the price goes down: this indicates an inverse/negative relationship between these two variables. Highway mpg could potentially be a predictor of price.</p>
We can examine the correlation between 'highway-mpg' and 'price' and see it's approximately -0.704.
```
df[['highway-mpg', 'price']].corr()
```
<h3>Weak Linear Relationship</h3>
Let's see if "peak-rpm" is a predictor variable of "price".
```
sns.regplot(x="peak-rpm", y="price", data=df)
```
<p>Peak rpm does not seem like a good predictor of the price at all since the regression line is close to horizontal. Also, the data points are very scattered and far from the fitted line, showing lots of variability. Therefore, it's not a reliable variable.</p>
We can examine the correlation between 'peak-rpm' and 'price' and see it's approximately -0.101616.
```
df[['peak-rpm','price']].corr()
```
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1> Question 3 a): </h1>
<p>Find the correlation between x="stroke" and y="price".</p>
<p>Hint: if you would like to select those columns, use the following syntax: df[["stroke","price"]]. </p>
</div>
```
# Write your code below and press Shift+Enter to execute
df[['stroke','price']].corr()
```
<details><summary>Click here for the solution</summary>
```python
#The correlation is 0.0823, the non-diagonal elements of the table.
df[["stroke","price"]].corr()
```
</details>
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1>Question 3 b):</h1>
<p>Given the correlation results between "price" and "stroke", do you expect a linear relationship?</p>
<p>Verify your results using the function "regplot()".</p>
</div>
```
# Write your code below and press Shift+Enter to execute
sns.regplot(x='stroke',y='price',data=df)
plt.ylim(0,)
```
<details><summary>Click here for the solution</summary>
```python
#There is a weak correlation between the variable 'stroke' and 'price.' as such regression will not work well. We can see this using "regplot" to demonstrate this.
#Code:
sns.regplot(x="stroke", y="price", data=df)
```
</details>
<h3>Categorical Variables</h3>
<p>These are variables that describe a 'characteristic' of a data unit, and are selected from a small group of categories. The categorical variables can have the type "object" or "int64". A good way to visualize categorical variables is by using boxplots.</p>
Let's look at the relationship between "body-style" and "price".
```
sns.boxplot(x="body-style", y="price", data=df)
```
<p>We see that the distributions of price between the different body-style categories have a significant overlap, so body-style would not be a good predictor of price. Let's examine engine "engine-location" and "price":</p>
```
sns.boxplot(x="engine-location", y="price", data=df)
```
<p>Here we see that the distribution of price between these two engine-location categories, front and rear, are distinct enough to take engine-location as a potential good predictor of price.</p>
Let's examine "drive-wheels" and "price".
```
# drive-wheels
sns.boxplot(x="drive-wheels", y="price", data=df)
```
<p>Here we see that the distribution of price between the different drive-wheels categories differs. As such, drive-wheels could potentially be a predictor of price.</p>
<h2 id="discriptive_statistics">3. Descriptive Statistical Analysis</h2>
<p>Let's first take a look at the variables by utilizing a description method.</p>
<p>The <b>describe</b> function automatically computes basic statistics for all continuous variables. Any NaN values are automatically skipped in these statistics.</p>
This will show:
<ul>
<li>the count of that variable</li>
<li>the mean</li>
<li>the standard deviation (std)</li>
<li>the minimum value</li>
<li>the IQR (Interquartile Range: 25%, 50% and 75%)</li>
<li>the maximum value</li>
</ul>
We can apply the method "describe" as follows:
```
df.describe()
```
The default setting of "describe" skips variables of type object. We can apply the method "describe" on the variables of type 'object' as follows:
```
df.describe(include=['object'])
```
<h3>Value Counts</h3>
<p>Value counts is a good way of understanding how many units of each characteristic/variable we have. We can apply the "value_counts" method on the column "drive-wheels". Don’t forget the method "value_counts" only works on pandas series, not pandas dataframes. As a result, we only include one bracket <code>df['drive-wheels']</code>, not two brackets <code>df[['drive-wheels']]</code>.</p>
```
df['drive-wheels'].value_counts()
```
We can convert the series to a dataframe as follows:
```
df['drive-wheels'].value_counts().to_frame()
```
Let's repeat the above steps but save the results to the dataframe "drive_wheels_counts" and rename the column 'drive-wheels' to 'value_counts'.
```
drive_wheels_counts = df['drive-wheels'].value_counts().to_frame()
drive_wheels_counts.rename(columns={'drive-wheels': 'value_counts'}, inplace=True)
drive_wheels_counts
```
Now let's rename the index to 'drive-wheels':
```
drive_wheels_counts.index.name = 'drive-wheels'
drive_wheels_counts
```
We can repeat the above process for the variable 'engine-location'.
```
# engine-location as variable
engine_loc_counts = df['engine-location'].value_counts().to_frame()
engine_loc_counts.rename(columns={'engine-location': 'value_counts'}, inplace=True)
engine_loc_counts.index.name = 'engine-location'
engine_loc_counts.head(10)
```
<p>After examining the value counts of the engine location, we see that engine location would not be a good predictor variable for the price. This is because we only have three cars with a rear engine and 198 with an engine in the front, so this result is skewed. Thus, we are not able to draw any conclusions about the engine location.</p>
<h2 id="basic_grouping">4. Basics of Grouping</h2>
<p>The "groupby" method groups data by different categories. The data is grouped based on one or several variables, and analysis is performed on the individual groups.</p>
<p>For example, let's group by the variable "drive-wheels". We see that there are 3 different categories of drive wheels.</p>
```
df['drive-wheels'].unique()
```
<p>If we want to know, on average, which type of drive wheel is most valuable, we can group "drive-wheels" and then average them.</p>
<p>We can select the columns 'drive-wheels', 'body-style' and 'price', then assign it to the variable "df_group_one".</p>
```
df_group_one = df[['drive-wheels','body-style','price']]
```
We can then calculate the average price for each of the different categories of data.
```
# grouping results
df_group_one = df_group_one.groupby(['drive-wheels'],as_index=False).mean()
df_group_one
```
<p>From our data, it seems rear-wheel drive vehicles are, on average, the most expensive, while 4-wheel and front-wheel are approximately the same in price.</p>
<p>You can also group by multiple variables. For example, let's group by both 'drive-wheels' and 'body-style'. This groups the dataframe by the unique combination of 'drive-wheels' and 'body-style'. We can store the results in the variable 'grouped_test1'.</p>
```
# grouping results
df_gptest = df[['drive-wheels','body-style','price']]
grouped_test1 = df_gptest.groupby(['drive-wheels','body-style'],as_index=False).mean()
grouped_test1
```
<p>This grouped data is much easier to visualize when it is made into a pivot table. A pivot table is like an Excel spreadsheet, with one variable along the column and another along the row. We can convert the dataframe to a pivot table using the method "pivot" to create a pivot table from the groups.</p>
<p>In this case, we will leave the drive-wheels variable as the rows of the table, and pivot body-style to become the columns of the table:</p>
```
grouped_pivot = grouped_test1.pivot(index='drive-wheels',columns='body-style')
grouped_pivot
```
<p>Often, we won't have data for some of the pivot cells. We can fill these missing cells with the value 0, but any other value could potentially be used as well. It should be mentioned that missing data is quite a complex subject and is an entire course on its own.</p>
```
grouped_pivot = grouped_pivot.fillna(0) #fill missing values with 0
grouped_pivot
```
<div class="alert alert-danger alertdanger" style="margin-top: 20px">
<h1>Question 4:</h1>
<p>Use the "groupby" function to find the average "price" of each car based on "body-style".</p>
</div>
```
# Write your code below and press Shift+Enter to execute
group_data = df_gptest.groupby(['body-style'],as_index=False).mean()
group_data
```
<details><summary>Click here for the solution</summary>
```python
# grouping results
df_gptest2 = df[['body-style','price']]
grouped_test_bodystyle = df_gptest2.groupby(['body-style'],as_index= False).mean()
grouped_test_bodystyle
```
</details>
If you did not import "pyplot", let's do it again.
```
import matplotlib.pyplot as plt
%matplotlib inline
```
<h4>Variables: Drive Wheels and Body Style vs. Price</h4>
Let's use a heat map to visualize the relationship between Body Style vs Price.
```
#use the grouped results
plt.pcolor(grouped_pivot, cmap='RdBu')
plt.colorbar()
plt.show()
```
<p>The heatmap plots the target variable (price) proportional to colour with respect to the variables 'drive-wheel' and 'body-style' on the vertical and horizontal axis, respectively. This allows us to visualize how the price is related to 'drive-wheel' and 'body-style'.</p>
<p>The default labels convey no useful information to us. Let's change that:</p>
```
fig, ax = plt.subplots()
im = ax.pcolor(grouped_pivot, cmap='RdBu')
#label names
row_labels = grouped_pivot.columns.levels[1]
col_labels = grouped_pivot.index
#move ticks and labels to the center
ax.set_xticks(np.arange(grouped_pivot.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(grouped_pivot.shape[0]) + 0.5, minor=False)
#insert labels
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(col_labels, minor=False)
#rotate label if too long
plt.xticks(rotation=90)
fig.colorbar(im)
plt.show()
```
<p>Visualization is very important in data science, and Python visualization packages provide great freedom. We will go more in-depth in a separate Python visualizations course.</p>
<p>The main question we want to answer in this module is, "What are the main characteristics which have the most impact on the car price?".</p>
<p>To get a better measure of the important characteristics, we look at the correlation of these variables with the car price. In other words: how is the car price dependent on this variable?</p>
<h2 id="correlation_causation">5. Correlation and Causation</h2>
<p><b>Correlation</b>: a measure of the extent of interdependence between variables.</p>
<p><b>Causation</b>: the relationship between cause and effect between two variables.</p>
<p>It is important to know the difference between these two. Correlation does not imply causation. Determining correlation is much simpler than determining causation, as causation may require independent experimentation.</p>
<p><b>Pearson Correlation</b></p>
<p>The Pearson Correlation measures the linear dependence between two variables X and Y.</p>
<p>The resulting coefficient is a value between -1 and 1 inclusive, where:</p>
<ul>
<li><b>1</b>: Perfect positive linear correlation.</li>
<li><b>0</b>: No linear correlation, the two variables most likely do not affect each other.</li>
<li><b>-1</b>: Perfect negative linear correlation.</li>
</ul>
<p>Pearson Correlation is the default method of the function "corr". Like before, we can calculate the Pearson Correlation of the of the 'int64' or 'float64' variables.</p>
```
df.corr()
```
Sometimes we would like to know the significance of the correlation estimate.
<b>P-value</b>
<p>What is this P-value? The P-value is the probability value that the correlation between these two variables is statistically significant. Normally, we choose a significance level of 0.05, which means that we are 95% confident that the correlation between the variables is significant.</p>
By convention, when the
<ul>
<li>p-value is $<$ 0.001: we say there is strong evidence that the correlation is significant.</li>
<li>the p-value is $<$ 0.05: there is moderate evidence that the correlation is significant.</li>
<li>the p-value is $<$ 0.1: there is weak evidence that the correlation is significant.</li>
<li>the p-value is $>$ 0.1: there is no evidence that the correlation is significant.</li>
</ul>
We can obtain this information using "stats" module in the "scipy" library.
```
from scipy import stats
```
<h3>Wheel-Base vs. Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'wheel-base' and 'price'.
```
pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between wheel-base and price is statistically significant, although the linear relationship isn't extremely strong (~0.585).</p>
<h3>Horsepower vs. Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'horsepower' and 'price'.
```
pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between horsepower and price is statistically significant, and the linear relationship is quite strong (~0.809, close to 1).</p>
<h3>Length vs. Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'length' and 'price'.
```
pearson_coef, p_value = stats.pearsonr(df['length'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between length and price is statistically significant, and the linear relationship is moderately strong (~0.691).</p>
<h3>Width vs. Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'width' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['width'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value )
```
#### Conclusion:
Since the p-value is < 0.001, the correlation between width and price is statistically significant, and the linear relationship is quite strong (\~0.751).
### Curb-Weight vs. Price
Let's calculate the Pearson Correlation Coefficient and P-value of 'curb-weight' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['curb-weight'], df['price'])
print( "The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between curb-weight and price is statistically significant, and the linear relationship is quite strong (~0.834).</p>
<h3>Engine-Size vs. Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'engine-size' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['engine-size'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P =", p_value)
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between engine-size and price is statistically significant, and the linear relationship is very strong (~0.872).</p>
<h3>Bore vs. Price</h3>
Let's calculate the Pearson Correlation Coefficient and P-value of 'bore' and 'price':
```
pearson_coef, p_value = stats.pearsonr(df['bore'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value )
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between bore and price is statistically significant, but the linear relationship is only moderate (~0.521).</p>
We can repeat the process for 'city-mpg' and 'highway-mpg':
<h3>City-mpg vs. Price</h3>
```
pearson_coef, p_value = stats.pearsonr(df['city-mpg'], df['price'])
print("The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value)
```
<h4>Conclusion:</h4>
<p>Since the p-value is $<$ 0.001, the correlation between city-mpg and price is statistically significant, and the coefficient of about -0.687 shows that the relationship is negative and moderately strong.</p>
<h3>Highway-mpg vs. Price</h3>
```
pearson_coef, p_value = stats.pearsonr(df['highway-mpg'], df['price'])
print( "The Pearson Correlation Coefficient is", pearson_coef, " with a P-value of P = ", p_value )
```
#### Conclusion:
Since the p-value is < 0.001, the correlation between highway-mpg and price is statistically significant, and the coefficient of about -0.705 shows that the relationship is negative and moderately strong.
<h2 id="anova">6. ANOVA</h2>
<h3>ANOVA: Analysis of Variance</h3>
<p>The Analysis of Variance (ANOVA) is a statistical method used to test whether there are significant differences between the means of two or more groups. ANOVA returns two parameters:</p>
<p><b>F-test score</b>: ANOVA assumes the means of all groups are the same, calculates how much the actual means deviate from the assumption, and reports it as the F-test score. A larger score means there is a larger difference between the means.</p>
<p><b>P-value</b>: P-value tells how statistically significant our calculated score value is.</p>
<p>If our price variable is strongly correlated with the variable we are analyzing, we expect ANOVA to return a sizeable F-test score and a small p-value.</p>
<h3>Drive Wheels</h3>
<p>Since ANOVA analyzes the difference between different groups of the same variable, the groupby function will come in handy. Because the ANOVA algorithm averages the data automatically, we do not need to take the average beforehand.</p>
<p>To see if different types of 'drive-wheels' impact 'price', we group the data.</p>
```
grouped_test2=df_gptest[['drive-wheels', 'price']].groupby(['drive-wheels'])
grouped_test2.head(2)
df_gptest
```
We can obtain the values of a group using the method "get_group".
```
grouped_test2.get_group('4wd')['price']
```
We can use the function 'f_oneway' in the module 'stats' to obtain the <b>F-test score</b> and <b>P-value</b>.
```
# ANOVA
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'], grouped_test2.get_group('4wd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
```
This is a great result with a large F-test score showing a strong correlation and a P-value of almost 0 implying almost certain statistical significance. But does this mean all three tested groups are all this highly correlated?
Let's examine them separately.
#### fwd and rwd
```
f_val, p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val )
```
Let's examine the other groups.
#### 4wd and rwd
```
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('rwd')['price'])
print( "ANOVA results: F=", f_val, ", P =", p_val)
```
<h4>4wd and fwd</h4>
```
f_val, p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('fwd')['price'])
print("ANOVA results: F=", f_val, ", P =", p_val)
```
<h3>Conclusion: Important Variables</h3>
<p>We now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price. We have narrowed it down to the following variables:</p>
Continuous numerical variables:
<ul>
<li>Length</li>
<li>Width</li>
<li>Curb-weight</li>
<li>Engine-size</li>
<li>Horsepower</li>
<li>City-mpg</li>
<li>Highway-mpg</li>
<li>Wheel-base</li>
<li>Bore</li>
</ul>
Categorical variables:
<ul>
<li>Drive-wheels</li>
</ul>
<p>As we now move into building machine learning models to automate our analysis, feeding the model with variables that meaningfully affect our target variable will improve our model's prediction performance.</p>
### Thank you for completing this lab!
## Author
<a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank">Joseph Santarcangelo</a>
### Other Contributors
<a href="https://www.linkedin.com/in/mahdi-noorian-58219234/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank">Mahdi Noorian PhD</a>
Bahare Talayian
Eric Xiao
Steven Dong
Parizad
Hima Vasudevan
<a href="https://www.linkedin.com/in/fiorellawever/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank">Fiorella Wenver</a>
<a href="https:// https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" >Yi Yao</a>.
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ---------- | ---------------------------------- |
| 2020-10-30 | 2.1 | Lakshmi | changed URL of csv |
| 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |
<hr>
## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
| github_jupyter |
Thanks for:
https://www.kaggle.com/ttahara/osic-baseline-lgbm-with-custom-metric
https://www.kaggle.com/carlossouza/bayesian-experiments
## About
In this competition, participants are required to predict `FVC` and its **_`Confidence`_**.
Here, I trained Lightgbm to predict them at the same time by utilizing custom metric.
Most of codes in this notebook are forked from @yasufuminakama 's [lgbm baseline](https://www.kaggle.com/yasufuminakama/osic-lgb-baseline). Thanks!
## Library
```
import os
import operator
import typing as tp
from logging import getLogger, INFO, StreamHandler, FileHandler, Formatter
from functools import partial
import numpy as np
import pandas as pd
import pymc3 as pm
import random
import math
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler,LabelEncoder
import category_encoders as ce
from PIL import Image
import cv2
import pydicom
import torch
import lightgbm as lgb
from sklearn.linear_model import Ridge
import warnings
warnings.filterwarnings("ignore")
```
## Utils
```
def get_logger(filename='log'):
    """Return a logger writing to both stdout and '<filename>.log'.

    Args:
        filename: basename (without extension) of the log file.
    """
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter("%(message)s"))
    # Bug fix: the f-string had no placeholder, so the `filename` argument
    # was silently ignored and a literal file name was used instead.
    handler2 = FileHandler(filename=f"{filename}.log")
    handler2.setFormatter(Formatter("%(message)s"))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger
logger = get_logger()
def seed_everything(seed=777):
    """Make runs reproducible: seed python, numpy and torch RNGs."""
    # The hash seed must be pinned via the environment.
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Feed the same seed to every generator we rely on.
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    # Force deterministic cuDNN kernels (at some speed cost).
    torch.backends.cudnn.deterministic = True
```
## Config
```
# ----- Global configuration -----
OUTPUT_DICT = './'   # output path prefix for saved artifacts (despite the name, it is a directory string)
ID = 'Patient_Week'  # unique sample identifier column
TARGET = 'FVC'       # regression target column
SEED = 42
seed_everything(seed=SEED)  # fix all RNGs for reproducibility
N_FOLD = 4  # number of cross-validation folds
# Data Loading
```
# ----- Load raw tables and build the supervised training frame -----
train = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/train.csv')
tr = train.copy()  # untouched copy, reused later by the Bayesian model
train[ID] = train['Patient'].astype(str) + '_' + train['Weeks'].astype(str)
print(train.shape)
train.head()
# Construct train input: expand each patient into (base visit, predicted visit)
# pairs — every recorded week serves once as the "base" measurement and all the
# patient's visits become rows to predict from that base.
output = pd.DataFrame()
gb = train.groupby('Patient')
tk0 = tqdm(gb, total=len(gb))
for _, usr_df in tk0:
    usr_output = pd.DataFrame()
    for week, tmp in usr_df.groupby('Weeks'):
        rename_cols = {'Weeks': 'base_Week', 'FVC': 'base_FVC', 'Percent': 'base_Percent', 'Age': 'base_Age'}
        tmp = tmp.drop(columns='Patient_Week').rename(columns=rename_cols)
        # Static per-patient columns come from the base row, so drop them here.
        drop_cols = ['Age', 'Sex', 'SmokingStatus', 'Percent']
        _usr_output = usr_df.drop(columns=drop_cols).rename(columns={'Weeks': 'predict_Week'}).merge(tmp, on='Patient')
        _usr_output['Week_passed'] = _usr_output['predict_Week'] - _usr_output['base_Week']
        usr_output = pd.concat([usr_output, _usr_output])
    output = pd.concat([output, usr_output])
# Drop self-pairs (predicting the base week itself).
train = output[output['Week_passed']!=0].reset_index(drop=True)
print(train.shape)
train.head()
# Construct test input.
test = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/test.csv')
ts = test.copy()  # untouched copy, reused later
```
# Create test dataset with Bayesian approach
https://colab.research.google.com/drive/13WTKUlpYEtN0RNhzax_j8gbf84FuU1CF?authuser=1#scrollTo=jUeafaYrv9Em
```
# Recover each test patient's reference lung capacity X from the baseline visit.
# Percent is defined relative to it:
#   X * Percent / 100 = FVC   =>   X = FVC * 100 / Percent
dic = {}
for i in range(len(test)):
    X = int(test.FVC[i]*100/test.Percent[i])
    dic[test.Patient[i]] = X
dic
# Combine train and test measurements into one table for the hierarchical model.
tr = pd.concat([tr, ts], axis=0, ignore_index=True).drop_duplicates()
le_id = LabelEncoder()
tr['PatientID'] = le_id.fit_transform(tr['Patient'])
n_patients = tr['Patient'].nunique()
FVC_obs = tr['FVC'].values
Weeks = tr['Weeks'].values
PatientID = tr['PatientID'].values
# Partial-pooling Bayesian linear model: each patient p gets an intercept a[p]
# and a slope b[p] over Weeks, both drawn from shared population priors.
with pm.Model() as model_a:
    # create shared variables that can be changed later on
    FVC_obs_shared = pm.Data("FVC_obs_shared", FVC_obs)
    Weeks_shared = pm.Data('Weeks_shared', Weeks)
    PatientID_shared = pm.Data('PatientID_shared', PatientID)
    # Population-level priors (FVC is on the order of a few thousand ml,
    # and typically declines slowly, hence the negative slope prior).
    mu_a = pm.Normal('mu_a', mu=1700., sigma=400)
    sigma_a = pm.HalfNormal('sigma_a', 1000.)
    mu_b = pm.Normal('mu_b', mu=-4., sigma=1)
    sigma_b = pm.HalfNormal('sigma_b', 5.)
    # Per-patient intercept and slope.
    a = pm.Normal('a', mu=mu_a, sigma=sigma_a, shape=n_patients)
    b = pm.Normal('b', mu=mu_b, sigma=sigma_b, shape=n_patients)
    # Model error
    sigma = pm.HalfNormal('sigma', 150.)
    FVC_est = a[PatientID_shared] + b[PatientID_shared] * Weeks_shared
    # Data likelihood
    FVC_like = pm.Normal('FVC_like', mu=FVC_est,
                         sigma=sigma, observed=FVC_obs_shared)
    # Fitting the model (MCMC sampling)
    trace_a = pm.sample(2000, tune=2000, target_accept=.9, init="adapt_diag")
# Build a prediction grid: every test patient x every week in [-12, 133].
pred_template = []
for p in ts['Patient'].unique():
    df = pd.DataFrame(columns=['PatientID', 'Weeks'])
    df['Weeks'] = np.arange(-12, 134)
    df['Patient'] = p
    pred_template.append(df)
pred_template = pd.concat(pred_template, ignore_index=True)
pred_template['PatientID'] = le_id.transform(pred_template['Patient'])
# Swap the model's shared data for the prediction grid and sample.
with model_a:
    pm.set_data({
        "PatientID_shared": pred_template['PatientID'].values.astype(int),
        "Weeks_shared": pred_template['Weeks'].values.astype(int),
        # Observed values are unused at prediction time; zeros keep shapes aligned.
        "FVC_obs_shared": np.zeros(len(pred_template)).astype(int),
    })
    post_pred = pm.sample_posterior_predictive(trace_a)
# Posterior mean -> FVC prediction, posterior std -> Confidence.
df = pd.DataFrame(columns=['Patient', 'Weeks', 'Patient_Week', 'FVC', 'Confidence'])
df['Patient'] = pred_template['Patient']
df['Weeks'] = pred_template['Weeks']
df['Patient_Week'] = df['Patient'] + '_' + df['Weeks'].astype(str)
df['FVC'] = post_pred['FVC_like'].T.mean(axis=1)
df['Confidence'] = post_pred['FVC_like'].T.std(axis=1)
final = df[['Patient_Week', 'FVC', 'Confidence']]
final.to_csv('submission.csv', index=False)
print(final.shape)
final
# Rebuild the test table in the same (base visit -> predicted week) format as train.
test = test.rename(columns={'Weeks': 'base_Week', 'FVC': 'base_FVC', 'Percent': 'base_Percent', 'Age': 'base_Age'})
submission = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/sample_submission.csv')
# Patient_Week is '<patient-id>_<week>'; split it back into its parts.
submission['Patient'] = submission['Patient_Week'].apply(lambda x: x.split('_')[0])
submission['predict_Week'] = submission['Patient_Week'].apply(lambda x: x.split('_')[1]).astype(int)
test = submission.drop(columns=['FVC', 'Confidence']).merge(test, on='Patient')
test['Week_passed'] = test['predict_Week'] - test['base_Week']
print(test.shape)
test
# Replace base_FVC with the Bayesian model's per-week FVC estimate.
test = test.drop(columns='base_FVC').merge(final[["Patient_Week", "FVC"]], on='Patient_Week')
test
# Re-derive Percent from the reference capacity: Percent = FVC * 100 / X
for i in range(len(test)):
    Percent = test.FVC[i]*100 / dic[test.Patient[i]]
    # NOTE(review): chained-indexing assignment (SettingWithCopyWarning risk);
    # test.loc[i, 'base_Percent'] would be the safer spelling — confirm before changing.
    test.base_Percent[i] = Percent
test
# Getting FVC for the base week and setting it as base_FVC of each patient.
def get_base_FVC(data):
    """Attach each patient's FVC at their earliest predict_Week as 'base_FVC'.

    Returns a copy of `data` with an extra 'base_FVC' column. The helper
    column 'min_week' also survives (see NOTE below); callers drop it.
    """
    df = data.copy()
    df['min_week'] = df.groupby('Patient')['predict_Week'].transform('min')
    base = df.loc[df.predict_Week == df.min_week][['Patient','FVC']].copy()
    base.columns = ['Patient','base_FVC']
    # Keep only the first row per patient in case several rows share min_week.
    base['nb']=1
    base['nb'] = base.groupby('Patient')['nb'].transform('cumsum')
    base = base[base.nb==1]
    base.drop('nb',axis =1,inplace=True)
    df = df.merge(base,on="Patient",how='left')
    # NOTE(review): this drop is not assigned, so 'min_week' remains in df.
    # Downstream code depends on that (it drops 'min_week' itself), so do not
    # "fix" this line without also updating the callers.
    df.drop(['min_week'], axis = 1)
    return df
# For inference: count the number of CT image files available per patient.
def get_N_CT(data, mode="test"):
    """Append a column 'N_CT' with the CT file count for each patient.

    Args:
        data: DataFrame with a 'Patient' column.
        mode: "test" -> look under `image_folder`/<patient-id>,
              "train" -> look under `data_dir`/train/<patient-id>.
              (Both roots are module-level names defined elsewhere.)

    Raises:
        ValueError: for an unknown `mode` (previously this raised an
        obscure UnboundLocalError on `png_dir`).
    """
    df = data.copy()
    N_CT = []
    for pt_id in df.Patient:
        # Bug fix: `is` compared string identity, which is unreliable for
        # literals (and a SyntaxWarning on modern Python); use equality.
        if mode == "test":
            png_dir = os.path.join(image_folder, pt_id)
        elif mode == "train":
            png_dir = os.path.join(data_dir, 'train', pt_id)
        else:
            raise ValueError(f"unknown mode: {mode!r}")
        files = os.listdir(png_dir)
        N_CT.append(len(files))
    df["N_CT"] = N_CT
    return df
test["min_Weeks"] = np.nan
test = get_base_FVC(test)
test
test = test.drop(['min_Weeks', 'min_week'], axis = 1)
test
submission = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/sample_submission.csv')
print(submission.shape)
submission.head()
```
# Prepare folds
```
# Assign a fold id to every training row. GroupKFold groups by patient so all
# rows of one patient land in the same fold (prevents leakage across folds).
folds = train[[ID, 'Patient', TARGET]].copy()
#Fold = KFold(n_splits=N_FOLD, shuffle=True, random_state=SEED)
Fold = GroupKFold(n_splits=N_FOLD)
groups = folds['Patient'].values
for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[TARGET], groups)):
    folds.loc[val_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
folds
```
## Custom Objective / Metric
The competition evaluation metric is:
$
\displaystyle \sigma_{clipped} = \max \left ( \sigma, 70 \right ) \\
\displaystyle \Delta = \min \left ( \|FVC_{true} - FVC_{predicted}\|, 1000 \right ) \\
\displaystyle f_{metric} = - \frac{\sqrt{2} \Delta}{\sigma_{clipped}} - \ln \left( \sqrt{2} \sigma_{clipped} \right) .
$
This is too complex to directly optimize by custom metric.
Here I use the negative log-likelihood loss (_NLL_) of a gaussian.
Let $FVC_{true}$ be $t$ and $FVC_{predicted}$ be $\mu$; the _NLL_ $l$ is formulated by:
$
\displaystyle l\left( t, \mu, \sigma \right) =
-\ln \left [ \frac{1}{\sqrt{2 \pi} \sigma} \exp \left \{ - \frac{\left(t - \mu \right)^2}{2 \sigma^2} \right \} \right ]
= \frac{\left(t - \mu \right)^2}{2 \sigma^2} + \ln \left( \sqrt{2 \pi} \sigma \right).
$
`grad` and `hess` are calculated as follows:
$
\displaystyle \frac{\partial l}{\partial \mu } = -\frac{t - \mu}{\sigma^2} \ , \ \frac{\partial^2 l}{\partial \mu^2 } = \frac{1}{\sigma^2}
$
$
\displaystyle \frac{\partial l}{\partial \sigma}
=-\frac{\left(t - \mu \right)^2}{\sigma^3} + \frac{1}{\sigma} = \frac{1}{\sigma} \left\{ 1 - \left ( \frac{t - \mu}{\sigma} \right)^2 \right \}
\\
\displaystyle \frac{\partial^2 l}{\partial \sigma^2}
= -\frac{1}{\sigma^2} \left\{ 1 - \left ( \frac{t - \mu}{\sigma} \right)^2 \right \}
+\frac{1}{\sigma} \frac{2 \left(t - \mu \right)^2 }{\sigma^3}
= -\frac{1}{\sigma^2} \left\{ 1 - 3 \left ( \frac{t - \mu}{\sigma} \right)^2 \right \}
$
For numerical stability, I replace $\sigma$ with $\displaystyle \tilde{\sigma} := \log\left(1 + \mathrm{e}^{\sigma} \right).$
$
\displaystyle l'\left( t, \mu, \sigma \right)
= \frac{\left(t - \mu \right)^2}{2 \tilde{\sigma}^2} + \ln \left( \sqrt{2 \pi} \tilde{\sigma} \right).
$
$
\displaystyle \frac{\partial l'}{\partial \mu } = -\frac{t - \mu}{\tilde{\sigma}^2} \ , \ \frac{\partial^2 l}{\partial \mu^2 } = \frac{1}{\tilde{\sigma}^2}
$
<br>
$
\displaystyle \frac{\partial l'}{\partial \sigma}
= \frac{1}{\tilde{\sigma}} \left\{ 1 - \left ( \frac{t - \mu}{\tilde{\sigma}} \right)^2 \right \} \frac{\partial \tilde{\sigma}}{\partial \sigma}
\\
\displaystyle \frac{\partial^2 l'}{\partial \sigma^2}
= -\frac{1}{\tilde{\sigma}^2} \left\{ 1 - 3 \left ( \frac{t - \mu}{\tilde{\sigma}} \right)^2 \right \}
\left( \frac{\partial \tilde{\sigma}}{\partial \sigma} \right) ^2
+\frac{1}{\tilde{\sigma}} \left\{ 1 - \left ( \frac{t - \mu}{\tilde{\sigma}} \right)^2 \right \} \frac{\partial^2 \tilde{\sigma}}{\partial \sigma^2}
$
, where
$
\displaystyle
\frac{\partial \tilde{\sigma}}{\partial \sigma} = \frac{1}{1 + \mathrm{e}^{-\sigma}} \\
\displaystyle
\frac{\partial^2 \tilde{\sigma}}{\partial \sigma^2} = \frac{\mathrm{e}^{-\sigma}}{\left( 1 + \mathrm{e}^{-\sigma} \right)^2}
= \frac{\partial \tilde{\sigma}}{\partial \sigma} \left( 1 - \frac{\partial \tilde{\sigma}}{\partial \sigma} \right)
$
```
class OSICLossForLGBM:
    """
    Custom loss for LightGBM predicting (FVC, Confidence) jointly.

    * Objective (`return_grad_and_hess`): gradient & hessian of the gaussian
      negative log-likelihood, with sigma passed through softplus for
      numerical stability.
    * Evaluation (`return_loss` / `__call__`): the competition metric
      (Laplace log-likelihood; higher is better).
    """

    def __init__(self, epsilon: float=1) -> None:
        """Initialize."""
        self.name = "osic_loss"
        self.n_class = 2  # FVC & Confidence
        self.epsilon = epsilon

    def __call__(self, preds: np.ndarray, labels: np.ndarray, weight: tp.Optional[np.ndarray]=None) -> float:
        """Return the (optionally weighted) competition metric.

        preds[:, 0] is the FVC estimate, preds[:, 1] the confidence (sigma).
        """
        sigma_clip = np.maximum(preds[:, 1], 70)
        Delta = np.minimum(np.abs(preds[:, 0] - labels), 1000)
        loss_by_sample = - np.sqrt(2) * Delta / sigma_clip - np.log(np.sqrt(2) * sigma_clip)
        # Bug fix: np.average's second positional parameter is `axis`, not
        # `weights` — the sample weights must be passed by keyword, otherwise
        # any non-None weight array was misused as an axis argument.
        loss = np.average(loss_by_sample, weights=weight)
        return loss

    def _calc_grad_and_hess(
        self, preds: np.ndarray, labels: np.ndarray, weight: tp.Optional[np.ndarray]=None
    ) -> tp.Tuple[np.ndarray]:
        """Grad & hess of the gaussian NLL w.r.t. mu and raw sigma (pre-softplus)."""
        mu = preds[:, 0]
        sigma = preds[:, 1]
        # Softplus transform of sigma and its first/second derivatives.
        sigma_t = np.log(1 + np.exp(sigma))
        grad_sigma_t = 1 / (1 + np.exp(- sigma))
        hess_sigma_t = grad_sigma_t * (1 - grad_sigma_t)
        grad = np.zeros_like(preds)
        hess = np.zeros_like(preds)
        grad[:, 0] = - (labels - mu) / sigma_t ** 2
        hess[:, 0] = 1 / sigma_t ** 2
        tmp = ((labels - mu) / sigma_t) ** 2
        grad[:, 1] = 1 / sigma_t * (1 - tmp) * grad_sigma_t
        hess[:, 1] = (
            - 1 / sigma_t ** 2 * (1 - 3 * tmp) * grad_sigma_t ** 2
            + 1 / sigma_t * (1 - tmp) * hess_sigma_t
        )
        if weight is not None:
            grad = grad * weight[:, None]
            hess = hess * weight[:, None]
        return grad, hess

    def return_loss(self, preds: np.ndarray, data: "lgb.Dataset") -> tp.Tuple[str, float, bool]:
        """Eval callback for lightgbm: returns (name, value, is_higher_better)."""
        labels = data.get_label()
        weight = data.get_weight()
        n_example = len(labels)
        # reshape preds: (n_class * n_example,) => (n_class, n_example) => (n_example, n_class)
        preds = preds.reshape(self.n_class, n_example).T
        # calc loss
        loss = self(preds, labels, weight)
        return self.name, loss, True

    def return_grad_and_hess(self, preds: np.ndarray, data: "lgb.Dataset") -> tp.Tuple[np.ndarray]:
        """Objective callback for lightgbm: returns flattened (grad, hess)."""
        labels = data.get_label()
        weight = data.get_weight()
        n_example = len(labels)
        # reshape preds: (n_class * n_example,) => (n_class, n_example) => (n_example, n_class)
        preds = preds.reshape(self.n_class, n_example).T
        # calc grad and hess.
        grad, hess = self._calc_grad_and_hess(preds, labels, weight)
        # reshape grad, hess: (n_example, n_class) => (n_class, n_example) => (n_class * n_example,)
        grad = grad.T.reshape(n_example * self.n_class)
        hess = hess.T.reshape(n_example * self.n_class)
        return grad, hess
```
## Training Utils
```
#===========================================================
# model
#===========================================================
def run_single_lightgbm(
    model_param, fit_param, train_df, test_df, folds, features, target,
    fold_num=0, categorical=None, my_loss=None,
):
    """Train LightGBM on one fold with the custom loss.

    Returns:
        oof: (len(train_df), 2) array with predictions on the fold's
             validation rows (columns: FVC, Confidence); zeros elsewhere.
        predictions: (len(test_df), 2) array of test predictions.
        fold_importance_df: per-feature gain importance for this fold.
    """
    # Fix: avoid the mutable-default-argument pitfall (was `categorical=[]`).
    categorical = [] if categorical is None else categorical
    trn_idx = folds[folds.fold != fold_num].index
    val_idx = folds[folds.fold == fold_num].index
    logger.info(f'len(trn_idx) : {len(trn_idx)}')
    logger.info(f'len(val_idx) : {len(val_idx)}')
    if categorical == []:
        trn_data = lgb.Dataset(
            train_df.iloc[trn_idx][features], label=target.iloc[trn_idx])
        val_data = lgb.Dataset(
            train_df.iloc[val_idx][features], label=target.iloc[val_idx])
    else:
        trn_data = lgb.Dataset(
            train_df.iloc[trn_idx][features], label=target.iloc[trn_idx],
            categorical_feature=categorical)
        val_data = lgb.Dataset(
            train_df.iloc[val_idx][features], label=target.iloc[val_idx],
            categorical_feature=categorical)
    oof = np.zeros((len(train_df), 2))
    predictions = np.zeros((len(test_df), 2))
    # Train with the custom objective (grad/hess of the gaussian NLL) and
    # evaluate with the competition metric. (Removed an unused local that
    # previously lived here.)
    clf = lgb.train(
        model_param, trn_data, **fit_param,
        valid_sets=[trn_data, val_data],
        fobj=my_loss.return_grad_and_hess,
        feval=my_loss.return_loss,
    )
    oof[val_idx] = clf.predict(train_df.iloc[val_idx][features], num_iteration=clf.best_iteration)
    fold_importance_df = pd.DataFrame()
    fold_importance_df["Feature"] = features
    fold_importance_df["importance"] = clf.feature_importance(importance_type='gain')
    fold_importance_df["fold"] = fold_num
    predictions += clf.predict(test_df[features], num_iteration=clf.best_iteration)
    # RMSE of the FVC column only (consistency fix: positional indexing via
    # .iloc, matching the Dataset construction above).
    logger.info("fold{} RMSE score: {:<8.5f}".format(
        fold_num, np.sqrt(mean_squared_error(target.iloc[val_idx], oof[val_idx, 0]))))
    # Competition Metric
    logger.info("fold{} Metric: {:<8.5f}".format(
        fold_num, my_loss(oof[val_idx], target.iloc[val_idx])))
    return oof, predictions, fold_importance_df
def run_kfold_lightgbm(
    model_param, fit_param, train, test, folds,
    features, target, n_fold=5, categorical=[], my_loss=None,
):
    """Train `n_fold` LightGBM models and aggregate their predictions.

    Returns:
        feature_importance_df: stacked per-fold feature importances.
        predictions: test predictions averaged over the folds.
        oof: out-of-fold predictions covering every training row.
    """
    logger.info(f"================================= {n_fold}fold lightgbm =================================")
    oof = np.zeros((len(train), 2))
    predictions = np.zeros((len(test), 2))
    feature_importance_df = pd.DataFrame()
    for fold_id in range(n_fold):
        print("Fold {}".format(fold_id))
        fold_oof, fold_preds, fold_importance_df = run_single_lightgbm(
            model_param, fit_param, train, test, folds,
            features, target, fold_num=fold_id, categorical=categorical, my_loss=my_loss,
        )
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        # Each fold fills only its own validation rows, so summing assembles
        # the complete out-of-fold matrix.
        oof += fold_oof
        # Test predictions are averaged across folds.
        predictions += fold_preds / n_fold
    # RMSE on the FVC column, then the competition metric on (FVC, Confidence).
    logger.info("CV RMSE score: {:<8.5f}".format(np.sqrt(mean_squared_error(target, oof[:, 0]))))
    logger.info("CV Metric: {:<8.5f}".format(my_loss(oof, target)))
    logger.info(f"=========================================================================================")
    return feature_importance_df, predictions, oof
def show_feature_importance(feature_importance_df, name):
    """Plot the top-50 features by mean gain importance and save as a PNG."""
    # Rank features by their importance averaged over folds; keep the top 50.
    mean_importance = (
        feature_importance_df[["Feature", "importance"]]
        .groupby("Feature")
        .mean()
        .sort_values(by="importance", ascending=False)
    )
    top_features = mean_importance[:50].index
    plot_df = feature_importance_df.loc[feature_importance_df.Feature.isin(top_features)]
    plt.figure(figsize=(6, 4))
    sns.barplot(
        x="importance",
        y="Feature",
        data=plot_df.sort_values(by="importance", ascending=False),
    )
    plt.title('Features importance (averaged/folds)')
    plt.tight_layout()
    plt.savefig(OUTPUT_DICT + f'feature_importance_{name}.png')
```
## Predict FVC & Confidence (sigma)
```
# ----- Feature set-up -----
target = train[TARGET]
test[TARGET] = np.nan  # placeholder so train/test share the same columns
# features
cat_features = ['Sex', 'SmokingStatus']
num_features = [c for c in test.columns if (test.dtypes[c] != 'object') & (c not in cat_features)]
features = num_features + cat_features
# Identifiers and absolute weeks are excluded (only Week_passed matters).
drop_features = [ID, TARGET, 'predict_Week', 'base_Week']
features = [c for c in features if c not in drop_features]
# Ordinal-encode categorical columns (fit on train, apply to both frames).
if cat_features:
    ce_oe = ce.OrdinalEncoder(cols=cat_features, handle_unknown='impute')
    ce_oe.fit(train)
    train = ce_oe.transform(train)
    test = ce_oe.transform(test)
# Two outputs per row (FVC, Confidence); evaluation comes entirely from the
# custom loss, hence 'metric': 'None'.
lgb_model_param = {
    'num_class': 2,
    # 'objective': 'regression',
    'metric': 'None',
    'boosting_type': 'gbdt',
    'learning_rate': 5e-02,
    'seed': SEED,
    "subsample": 0.4,
    "subsample_freq": 1,
    'max_depth': 1,
    'verbosity': -1,
}
lgb_fit_param = {
    "num_boost_round": 10000,
    "verbose_eval":100,
    "early_stopping_rounds": 500,
}
# ----- Train K folds with the custom objective/metric -----
feature_importance_df, predictions, oof = run_kfold_lightgbm(
    lgb_model_param, lgb_fit_param, train, test,
    folds, features, target,
    n_fold=N_FOLD, categorical=cat_features, my_loss=OSICLossForLGBM())
show_feature_importance(feature_importance_df, TARGET)
oof[:5, :]
predictions[:5]
# Attach model outputs: column 0 = FVC estimate, column 1 = Confidence (sigma).
train["FVC_pred"] = oof[:, 0]
train["Confidence"] = oof[:, 1]
test["FVC_pred"] = predictions[:, 0]
test["Confidence"] = predictions[:, 1]
```
# Submission
```
# Build the final submission: keep sample_submission's column order and fill
# FVC/Confidence from the LightGBM predictions, joined on Patient_Week.
submission.head()
sub = submission.drop(columns=['FVC', 'Confidence']).merge(test[['Patient_Week', 'FVC_pred', 'Confidence']],
                                                           on='Patient_Week')
sub.columns = submission.columns  # rename FVC_pred back to FVC
sub.to_csv('submission.csv', index=False)
sub.head()
```
| github_jupyter |
## Environment Initialization
This cell is used to initialize the necessary environments for pipcook to run, including Node.js 12.x.
```
!wget -P /tmp https://nodejs.org/dist/v12.19.0/node-v12.19.0-linux-x64.tar.xz
!rm -rf /usr/local/lib/nodejs
!mkdir -p /usr/local/lib/nodejs
!tar -xJf /tmp/node-v12.19.0-linux-x64.tar.xz -C /usr/local/lib/nodejs
!sh -c 'echo "export PATH=/usr/local/lib/nodejs/node-v12.19.0-linux-x64/bin:\$PATH" >> /etc/profile'
!rm -f /usr/bin/node
!rm -f /usr/bin/npm
!ln -s /usr/local/lib/nodejs/node-v12.19.0-linux-x64/bin/node /usr/bin/node
!ln -s /usr/local/lib/nodejs/node-v12.19.0-linux-x64/bin/npm /usr/bin/npm
!npm config delete registry
import os
PATH_ENV = os.environ['PATH']
%env PATH=/usr/local/lib/nodejs/node-v12.19.0-linux-x64/bin:${PATH_ENV}
```
## install pipcook cli tool
pipcook-cli is the cli tool for pipcook for any operations later, including installing pipcook, run pipcook jobs and checking logs.
```
!npm install @pipcook/cli -g
!rm -f /usr/bin/pipcook
!ln -s /usr/local/lib/nodejs/node-v12.19.0-linux-x64/bin/pipcook /usr/bin/pipcook
```
# Classify images of UI components
## Background
Have you encountered such a scenario in the front-end business: there are some images in your hand, and you want an automatic way to identify what front-end components these images are, whether it is a button, a navigation bar, or a form? This is a typical image classification task.
> The task of predicting image categories is called image classification. The purpose of training the image classification model is to identify various types of images.
This identification is very useful. You can use this identification information for code generation or automated testing.
Taking code generation as an example, suppose we have a sketch design draft and the entire design draft is composed of different components. We can traverse the layers of the entire design draft. For each layer, use the model of image classification to identify what component each layer is. After that, we can replace the original design draft layer with the front-end component to generate the front-end code.
Another example is in the scenario of automated testing. We need an ability to identify the type of each layer. For the button that is recognized, we can automatically click to see if the button works. For the list component that we recognize, we can automatically track loading speed to monitor performance, etc.
## Examples
For example, in the scenario where the forms are automatically generated, we need to identify which components are column charts or pie charts, as shown in the following figure:


After the training is completed, for each picture, the model will eventually give us the prediction results we want. For example, when we enter the line chart of Figure 1, the model will give prediction results similar to the following:
```
[[0.1, 0.9]]
```
At the same time, we will generate a labelmap during training. Labelmap is a mapping relationship between the serial number and the actual type. This generation is mainly due to the fact that our classification name is text, but before entering the model, we need to convert the text Into numbers. Here is a labelmap:
```json
{
"column": 0,
"pie": 1,
}
```
Why is the prediction result a two-dimensional array? The model allows prediction of multiple pictures at once, and for each picture it returns an array describing the probability of each classification. As shown in the labelmap, the classifications are ordered as column chart then pie chart, so in the prediction result above the pie chart has the highest confidence, 0.9, and the picture is predicted to be a pie chart.
## Data Preparation
When we are doing image classification tasks similar to this one, we need to organize our dataset in a certain format.
We need to divide our dataset into a training set (train), a validation set (validation) and a test set (test) according to a certain proportion. Among them, the training set is mainly used to train the model, and the validation set and the test set are used to evaluate the model. The validation set is mainly used to evaluate the model during the training process to facilitate viewing of the model's overfitting and convergence. The test set is used to perform an overall evaluation of the model after all training is completed.
In the training/validation/test set, we will organize the data according to the classification category. For example, we now have two categories, line and ring, then we can create two folders for these two category names, in the corresponding Place pictures under the folder. The overall directory structure is:
- train
- ring
- xx.jpg
- ...
- line
- xxjpg
- ...
- column
- ...
- pie
- ...
- validation
- ring
- xx.jpg
- ...
- line
- xx.jpg
- ...
- column
- ...
- pie
- ...
- test
- ring
- xx.jpg
- ...
- line
- xx.jpg
- ...
- column
- ...
- pie
- ...
We have prepared such a dataset, you can download it and check it out:[Download here](http://ai-sample.oss-cn-hangzhou.aliyuncs.com/pipcook/datasets/component-recognition-image-classification/component-recognition-classification.zip).
## Start Training
After the dataset is ready, we can start training. Using Pipcook can be very convenient for the training of image classification. You only need to build the following pipeline:
```json
{
"specVersion": "2.0",
"datasource": "https://cdn.jsdelivr.net/gh/imgcook/pipcook-script@fe00a8e/scripts/image-classification-mobilenet/build/datasource.js?url=http://ai-sample.oss-cn-hangzhou.aliyuncs.com/pipcook/datasets/component-recognition-image-classification/component-recognition-classification.zip",
"dataflow": [
"https://cdn.jsdelivr.net/gh/imgcook/pipcook-script@fe00a8e/scripts/image-classification-mobilenet/build/dataflow.js?size=224&size=224"
],
"model": "https://cdn.jsdelivr.net/gh/imgcook/pipcook-script@fe00a8e/scripts/image-classification-mobilenet/build/model.js",
"artifact": [{
"processor": "pipcook-artifact-zip@0.0.2",
"target": "/tmp/mobilenet-model.zip"
}],
"options": {
"framework": "tfjs@3.8",
"train": {
"epochs": 15,
"validationRequired": true
}
}
}
```
Through the above scripts, we can see that they are used separately:
1. **datasource** This script is used to download the dataset that meets the image classification described above. Mainly, we need to provide the url parameter, and we provide the dataset address that we prepared above
2. **dataflow** When performing image classification, we need to have some necessary operations on the original data. For example, image classification requires that all pictures are of the same size, so we use this script to resize the pictures to a uniform size
3. **model** We use this script to define, train and evaluate and save the model.
[mobilenet](https://arxiv.org/abs/1704.04861) is a lightweight model which can be trained on CPU. If you are using [resnet](https://arxiv.org/abs/1512.03385),since the model is quite large, we recommend use to train on GPU.
> CUDA, short for Compute Unified Device Architecture, is a parallel computing platform and programming model founded by NVIDIA based on the GPUs (Graphics Processing Units, which can be popularly understood as graphics cards).
> With CUDA, GPUs can be conveniently used for general purpose calculations (a bit like numerical calculations performed in the CPU, etc.). Before CUDA, GPUs were generally only used for graphics rendering (such as through OpenGL, DirectX).
Now let's run our image-classification job!
```
!sudo pipcook run https://raw.githubusercontent.com/alibaba/pipcook/main/example/pipelines/databinding-image-classification-resnet.json
```
Often the model will converge at 10-20 epochs. Of course, it depends on the complexity of your dataset. Model convergence means that the loss (loss value) is low enough and the accuracy is high enough.
After the training is completed, output will be generated in the current directory, which is a brand-new npm package, then we first install dependencies:
```
!cd output && sudo npm install --unsafe-perm
!wget http://ai-sample.oss-cn-hangzhou.aliyuncs.com/pipcook/dsw/predict.js
```
Now we can predict. You can just have a try on code below to predict the image we provide. You can replace the image url with your own url to try on your own dataset. The predict result is in form of probablity of each category as we have explained before.
```
!node predict.js https://img.alicdn.com/tfs/TB1ekuMhQY2gK0jSZFgXXc5OFXa-400-400.jpg
```
Note that the prediction result we give is the probability of each category. You can process this probability to the result you want.
## Conclusion
In this way, the component recognition task based on the image classification model is completed. After completing the pipeline in our example, if you are interested in such tasks, you can also start preparing your own dataset for training. We have already introduced the format of the dataset in detail in the data preparation chapter. You only need to follow the file directory to easily prepare the data that matches our image classification pipeline.
| github_jupyter |
# Travelling Salesman Problem (TSP)
If we have a list of cities and the distances between each pair of cities, the travelling salesman problem is to find the route with the least total distance that visits every city exactly once.
<img src="https://user-images.githubusercontent.com/5043340/45661145-2f8a7a80-bb37-11e8-99d1-42368906cfff.png" width="400">
Please prepare the blueqat first.
```
!pip3 install blueqat
```
Import libraries and make an instance
```
import blueqat.wq as wq
import numpy as np
# Opt() is blueqat's annealing solver: we fill a.qubo and call a.sa() later.
a = wq.Opt()
```
## Example
Let's see the example we have 4 cities ABCD and we have to visit these cities once. All the cities are connected each other with the distance value as below.
<img src="https://user-images.githubusercontent.com/5043340/45661003-8ba0cf00-bb36-11e8-95fc-573e77ded327.png" width="400">
## Qubomatrix
We need a QUBO matrix to solve this problem on ising model.
Now we have a cost function as this,
$H = \sum_{v=1}^N\left( 1-\sum_{j=1}^N x_{v,j} \right)^2 + \sum_{j=1}^N\left(1-\sum_{v=1}^Nx_{v,j} \right)^2 + B\sum_{(u,v)\in E}W_{u,v}\sum_{j=1}^N x_{u,j} x_{v,j+1}$ ・・・・・(1)
$x_{vj}$ is a binary value if visit city $v$ on $j$ th order.
$x_{vj} = 1$ (if visit city v on jth order)、$0$ (not visit)
We need an $N^2 \times N^2$ matrix for $N$ cities.
Since we have 4 cities here, we need a $16 \times 16$ matrix.
For simplicity we write $x_{vj}$ as $q_i$:
$x_{11}, x_{12}, x_{13}, x_{14}$ → $q_0, q_1, q_2, q_3$
$x_{21}, x_{22}, x_{23}, x_{24}$ → $q_4, q_5, q_6, q_7$
$x_{31}, x_{32}, x_{33}, x_{34}$ → $q_8, q_{9}, q_{10}, q_{11}$
$x_{41}, x_{42}, x_{43}, x_{44}$ → $q_{12}, q_{13}, q_{14}, q_{15}$
We put number as ABCD cities as $x$1:A、2:B、3:C、4:D
To calculate the TSP we need 2 constraint term and 1 cost function
* Visit just once on every city.
* Visit just one city on jth order.
* Minimize the total distance.
## Visit just once on every city
<img src="https://user-images.githubusercontent.com/5043340/45663268-8a749f80-bb40-11e8-8c4a-8b2ad1dd3f35.png" width="400">
If we think about the constraint visit just once on every city, we have to think about just one qubit on every row will be 1 and others should be 0.
For example, $q_0+q_1+q_2+q_3 = 1$. Applying this to every row, we get:
${(1-q_0-q_1-q_2-q_3)^2+(1-q_4-q_5-q_6-q_7)^2+(1-q_8-q_9-q_{10}-q_{11})^2+(1-q_{12}-q_{13}-q_{14}-q_{15})^2
}$
## Visit just one city on jth order
Think about the second constraint.
<img src="https://user-images.githubusercontent.com/5043340/45666641-1bec0d80-bb51-11e8-87f7-0d1bb522f2e8.png" width="400">
Now we have to think about the column that only one qubit on every col is 1 and others should be 0.
${(1-q_0-q_4-q_8-q_{12})^2+(1-q_1-q_5-q_9-q_{13})^2+(1-q_2-q_6-q_{10}-q_{14})^2+(1-q_{3}-q_{7}-q_{11}-q_{15})^2
}$
Finally we have,
${2q_0q_1 + 2q_0q_{12} + 2q_0q_2 + 2q_0q_3 + 2q_0q_4 + 2q_0q_8 - 2q_0}$
${+ 2q_1q_{13} + 2q_1q_2 + 2q_1q_3 + 2q_1q_5 + 2q_1q_9 - 2q_1}$
${ + 2q_{10}q_{11} + 2q_{10}q_{14} + 2q_{10}q_2 + 2q_{10}q_6 + 2q_{10}q_8 + 2q_{10}q_9 - 2q_{10} }$
${+ 2q_{11}q_{15} + 2q_{11}q_3 + 2q_{11}q_7 + 2q_{11}q_8 + 2q_{11}q_9 - 2q_{11}}$
${+ 2q_{12}q_{13} + 2q_{12}q_{14} + 2q_{12}q_{15} + 2q_{12}q_4 + 2q_{12}q_8 - 2q_{12} }$
${+ 2q_{13}q_{14}+ 2q_{13}q_{15} + 2q_{13}q_5 + 2q_{13}q_9 - 2q_{13} }$
${+ 2q_{14}q_{15} + 2q_{14}q_2 + 2q_{14}q_6 - 2q_{14}}$
${+ 2q_{15}q_3 + 2q_{15}q_7 - 2q_{15}}$
${+ 2q_2q_3 + 2q_2q_6 - 2q_2 + 2q_3q_7 - 2q_3 }$
${+ 2q_4q_5 + 2q_4q_6 + 2q_4q_7 + 2q_4q_8 - 2q_4 + 2q_5q_6 + 2q_5q_7 + 2q_5q_9 - 2q_5 }$
${ +2q_6q_7 - 2q_6 - 2q_7 + 2q_8q_9 - 2q_8 - 2q_9 + 8}$
Write down on a QUBO matrix and we have
<img src="https://user-images.githubusercontent.com/5043340/45666980-42f70f00-bb52-11e8-93a7-245e9d0f5609.png" width="400">
## Minimize the total distance
Finally we have to think about the cost function of the total sum of distance and we get this QUBO matrix thinking about the distance between two cities as Jij on the matrix.
<img src="https://user-images.githubusercontent.com/5043340/45667633-f3661280-bb54-11e8-9fbe-5dba63749b1d.png" width="400">
## Add all of the equation and calculate
We choose the parameter B=0.25 and get the final QUBO matrix which is the sum of all matrix.
## Calculate
Put the QUBO on python and start calculating.
```
# Constraint part of the QUBO: "visit every city exactly once" (rows) plus
# "occupy each time slot exactly once" (columns), expanded as in the text.
constraint_qubo = np.array([
    [-2,2,2,2,2,0,0,0,2,0,0,0,2,0,0,0],
    [0,-2,2,2,0,2,0,0,0,2,0,0,0,2,0,0],
    [0,0,-2,2,0,0,2,0,0,0,2,0,0,0,2,0],
    [0,0,0,-2,0,0,0,2,0,0,0,2,0,0,0,2],
    [0,0,0,0,-2,2,2,2,2,0,0,0,2,0,0,0],
    [0,0,0,0,0,-2,2,2,0,2,0,0,0,2,0,0],
    [0,0,0,0,0,0,-2,2,0,0,2,0,0,0,2,0],
    [0,0,0,0,0,0,0,-2,0,0,0,2,0,0,0,2],
    [0,0,0,0,0,0,0,0,-2,2,2,2,2,0,0,0],
    [0,0,0,0,0,0,0,0,0,-2,2,2,0,2,0,0],
    [0,0,0,0,0,0,0,0,0,0,-2,2,0,0,2,0],
    [0,0,0,0,0,0,0,0,0,0,0,-2,0,0,0,2],
    [0,0,0,0,0,0,0,0,0,0,0,0,-2,2,2,2],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,-2,2,2],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,-2,2],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-2],
])
# Cost part of the QUBO: inter-city distances W_{u,v} coupling consecutive
# time slots (the J_ij entries of the distance matrix from the figure).
distance_qubo = np.array([
    [0,0,0,0,0,2,0,2,0,1,0,1,0,3,0,3],
    [0,0,0,0,2,0,2,0,1,0,1,0,3,0,3,0],
    [0,0,0,0,0,2,0,2,0,1,0,1,0,3,0,3],
    [0,0,0,0,2,0,2,0,1,0,1,0,3,0,3,0],
    [0,0,0,0,0,0,0,0,0,4,0,4,0,2,0,2],
    [0,0,0,0,0,0,0,0,4,0,4,0,2,0,2,0],
    [0,0,0,0,0,0,0,0,0,4,0,4,0,2,0,2],
    [0,0,0,0,0,0,0,0,4,0,4,0,2,0,2,0],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2],
    [0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2],
    [0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
])
# BUG FIX: the two matrices were previously written as two separate
# statements — after the first `])` closed the call, the line starting with
# `+np.array([...])*0.25` was a bare unary-plus expression whose result was
# silently discarded, so the distance term never entered a.qubo. Sum them
# explicitly instead.
B = 0.25  # penalty weight balancing the distance cost against the constraints
a.qubo = constraint_qubo + distance_qubo * B
answer = a.sa()
```
And now we have,
```
print(answer)
```
Result is
[1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0]
This shows that the city should be visited from A→C→D→B→A
| github_jupyter |
# Mis on Jupyter / Jupyter Notebook?
* Interaktiivne Pythoni (ja teiste programeerimis keelte programeerimise keskkond).
* Põhirõhk on lihtsal eksperimenteerimisel ja katsetamisel. Samuti sellel et hiljem jääks katsetustest jälg.
* Lisaks Pythonile toetab ka muid programeerimiskeeli mida võiks vaja minna andmeanalüüsis...
# Mida proovin tutvustada?
* [Jupyter notebook](http://jupyter.org) keskkonda üldiselt
* (Üli)natukene Pythonit
* [Pandas](http://pandas.pydata.org/) teeki andmeanalüüsiks Pythonis/Jupyter notebookis.
# Mida installida?
Kõige lihtsam võimalus Pythoni ja Jupyteri installimiseks on tõmmata Anaconda Pythoni installer. Sellega tulevad kaasa kõik vajalikud teegid ja programmid (Jupyter notebook ja Pandas).
* [Anaconda](https://www.continuum.io/downloads) - Kindlasti tõmmata alla Python 3.6 Anaconda installer!
# Jupyter notebook-i tutvustus

*[Jupyter vihik näidis Zika viiruse RNASeq-i analüüsiga](https://github.com/MaayanLab/Zika-RNAseq-Pipeline/blob/master/Zika.ipynb)*
*Artikkel [Wang et al. 2016](https://f1000research.com/articles/5-1574/v1)*
### Jupyter notebook
Jupyter-i Notebooki saab käivitada Start menüüst otsides seda nime järgi.
* Käivitub Jupyteri Notebook-i server lokaalses masinas.
* Automaatselt avatakse browseris ka Jupyteri Notebook ise.
Tavaliselt avaneb Jupyter-i Notebook kõigepealt kasutaja kodukataloogis. Töö jaoks saab luua paremalt `New` menüüst eraldi kataloogi endale.`

*Jupyter-i vihikute töökataloog*
Et luua uus vihik vali paremalt ülevalt "New" menüüst "Python 3"

*Jupyter-i vihikute töökataloog*
Uus tühi vihik näeb välja selline. Vihik on jaotatud erinevateks koodi ja Markdown-i teksti lahtriteks. Vastavalt lahtrile Jupyter kas jooksutab koodi ja kuvab selle tulemusi või siis muudab Markdownis kirjutatu browseris vormindatud tekstiks.

*Uus tühi Jupyter-i Pythoni vihik*
"Help" menüü all on abimaterjalid nii Jupyter notebook-i enda kui ka erinevate Pythoni teekide jaoks (Scipy, Pandas jne.)

*Abimaterjalid*
## Töötamine Jupyter-i vihikuga
Töö Jupyteri vihikus käib lahtri (*cell*) kaupa. Lahtreid võib olla mitut tüüpi. Tähtsamad neist on:
* Koodi lahter (*Code cell*) - Nendesse lahtritesse kirjutataks Pythoni koodi mida siis hiljem saab analüüsi kordamisel uuesti läbi jooksutada lahter lahtri kaupa.
* Markdown lahter (*Markdown cell*) - Lahtrid kuhu saab kirjutada Markdown vormingus teksti et oma koodi/analüüsi mõtestada.
## Koodi lahter (*Code cell*)

*Koodi lahter*
Koodi lahtrisse kirjutatud koodi jooksutamiseks tuleb aktiivses lahtris vajutada `Shift-Enter`. Peale seda kuvatakse selle alla jooksutatud koodi väljund (kui seda on) ja tekitatakse uus koodi lahter. Teiste nupukombinatsioonidega saab koodi lahtrit lihtsalt jooksutada (`Ctrl-Enter`).
* `Shift-Enter` - Jooksutab koodi ja loob selle alla uue lahtri
* `Ctrl-Enter` - Jooksutab koodi kuid ei loo uut lahtrit
Number koodilahtri kõrval näitab jooksutatud koodi järjekorda. Liikudes mitme koodilahtri vahet ja katsetades asju on selle järgi hea vaadata millist koodi on juba jooksutatud.

*Jooksutatud koodi lahter*
Number koodilahtri kõrval näitab jooksutatud koodi järjekorda. Liikudes mitme koodilahtri vahet ja katsetades asju on selle järgi hea vaadata millist koodi on juba jooksutatud.

*Jooksutatud koodi lahtrid*
## Teksti lahter (*Markdown* cell)
*Markdown* on teksti vormindamise keel mis on samal ajal ka lihtsalt loetav. Et muuta lahtrit *Markdown* lahtriks tuleb see valida "Cell Type" menüüst.

*Markdown lahter teksti kirjutamisel*
Et kuvada kirjutatud Markdown koodi siis vajutada jällegi `Shift-Enter`

*Kuvatud Markdowni lahter*
## Markdown-i kirjutamine
Täpsema juhendi Markdown-is vormindamise jaoks saab leida lingilt https://help.github.com/articles/basic-writing-and-formatting-syntax/
Näiteks teksti stiilide muutmine käib nii:

Nimekirjade tegemine käib nii:

Markdown-iga on võimalik sisestada ka lihtsamaid tabeleid. Tabelite tegemiseks läheb vaja tekst paigutada `|` ja `+` sümbolite vahele.
| Pealkiri | Teine pealkiri|
| ------------- | ------------- |
| Sisu | Sisu |
| Sisu | Sisu |
Põhjalikuma juhendi leiab https://help.github.com/articles/organizing-information-with-tables/
## MathJax-iga valemite kirjutamine
Endale märkmeks linke siia et hiljem vihikut täiendada:
* https://stackoverflow.com/questions/13208286/how-to-write-latex-in-ipython-notebook
* http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/Typesetting%20Equations.html
* http://data-blog.udacity.com/posts/2016/10/latex-primer/
## Kokkuvõte
Selliselt koodi ja teksti kirjutades on väga mugav üles ehitada analüüsi ja seda samal ajal dokumenteerida:
- On lihtsasti võimalik kirjutada ja muuta koodi samal ajal eksperimenteerides sellega.
- Markdowni kasutamine lubab andmeid, koodi ja analüüsi järeldusi kirja panna ja annoteerida et hilisem lugemine oleks arusaadavam (nii endal kui teistel).
- Saab kaasa panna pilte, mis on tehtud analüüsi käigus.
- Hiljem saab kirjutatud vihikut eksportida teistesse formaatidesse (vt `File->"Download as"`).
- Python ei ole ainus keel millega vihikuid saab kirjutada. Võimalik on see näiteks ka R-iga (ja paljude teiste keeltega).
### Veel näpunäiteid
* Tervet vihikut otsast peale saab jooksutada menüüst `Kernel->Restart & Run All`
* Koodi lahtrites tasub kirjutada nii, et see on ilusti ülevalt alla järjekorras. On väga lihtne juhtuma et eksperimenteerides hakkad sa kirjutama koodi vales järjekorras ja hiljem vihik ei jookse ilusti.
* Lahtrite muutmisel on kaks erinevat olekut:
* `Edit mode` - Selle jooksul kirjutad sa tavaliselt lahtrisse koodi või teksti
* `Command mode` - Vajutades `Esc` nuppu minnakse aktiivses lahtris mode-i kus saab manipuleerida lahtritega kasutades erinevaid nupukombinatsioone.
* Väga kasulikuks kiiremaks tööks tulevad erinevad Shortcut-id. Neid kõiki näeb menüüst `Help->Keyboard Shortcuts`. Mõned põhilisemad on:
* `Shift-Enter` - Jooksutab lahtri ja liigub järgmisesse lahtrisse
* `Ctrl-Enter` - Joosutab lahtri kuid jääb sama lahtri peale
* `Alt-enter` - Jooksutab lahtri ja loob uue tühja lahtri selle alla
* `Y` - Muudab lahtri Koodi lahtriks
* `M` - Muudab lahtri Markdown lahtriks
* `A` - Lisa lahter olemasoleva alla
* `B` - Lisa lahter olemasoleva kohale
* `D, D` - Kustuta parasjagu aktiivne lahter
# Viited
1. [Jupyter](http://jupyter.org/)
2. [Anaconda Pythoni allalaadimine](https://www.anaconda.com/download/)
3. [Gallery of Interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks#pandas-for-data-analysis)
4. [Jupyter vihik näidis Zika viiruse RNASeq-i analüüsiga](https://github.com/MaayanLab/Zika-RNAseq-Pipeline/blob/master/Zika.ipynb)
5. [Markdown teksti kirjutamise süntaks](https://help.github.com/articles/basic-writing-and-formatting-syntax/)
6. [Nature-i artikkel IPythonist/Jupyter-ist](http://www.nature.com/news/interactive-notebooks-sharing-the-code-1.16261)
| github_jupyter |
# Spark on Tour
## Ejemplo de procesamiento de datos en streaming para generar un dashboard en NRT
En este notebook vamos a ver un ejemplo completo de como se podría utilizar la API de streaming estructurado de Spark para procesar un stream de eventos de puntuación en vivo, en el tiempo real, y generar como salida un conjunto de estadísticas, o valores agregados, con los que poder construir un dashboard de visualización y monitorización en tiempo real.
Particularmente vamos a simular una plataforma de vídeo bajo demanda en la que los usuarios están viendo películas y puntuándolas. Tomaremos los eventos de puntuación que van entrando en streaming y generaremos, en tiempo real, estadísticas de visualización agregadas por película, de forma que podamos monitorizar qué películas son las más populares en este momento.
### Importamos librerías, definimos esquemas e inicializamos la sesión Spark.
```
import findspark
findspark.init()  # locate the local Spark installation and make pyspark importable
import pyspark
from pyspark.sql.types import *
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
from IPython.display import clear_output
import plotly.express as px
# Schema of each incoming rating event (the JSON payload on the Kafka topic).
ratingSchema = StructType([
StructField("user", IntegerType()),
StructField("movie", IntegerType()),
StructField("rating", FloatType())
])
# Schema of the static MovieLens movies catalogue CSV.
movieSchema = StructType([
StructField("movie", IntegerType()),
StructField("title", StringType()),
StructField("genres", StringType())
])
def foreach_batch_function(df, epoch_id):
    """Per-micro-batch sink: replace the notebook output with the current top-10 rows."""
    top_movies = df.limit(10).toPandas()
    clear_output()
    print(top_movies)
#setup spark session
# Local-mode session; the FAIR scheduler lets concurrent streaming jobs share cores.
sparkSession = (SparkSession.builder
.appName("Movie ratings streaming")
.master("local[*]")
.config("spark.scheduler.mode", "FAIR")
.getOrCreate())
sparkSession.sparkContext.setLogLevel("ERROR")  # keep notebook output readable
```
### Leemos el dataset de películas
```
# Load the static movies table (id, title, genres) with the explicit schema defined above.
movies = sparkSession.read.csv("/tmp/movielens/movies.csv", schema=movieSchema, header=True)
movies.show()
```
### Inicializamos la carga del stream de puntuaciones desde Apache Kafka
```
# Subscribe to the "ratings" Kafka topic as an unbounded streaming source.
dataset = (sparkSession
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", "localhost:29092")
.option("subscribe", "ratings")
.load())
# Kafka delivers raw bytes; cast each message value to a string.
dataset = dataset.selectExpr("CAST(value AS STRING)")
# Parse the JSON payload into (user, movie, rating) columns via ratingSchema.
dataset = dataset.select(f.from_json(f.col("value"), ratingSchema).alias("data")).select("data.*")
```
### Agrupamos por película y sumamos visualizaciones y media de puntuación
```
# Running per-movie aggregation: number of ratings (views) and mean rating.
dataset = dataset.select("movie", "rating") \
.groupBy("movie") \
.agg(f.count("rating").alias("num_ratings"), f.avg("rating").alias("avg_rating"))
```
### Mezclamos con el dataset de películas para obtener el título
```
# Enrich with movie titles; the left outer join keeps ratings for unknown movie ids.
dataset = dataset.join(movies, dataset["movie"] == movies["movie"], "left_outer") \
.drop(movies["movie"]) \
.drop("genres")
```
### Ordenamos la salida por número de votaciones (visualizaciones)
```
# Most-viewed first: order by the rating count, descending.
dataset = dataset.select("movie", "title", "avg_rating", "num_ratings") \
.sort(f.desc("num_ratings"))
```
### Ejecutamos el procesamiento en streaming
```
# Start the streaming query. "complete" mode re-emits the whole aggregated
# table on every 5-second trigger.
# NOTE(review): foreachBatch supersedes the console sink — confirm whether the
# format("console") line is intentional.
query = dataset \
.writeStream \
.outputMode("complete") \
.format("console") \
.trigger(processingTime='5 seconds') \
.foreachBatch(foreach_batch_function) \
.start()
query.explain()
query.awaitTermination()  # block the notebook until the stream is stopped
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658))
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
[comment]: <> (Abstract: TODO)
**Module Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). In addition, its output has been
### NRPy+ Source Code for this module: [BSSN/Enforce_Detgammabar_Constraint.py](../edit/BSSN/Enforce_Detgammabar_Constraint.py)
## Introduction:
[Brown](https://arxiv.org/abs/0902.3652)'s covariant Lagrangian formulation of BSSN, which we adopt, requires that $\partial_t \bar{\gamma} = 0$, where $\bar{\gamma}=\det \bar{\gamma}_{ij}$. Further, all initial data we choose satisfies $\bar{\gamma}=\hat{\gamma}$.
However, numerical errors will cause $\bar{\gamma}$ to deviate from a constant in time. This actually disrupts the hyperbolicity of the PDEs, so to cure this, we adjust $\bar{\gamma}_{ij}$ at the end of each Runge-Kutta timestep, so that its determinant satisfies $\bar{\gamma}=\hat{\gamma}$ at all times. We adopt the following, rather standard prescription (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)):
$$
\bar{\gamma}_{ij} \to \left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{ij}.
$$
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows:
1. [Step 1](#initializenrpy): Initialize needed NRPy+ modules
1. [Step 2](#enforcegammaconstraint): Enforce the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint
1. [Step 3](#code_validation): Code Validation against `BSSN.Enforce_Detgammabar_Constraint` NRPy+ module
1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize needed NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
```
# Step P1: import all needed modules from NRPy+:
# NOTE(review): this star import also brings in sympy (as sp) and the lhrh
# helper used in later cells — confirm against outputC.
from outputC import *
import NRPy_param_funcs as par
import grid as gri
import loop as lp
import indexedexp as ixp
import finite_difference as fin
import reference_metric as rfm
import BSSN.BSSN_quantities as Bq
# Set spatial dimension (must be 3 for BSSN)
DIM = 3
par.set_parval_from_str("grid::DIM",DIM)
# Then we set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem","SinhSpherical")
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
```
<a id='enforcegammaconstraint'></a>
# Step 2: Enforce the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint \[Back to [top](#toc)\]
$$\label{enforcegammaconstraint}$$
Recall that we wish to make the replacement:
$$
\bar{\gamma}_{ij} \to \left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{ij}.
$$
Notice the expression on the right is guaranteed to have determinant equal to $\hat{\gamma}$.
$\bar{\gamma}_{ij}$ is not a gridfunction, so we must rewrite the above in terms of $h_{ij}$:
\begin{align}
\left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{ij} &= \bar{\gamma}'_{ij} \\
&= \hat{\gamma}_{ij} + \varepsilon'_{ij} \\
&= \hat{\gamma}_{ij} + \text{Re[i][j]} h'_{ij} \\
\implies h'_{ij} &= \left[\left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{ij} - \hat{\gamma}_{ij}\right] / \text{Re[i][j]} \\
&= \left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \frac{\bar{\gamma}_{ij}}{\text{Re[i][j]}} - \delta_{ij}\\
&= \left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \frac{\hat{\gamma}_{ij} + \text{Re[i][j]} h_{ij}}{\text{Re[i][j]}} - \delta_{ij}\\
&= \left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \left(\delta_{ij} + h_{ij}\right) - \delta_{ij}
\end{align}
Upon inspection, when expressing $\hat{\gamma}$ SymPy generates expressions like `(xx0)^{4/3} = pow(xx0, 4./3.)`, which can yield $\text{NaN}$s when `xx0 < 0` (i.e., in the `xx0` ghost zones). To prevent this, we know that $\hat{\gamma}\ge 0$ for all reasonable coordinate systems, so we make the replacement $\hat{\gamma}\to |\hat{\gamma}|$ below:
```
# We will need the h_{ij} quantities defined within BSSN_RHSs
# below when we enforce the gammahat=gammabar constraint
# NOTE(review): the export appears to have flattened the indentation of the
# for-loop and with-block bodies in this cell — restore before running.
# Step 1: All barred quantities are defined in terms of BSSN rescaled gridfunctions,
# which we declare here in case they haven't yet been declared elsewhere.
Bq.declare_BSSN_gridfunctions_if_not_declared_already()
hDD = Bq.hDD
Bq.BSSN_basic_tensors()
gammabarDD = Bq.gammabarDD
# First define the Kronecker delta:
KroneckerDeltaDD = ixp.zerorank2()
for i in range(DIM):
KroneckerDeltaDD[i][i] = sp.sympify(1)
# The detgammabar in BSSN_RHSs is set to detgammahat when BSSN_RHSs::detgbarOverdetghat_equals_one=True (default),
# so we manually compute it here:
dummygammabarUU, detgammabar = ixp.symm_matrix_inverter3x3(gammabarDD)
# Next apply the constraint enforcement equation above.
# h'_{ij} = (|gammahat|/gammabar)^(1/3) (delta_{ij} + h_{ij}) - delta_{ij};
# Abs() guards against NaNs from fractional powers of negative xx0 in ghost zones.
hprimeDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
hprimeDD[i][j] = \
(sp.Abs(rfm.detgammahat)/detgammabar)**(sp.Rational(1,3)) * (KroneckerDeltaDD[i][j] + hDD[i][j]) \
- KroneckerDeltaDD[i][j]
# Only the 6 independent components of the symmetric hDD are written back.
enforce_detg_constraint_vars = [ \
lhrh(lhs=gri.gfaccess("in_gfs","hDD00"),rhs=hprimeDD[0][0]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD01"),rhs=hprimeDD[0][1]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD02"),rhs=hprimeDD[0][2]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD11"),rhs=hprimeDD[1][1]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD12"),rhs=hprimeDD[1][2]),
lhrh(lhs=gri.gfaccess("in_gfs","hDD22"),rhs=hprimeDD[2][2]) ]
# Convert the symbolic assignments into a C kernel string.
enforce_gammadet_string = fin.FD_outputC("returnstring",enforce_detg_constraint_vars,
params="outCverbose=False,preindent=0,includebraces=False")
# Wrap the kernel in an OpenMP-parallelized loop over all grid points
# (including ghost zones) and write it to a C header file.
with open("BSSN/enforce_detgammabar_constraint.h", "w") as file:
indent = " "
file.write("void enforce_detgammabar_constraint(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *in_gfs) {\n\n")
file.write(lp.loop(["i2","i1","i0"],["0","0","0"],
["Nxx_plus_2NGHOSTS[2]","Nxx_plus_2NGHOSTS[1]","Nxx_plus_2NGHOSTS[0]"],
["1","1","1"],["#pragma omp parallel for",
" const REAL xx2 = xx[2][i2];",
" const REAL xx1 = xx[1][i1];"],"",
"const REAL xx0 = xx[0][i0];\n"+enforce_gammadet_string))
file.write("}\n")
print("Output C implementation of det(gammabar) constraint to file BSSN/enforce_detgammabar_constraint.h")
```
<a id='code_validation'></a>
# Step 3: Code Validation against `BSSN.Enforce_Detgammabar_Constraint` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$
Here, as a code validation check, we verify agreement in the C code output between
1. this tutorial and
2. the NRPy+ [BSSN.Enforce_Detgammabar_Constraint](../edit/BSSN/Enforce_Detgammabar_Constraint.py) module.
```
!mv BSSN/enforce_detgammabar_constraint.h BSSN/enforce_detgammabar_constraint.h-validation
gri.glb_gridfcs_list = []
import BSSN.Enforce_Detgammabar_Constraint as EGC
EGC.output_Enforce_Detgammabar_Constraint_Ccode()
import filecmp
for file in ["BSSN/enforce_detgammabar_constraint.h"]:
if filecmp.cmp(file,file+"-validation") == False:
print("VALIDATION TEST FAILED on file: "+file+".")
exit(1)
else:
print("Validation test PASSED on file: "+file)
```
<a id='latex_pdf_output'></a>
# Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.pdf](Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.ipynb
!pdflatex -interaction=batchmode Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.tex
!pdflatex -interaction=batchmode Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.tex
!pdflatex -interaction=batchmode Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.tex
!rm -f Tut*.out Tut*.aux Tut*.log
```
| github_jupyter |
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from typing import Callable
# Define some types
# Func: any callable returning a numpy float64 — the integrand type used below.
Func= Callable[..., np.float64]
```
# Monte Carlo Integration
$$
I(f) = \int_{\Omega} f(x) \, dx, \quad x\in\mathbb{R}^d
$$
## Numerical Integration
Any numerical integration method (including Monte Carlo Method) can be written in the following form:
$$
I_N(f) \approx \sum_{i=1}^N A_i f(x_i)
$$
```
# quads = (points, weights)
# shape: ((N,d), (N,))
# quads = (points, weights)
# shape: ((N,d), (N,))
def integrate_test(f, quads: tuple[np.ndarray, np.ndarray]):
    """Reference quadrature: sum_i w_i * f(x_i) with an explicit Python loop.

    Kept deliberately loop-based as a slow baseline for timing comparison
    against the vectorized ``integrate``.

    Parameters
    ----------
    f : callable evaluated on one quadrature point at a time
    quads : (points, weights) pair with shapes ((N, d), (N,))

    Returns
    -------
    The quadrature approximation of the integral.
    """
    nodes, coeffs = quads
    assert nodes.shape[0] == coeffs.shape[0]
    total = 0.0
    for node, coeff in zip(nodes, coeffs):
        total += coeff * f(node)
    return total
# quads = (points, weights)
# shape: ((N,d), (N,))
# quads = (points, weights)
# shape: ((N,d), (N,))
def integrate(f, quads: tuple[np.ndarray, np.ndarray]):
    """Vectorized quadrature: sum of weights * f(points).

    Parameters
    ----------
    f : callable applied to the whole point array at once
    quads : (points, weights); ``weights`` may be an (N,) array or a
        scalar (constant weight, e.g. for Monte Carlo).

    Returns
    -------
    The quadrature approximation of the integral.
    """
    nodes, coeffs = quads
    if isinstance(coeffs, np.ndarray):
        assert nodes.shape[0] == coeffs.shape[0]
    # coeffs broadcasts against f(nodes) whether it is scalar or (N,)
    return np.sum(coeffs * f(nodes))
```
### Example 1
$$
\int_0^{\frac{\pi}{2}} \sin x\, dx = 1,
$$
## Deterministic Methods
Trapezoidal formula (复合梯形法)
$$
I(f) \approx \{\frac{1}{2}f(x_1) + \sum_{i=2}^{N-1} f(x_i) + \frac{1}{2}f(x_N)\} \times h
$$
```
def f1(x: np.ndarray):
    """Integrand for Example 1: sin(x), applied elementwise."""
    return np.sin(x)
N = 10000
h = 0.5 * np.pi / (N-1)
points = np.linspace(0, 0.5 * np.pi, N)
weights = np.ones((N)) * h
weights[0] = weights[-1] = 0.5 * h
%timeit I1 = integrate_test(f1, (points, weights))
I2 = integrate(f1, (points, weights))
I2
errs = []
for h in [0.5, 0.25, 0.1, 0.05, 0.02, 0.01]:
N = int(0.5 * np.pi / h) + 1
points = np.linspace(0, 0.5 * np.pi, N)
weights = np.ones((N)) * h
weights[0] = weights[-1] = 0.5 * h
I = integrate_test(f1, (points, weights))
errs.append(np.abs(I - 1.0))
plt.loglog([0.5, 0.25, 0.1, 0.05, 0.02, 0.01], errs)
```
### Gauss
```
# Gauss-Legendre quadrature with N nodes on [a, b].
N = 15
a, b = 0.0, np.pi/2
# nodes and weights on the reference interval [-1, 1]
points, weights = np.polynomial.legendre.leggauss(N)
# affine map of the nodes to [a, b], with the matching weight scaling
points = 0.5*(points + 1)*(b - a) + a
weights = weights * 0.5 * (b - a)
Gauss_I = integrate(f1, (points, weights))
Gauss_I
```
### Stochastic Method (Monte Carlo)
$$
I_N(f) \approx \frac{\pi}{2N}\sum_{i=1}^N f(X_i), \quad X_i \sim \mathcal{U}[0,\pi/2]
$$
$$
X_{n+1} = a X_{n} + b (\text{mod } m)
$$
```
# seed X_0
rng = np.random.default_rng(0)
# 向量化生成随机数
rng.uniform(0.0, np.pi/2)
# seed
# rng = np.random.default_rng(1)
N = 10000
# sample points
rpoints = rng.uniform(0.0, np.pi/2, N)
weights = np.pi/2/ N
MC_I = integrate(f1, (rpoints, weights))
np.linspace(100, 100000, 20)
mc_errs = []
ns = []
for n in np.linspace(100, 100000, 20):
int_n = int(n)
ns.append(int_n)
rpoints = rng.uniform(0.0, np.pi/2, int_n)
weights = np.pi/2/n
MC_I = integrate(f1, (rpoints, weights))
mc_errs.append(np.abs(MC_I - 1.0))
plt.loglog(ns, mc_errs, ns, 1.0 / np.sqrt(np.asarray(ns)))
```
### Example 2
$$
x, y \in [-1, 1]
$$
$$
f(x, y) = 1, \quad \text{if } x^2 + y^2 < 1
$$
$$
\int_{[-1, 1]^2} f(x, y) \, dxdy
$$
```
# z: (2, N)
def f2(z):
    """Indicator of the open unit disk.

    ``z`` is stacked as (2, ...): z[0] holds x coordinates, z[1] holds y.
    Returns 1.0 where x^2 + y^2 < 1 and 0.0 elsewhere (booleans are
    converted to floats by multiplying with 1.0).
    """
    xs, ys = z[0], z[1]
    inside = xs ** 2 + ys ** 2 < 1
    return inside * 1.0
x = np.linspace(-1.0, 1.0, 100)
y = np.linspace(-1.0, 1.0, 100)
z = np.meshgrid(x, y)
Z = f2(np.asarray(z))
Z.shape
X, Y = z
fig, ax = plt.subplots(figsize=(8,8))
ax.contourf(X, Y, Z)
```
### Monte Carlo
```
# fixed seed for reproducibility
rng = np.random.default_rng(1)

N = 10000000  # number of Monte Carlo samples

# sample points: uniform on the square [-1, 1]^2, stacked as (2, N)
rpoints = rng.uniform(-1.0, 1.0, (2, N))
# constant weight = |domain| / N = 4 / N, so with the disk indicator f2
# the estimate converges to the area of the unit disk (pi)
weights = 4.0 / N
MC_I_2d = integrate(f2, (rpoints, weights))
MC_I_2d
```
### 中矩形公式
```
# Midpoint rule on an nx-by-ny grid over [-1, 1]^2.
nx = ny = 1000
h = 2.0 / nx  # cell width
# cell centers: start half a cell in from the boundary
x = np.arange(-1.0 + 0.5*h, 1.0, h)
y = np.arange(-1.0 + 0.5*h, 1.0, h)
# (2, N=nx*ny)
xy = np.asarray(np.meshgrid(x, y))
points = xy.reshape(2, -1)
weights = h**2  # each cell contributes its area
I3 = integrate(f2, (points, weights))
I3
```
### Example 3
$$
\int_{[0, 1]^d} e^{-x} \, dx
$$
| github_jupyter |
## 1. KMeans vs GMM on a Generated Dataset
In the first example we'll look at, we'll generate a Gaussian dataset and attempt to cluster it and see if the clustering matches the original labels of the generated dataset.
We can use sklearn's [make_blobs](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html) function to create a dataset of Gaussian blobs:
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
%matplotlib inline
n_samples = 1000
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[5, 1, 0.5],
random_state=3)
X, y = varied[0], varied[1]
plt.figure( figsize=(16,12))
plt.scatter(X[:,0], X[:,1], c=y, edgecolor='black', lw=1.5, s=100, cmap=plt.get_cmap('viridis'))
plt.show()
```
Now when we hand off this dataset to the clustering algorithms, we obviously will not pass in the labels. So let's start with KMeans and see how it does with the dataset. Will it be able to produce clusters that match the original labels?
```
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3)
pred = kmeans.fit_predict(X)
plt.figure( figsize=(16,12))
plt.scatter(X[:,0], X[:,1], c=pred, edgecolor='black', lw=1.5, s=100, cmap=plt.get_cmap('viridis'))
plt.show()
```
How good of a job did KMeans do? Was it able to find clusters that match or are similar to the original labels?
Let us now try clustering with [GaussianMixture](http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html):
```
# TODO: Import GaussianMixture
from sklearn.mixture import GaussianMixture
# TODO: Create an instance of Gaussian Mixture with 3 components
gmm = GaussianMixture(n_components = 3)
# TODO: fit the dataset
gmm = gmm.fit(X)
# TODO: predict the clustering labels for the dataset
pred_gmm = gmm.predict(X)
# Plot the clusters
plt.figure( figsize=(16,12))
plt.scatter(X[:,0], X[:,1], c=pred_gmm, edgecolor='black', lw=1.5, s=100, cmap=plt.get_cmap('viridis'))
plt.show()
```
By visually comparing the result of KMeans and GMM clustering, which one was better able to match the original?
- The GMM is better than KMeans.
# 2. KMeans vs GMM on The Iris Dataset
For our second example, we'll take a dataset that has more than two features. The Iris dataset is great for this purpose since it is reasonable to assume it's distributed according to Gaussian distributions.
The Iris dataset is a labeled dataset with four features:
```
import seaborn as sns
iris = sns.load_dataset("iris")
iris.head()
```
How do you visualize a dataset with four dimensions?
There are a few ways (e.g. [PairGrid](https://seaborn.pydata.org/generated/seaborn.PairGrid.html), [t-SNE](http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html), or [project into a lower number of dimensions using PCA](http://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_iris.html#sphx-glr-auto-examples-decomposition-plot-pca-iris-py)). Let's attempt to visualize using PairGrid because it does not distort the dataset -- it merely plots every pair of features against each other in a subplot:
```
g = sns.PairGrid(iris, hue="species", palette=sns.color_palette("cubehelix", 3), vars=['sepal_length','sepal_width','petal_length','petal_width'])
g.map(plt.scatter)
plt.show()
```
If we cluster the Iris dataset using KMeans, how close would the resulting clusters match the original labels?
```
kmeans_iris = KMeans(n_clusters=3)
pred_kmeans_iris = kmeans_iris.fit_predict(iris[['sepal_length','sepal_width','petal_length','petal_width']])
iris['kmeans_pred'] = pred_kmeans_iris
g = sns.PairGrid(iris, hue="kmeans_pred", palette=sns.color_palette("cubehelix", 3), vars=['sepal_length','sepal_width','petal_length','petal_width'])
g.map(plt.scatter)
plt.show()
```
How do these clusters match the original labels?
You can clearly see that visual inspection is no longer useful if we're working with multiple dimensions like this. So how can we evaluate the clustering result versus the original labels?
You guessed it. We can use an external cluster validation index such as the [adjusted Rand score](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_rand_score.html) which generates a score between -1 and 1 (where an exact match will be scored as 1).
```
print(pred_kmeans_iris.shape)
print(iris['species'].shape)
# TODO: Import adjusted rand score
from sklearn.metrics import adjusted_rand_score
# TODO: calculate adjusted rand score passing in the original labels and the kmeans predicted labels
iris_kmeans_score = adjusted_rand_score(iris['species'], pred_kmeans_iris)
# Print the score
iris_kmeans_score
```
What if we cluster using Gaussian Mixture models? Would it earn a better ARI score?
```
gmm_iris = GaussianMixture(n_components=3).fit(iris[['sepal_length','sepal_width','petal_length','petal_width']])
pred_gmm_iris = gmm_iris.predict(iris[['sepal_length','sepal_width','petal_length','petal_width']])
iris['gmm_pred'] = pred_gmm_iris
# TODO: calculate adjusted rand score passing in the original
# labels and the GMM predicted labels iris['species']
iris_gmm_score = adjusted_rand_score(iris['species'], pred_gmm_iris)
# Print the score
iris_gmm_score
```
Thanks to ARI scores, we have a clear indicator of which clustering result better matches the original dataset.
| github_jupyter |
# Federated Tensorflow Mnist Tutorial
# Long-Living entities update
* We now may have director running on another machine.
* We use Federation API to communicate with Director.
* Federation object should hold a Director's client (for user service)
* Keeping in mind that several API instances may be connected to one Director.
* We do not think for now how we start a Director.
* But it knows the data shape and target shape for the DataScience problem in the Federation.
* Director holds the list of connected envoys, we do not need to specify it anymore.
* Director and Envoys are responsible for encrypting connections, we do not need to worry about certs.
* Yet we MUST have a cert to communicate to the Director.
* We MUST know the FQDN of a Director.
* Director communicates data and target shape to the Federation interface object.
* Experiment API may use this info to construct a dummy dataset and a `shard descriptor` stub.
```
# Install dependencies if not already installed
# !pip install tensorflow==2.3.1
```
## Connect to the Federation
```
# Create a federation
from openfl.interface.interactive_api.federation import Federation
# please use the same identificator that was used in signed certificate
client_id = 'api'
cert_dir = 'cert'
director_node_fqdn = 'localhost'
director_port=50051
# 1) Run with API layer - Director mTLS
# If the user wants to enable mTLS their must provide CA root chain, and signed key pair to the federation interface
# cert_chain = f'{cert_dir}/root_ca.crt'
# api_certificate = f'{cert_dir}/{client_id}.crt'
# api_private_key = f'{cert_dir}/{client_id}.key'
# federation = Federation(
# client_id=client_id,
# director_node_fqdn=director_node_fqdn,
# director_port=director_port,
# cert_chain=cert_chain,
# api_cert=api_certificate,
# api_private_key=api_private_key
# )
# --------------------------------------------------------------------------------------------------------------------
# 2) Run with TLS disabled (trusted environment)
# Federation can also determine local fqdn automatically
federation = Federation(
client_id=client_id,
director_node_fqdn=director_node_fqdn,
director_port=director_port,
tls=False
)
shard_registry = federation.get_shard_registry()
shard_registry
# First, request a dummy_shard_desc that holds information about the federated dataset
dummy_shard_desc = federation.get_dummy_shard_descriptor(size=10)
dummy_shard_dataset = dummy_shard_desc.get_dataset('train')
sample, target = dummy_shard_dataset[0]
f"Sample shape: {sample.shape}, target shape: {target.shape}"
```
## Describing the FL experiment
```
from openfl.interface.interactive_api.experiment import TaskInterface, DataInterface, ModelInterface, FLExperiment
```
### Register model
```
from layers import create_model, optimizer
framework_adapter = 'openfl.plugins.frameworks_adapters.keras_adapter.FrameworkAdapterPlugin'
model = create_model()
MI = ModelInterface(model=model, optimizer=optimizer, framework_plugin=framework_adapter)
```
### Register dataset
```
import numpy as np
from tensorflow.keras.utils import Sequence
class DataGenerator(Sequence):
    """Keras-style batched loader over a shard descriptor.

    Shuffles the sample order at construction and at the end of every
    epoch; yields (X, y) batches of ``batch_size`` samples, dropping the
    trailing partial batch.
    """

    def __init__(self, shard_descriptor, batch_size):
        self.shard_descriptor = shard_descriptor
        self.batch_size = batch_size
        self.indices = np.arange(len(shard_descriptor))
        self.on_epoch_end()

    def __len__(self):
        # Number of full batches per epoch (partial batch dropped).
        return len(self.indices) // self.batch_size

    def __getitem__(self, index):
        # self.indices is already a shuffled permutation of dataset
        # positions, so one slice yields the batch's sample indices.
        # (The original indexed self.indices twice — the slice result was
        # fed back into self.indices — applying the permutation a second
        # time for no benefit.)
        batch = self.indices[index * self.batch_size:(index + 1) * self.batch_size]
        X, y = self.shard_descriptor[list(batch)]
        return X, y

    def on_epoch_end(self):
        # Reshuffle so batch composition differs between epochs.
        np.random.shuffle(self.indices)
class MnistFedDataset(DataInterface):
    """DataInterface implementation backed by an Envoy-provided shard descriptor."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def shard_descriptor(self):
        return self._shard_descriptor

    @shard_descriptor.setter
    def shard_descriptor(self, shard_descriptor):
        """
        Describe per-collaborator procedures or sharding.

        This method will be called during a collaborator initialization.
        Local shard_descriptor will be set by Envoy.
        """
        self._shard_descriptor = shard_descriptor
        self.train_set = shard_descriptor.get_dataset('train')
        self.valid_set = shard_descriptor.get_dataset('val')

    def __getitem__(self, index):
        return self.shard_descriptor[index]

    def __len__(self):
        return len(self.shard_descriptor)

    def get_train_loader(self):
        """
        Output of this method will be provided to tasks with optimizer in contract
        """
        # fall back to 32 when the 'train_bs' kwarg is falsy
        train_bs = self.kwargs['train_bs'] if self.kwargs['train_bs'] else 32
        return DataGenerator(self.train_set, batch_size=train_bs)

    def get_valid_loader(self):
        """
        Output of this method will be provided to tasks without optimizer in contract
        """
        # fall back to 32 when the 'valid_bs' kwarg is falsy
        valid_bs = self.kwargs['valid_bs'] if self.kwargs['valid_bs'] else 32
        return DataGenerator(self.valid_set, batch_size=valid_bs)

    def get_train_data_size(self):
        """
        Information for aggregation
        """
        return len(self.train_set)

    def get_valid_data_size(self):
        """
        Information for aggregation
        """
        return len(self.valid_set)
```
### Create Mnist federated dataset
```
fed_dataset = MnistFedDataset(train_bs=64, valid_bs=512)
```
## Define and register FL tasks
```
TI = TaskInterface()
import time
import tensorflow as tf
from layers import train_acc_metric, val_acc_metric, loss_fn
# Register as an FL training task: the framework supplies arguments by the
# names declared here (model, train_dataset, optimizer, device).
@TI.register_fl_task(model='model', data_loader='train_dataset', \
                     device='device', optimizer='optimizer')
def train(model, train_dataset, optimizer, device, loss_fn=loss_fn, warmup=False):
    """Run one local training epoch and report training accuracy.

    With ``warmup=True`` the loop stops after the first batch. ``device``
    is accepted to satisfy the task contract but is not used in the body.
    Returns {'train_acc': <epoch accuracy metric>}.
    """
    start_time = time.time()  # NOTE(review): recorded but never used

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train, training=True)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))

        # Update training metric.
        train_acc_metric.update_state(y_batch_train, logits)

        # Log every 200 batches.
        if step % 200 == 0:
            print(
                "Training loss (for one batch) at step %d: %.4f"
                % (step, float(loss_value))
            )
            # NOTE(review): assumes a batch size of 64 for this log line only
            print("Seen so far: %d samples" % ((step + 1) * 64))
        if warmup:
            break

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print("Training acc over epoch: %.4f" % (float(train_acc),))

    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_states()

    return {'train_acc': train_acc,}
# Validation task: no optimizer in the contract, so this runs on the
# aggregated model without updating weights.
@TI.register_fl_task(model='model', data_loader='val_dataset', device='device')
def validate(model, val_dataset, device):
    """Evaluate the model on the validation loader.

    ``device`` is accepted to satisfy the task contract but is not used.
    Returns {'validation_accuracy': <accuracy metric>}.
    """
    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val, training=False)
        # Update val metrics
        val_acc_metric.update_state(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print("Validation acc: %.4f" % (float(val_acc),))
    return {'validation_accuracy': val_acc,}
```
## Time to start a federated learning experiment
```
# create an experimnet in federation
experiment_name = 'mnist_experiment'
fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name)
# The following command zips the workspace and python requirements to be transferred to collaborator nodes
fl_experiment.start(model_provider=MI,
task_keeper=TI,
data_loader=fed_dataset,
rounds_to_train=5,
opt_treatment='CONTINUE_GLOBAL')
fl_experiment.stream_metrics()
```
| github_jupyter |
# Responding to Events
```
import numpy as np
import holoviews as hv
from holoviews import opts
hv.extension('bokeh')
```
In the [Live Data](./07-Live_Data.ipynb) guide we saw how ``DynamicMap`` allows us to explore high dimensional data using the widgets in the same style as ``HoloMaps``. Although suitable for unbounded exploration of large parameter spaces, the ``DynamicMaps`` described in that notebook support exactly the same mode of interaction as ``HoloMaps``. In particular, the key dimensions are used to specify a set of widgets that when manipulated apply the appropriate indexing to invoke the user-supplied callable.
In this user guide we will explore the HoloViews streams system that allows *any* sort of value to be supplied from *anywhere*. This system opens a huge set of new possible visualization types, including continuously updating plots that reflect live data as well as dynamic visualizations that can be interacted with directly, as described in the [Custom Interactivity](./13-Custom_Interactivity.ipynb) guide.
<center><div class="alert alert-info" role="alert">To visualize and use a <b>DynamicMap</b> you need to be running a live Jupyter server.<br>This user guide assumes that it will be run in a live notebook environment.<br>
When viewed statically, DynamicMaps will only show the first available Element.<br></div></center>
```
# Styles and plot options used in this user guide
opts.defaults(
opts.Area(fill_color='cornsilk', line_width=2,
line_color='black'),
opts.Ellipse(bgcolor='white', color='black'),
opts.HLine(color='red', line_width=2),
opts.Image(cmap='viridis'),
opts.Path(bgcolor='white', color='black', line_dash='dashdot',
show_grid=False),
opts.VLine(color='red', line_width=2))
```
## A simple ``DynamicMap``
Before introducing streams, let us declare a simple ``DynamicMap`` of the sort discussed in the [Live Data](07-Live_Data.ipynb) user guide. This example consists of a ``Curve`` element showing a [Lissajous curve](https://en.wikipedia.org/wiki/Lissajous_curve) with ``VLine`` and ``HLine`` annotations to form a crosshair:
```
lin = np.linspace(-np.pi,np.pi,300)
def lissajous(t, a=3, b=5, delta=np.pi/2.):
    """Point on the Lissajous curve at parameter t: (sin(a*t + delta), sin(b*t))."""
    horizontal = np.sin(a * t + delta)
    vertical = np.sin(b * t)
    return (horizontal, vertical)
def lissajous_crosshair(t, a=3, b=5, delta=np.pi/2):
    """Crosshair overlay (VLine * HLine) at the Lissajous position for t."""
    px, py = lissajous(t, a, b, delta)
    return hv.VLine(px) * hv.HLine(py)
crosshair = hv.DynamicMap(lissajous_crosshair, kdims='t').redim.range(t=(-3.,3.))
path = hv.Path(lissajous(lin))
path * crosshair
```
As expected, the declared key dimension (``kdims``) has turned into a slider widget that lets us move the crosshair along the curve. Now let's see how to position the crosshair using streams.
## Introducing streams
The core concept behind a stream is simple: it defines one or more parameters that can change over time that automatically refreshes code depending on those parameter values.
Like all objects in HoloViews, these parameters are declared using [param](https://ioam.github.io/param) and streams are defined as a parameterized subclass of the ``holoviews.streams.Stream``. A more convenient way is to use the ``Stream.define`` classmethod:
```
from holoviews.streams import Stream, param
Time = Stream.define('Time', t=0.0)
```
This results in a ``Time`` class with a numeric ``t`` parameter that defaults to zero. As this object is parameterized, we can use ``hv.help`` to view it's parameters:
```
hv.help(Time)
```
This parameter is a ``param.Number`` as we supplied a float, if we had supplied an integer it would have been a ``param.Integer``. Notice that there is no docstring in the help output above but we can add one by explicitly defining the parameter as follows:
```
Time = Stream.define('Time', t=param.Number(default=0.0, doc='A time parameter'))
hv.help(Time)
```
Now that we have defined this ``Time`` stream class, we can make an instance of it and look at its parameters:
```
time_dflt = Time()
print('This Time instance has parameter t={t}'.format(t=time_dflt.t))
```
As with all parameterized classes, we can choose to instantiate our parameters with suitable values instead of relying on defaults.
```
time = Time(t=np.pi/4)
print('This Time instance has parameter t={t}'.format(t=time.t))
```
For more information on defining ``Stream`` classes this way, use ``hv.help(Stream.define)``.
### Simple streams example
We can now supply this streams object to a ``DynamicMap`` using the same ``lissajous_crosshair`` callback from above by adding it to the ``streams`` list:
```
dmap = hv.DynamicMap(lissajous_crosshair, streams=[time])
path * dmap + path * lissajous_crosshair(t=np.pi/4.)
```
Immediately we see that the crosshair position of the ``DynamicMap`` reflects the ``t`` parameter values we set on the ``Time`` stream. This means that the ``t`` parameter was supplied as the argument to the ``lissajous_crosshair`` callback. As we now have no key dimensions, there is no longer a widget for the ``t`` dimension.
Although we have what looks like a static plot, it is in fact dynamic and can be updated in place at any time. To see this, we can call the ``event`` method on our ``DynamicMap``:
```
dmap.event(t=0.2)
```
Running this cell will have updated the crosshair from its original position where $t=\frac{\pi}{4}$ to a new position where ``t=0.2``. Try running the cell above with different values of ``t`` and watch the plot update!
This ``event`` method is the recommended way of updating the stream parameters on a ``DynamicMap`` but if you have a handle on the relevant stream instance, you can also call the ``event`` method on that:
```
time.event(t=-0.2)
```
Running the cell above also moves the crosshair to a new position. As there are no key dimensions, there is only a single valid (empty) key that can be accessed with ``dmap[()]`` or ``dmap.select()`` making ``event`` the only way to explore new parameters.
We will examine the ``event`` method and the machinery that powers streams in more detail later in the user guide after we have looked at more examples of how streams are used in practice.
### Working with multiple streams
The previous example showed a curve parameterized by a single dimension ``t``. Often you will have multiple stream parameters you would like to declare as follows:
```
ls = np.linspace(0, 10, 200)
xx, yy = np.meshgrid(ls, ls)
XY = Stream.define('XY',x=0.0,y=0.0)
def marker(x, y):
    """Overlay a vertical line at x with a horizontal line at y."""
    vline = hv.VLine(x)
    hline = hv.HLine(y)
    return vline * hline
image = hv.Image(np.sin(xx)*np.cos(yy))
dmap = hv.DynamicMap(marker, streams=[XY()])
image * dmap
```
You can update both ``x`` and ``y`` by passing multiple keywords to the ``event`` method:
```
dmap.event(x=-0.2, y=0.1)
```
Note that the definition above behaves the same as the following definition where we define separate ``X`` and ``Y`` stream classes:
```python
X = Stream.define('X',x=0.0)
Y = Stream.define('Y',y=0.0)
hv.DynamicMap(marker, streams=[X(), Y()])
```
The reason why you might want to list multiple streams instead of always defining a single stream containing all the required stream parameters will be made clear in the [Custom Interactivity](./13-Custom_Interactivity.ipynb) guide.
## Using Parameterized classes as a stream
Creating a custom ``Stream`` class is one easy way to declare parameters, however in many cases you may have already expressed your domain knowledge on a ``Parameterized`` class. A ``DynamicMap`` can easily be linked to the parameters of the class using a so called ``Params`` stream, let's define a simple example which will let use dynamically alter the style applied to the ``Image`` from the previous example. We define a ``Style`` class with two parameters, one to control the colormap and another to vary the number of color levels:
```
from holoviews.streams import Params
class Style(param.Parameterized):
    """Plot-style parameters; changes to either parameter are meant to be
    watched via a ``Params`` stream to re-render the plot."""
    # Colormap name, restricted to three matplotlib-style options.
    cmap = param.ObjectSelector(default='viridis', objects=['viridis', 'plasma', 'magma'])
    # Number of discrete color levels, between 1 and 255.
    color_levels = param.Integer(default=255, bounds=(1, 255))
style = Style()
stream = Params(style)
hv.DynamicMap(image.opts, streams=[stream]).opts(colorbar=True, width=400)
```
Instead of providing a custom callback function we supplied the ``image.opts`` method, which applies the parameters directly as options. Unlike a regular streams class the plot will update whenever a parameter on the instance or class changes, e.g. we can update set the ``cmap`` and ``color_level`` parameters and watch the plot update in response:
```
style.color_levels = 10
style.cmap = 'plasma'
```
This is a powerful pattern to link parameters to a plot, particularly when combined with the [Panel](http://panel.pyviz.org/) library, which makes it easy to generate a set of widgets from a Parameterized class. To see how this works in practice see the [Dashboards user guide](./16-Dashboards.ipynb).
## Combining streams and key dimensions
All the ``DynamicMap`` examples above can't be indexed with anything other than ``dmap[()]`` or ``dmap.select()`` as none of them had any key dimensions. This was to focus exclusively on the streams system at the start of the user guide and not because you can't combine key dimensions and streams:
```
xs = np.linspace(-3, 3, 400)
def function(xs, time):
    """Time-varying profile exp(sin(xs + pi/time)), applied elementwise."""
    phase = xs + np.pi / time
    return np.exp(np.sin(phase))
def integral(limit, time):
    """Overlay visualizing the integral of `function` up to ``limit``.

    Uses the module-level grid ``xs``: the Curve shows the part of the
    function right of ``limit``, the Area shades the part left of it, and
    the Text annotation prints a Riemann-sum approximation of the shaded
    area. Returns an (Area * Curve * VLine * Text) overlay.
    """
    curve = hv.Curve((xs, function(xs, time)))[limit:]
    area = hv.Area ((xs, function(xs, time)))[:limit]
    # 0.015 ~ the spacing of the xs grid — presumably chosen to match it;
    # confirm against the definition of xs if the grid changes.
    summed = area.dimension_values('y').sum() * 0.015  # Numeric approximation
    return (area * curve * hv.VLine(limit) * hv.Text(limit + 0.5, 2.0, '%.2f' % summed))
Time = Stream.define('Time', time=1.0)
dmap=hv.DynamicMap(integral, kdims='limit', streams=[Time()]).redim.range(limit=(-3,2))
dmap
```
In this example, you can drag the slider to see a numeric approximation to the integral on the left side on the ``VLine``.
As ``'limit'`` is declared as a key dimension, it is given a normal HoloViews slider. As we have also defined a ``time`` stream, we can update the displayed curve for any time value:
```
dmap.event(time=8)
```
We now see how to control the ``time`` argument of the integral function by triggering an event with a new time value, and how to control the ``limit`` argument by moving a slider. Controlling ``limit`` with a slider this way is valid but also a little unintuitive: what if you could control ``limit`` just by hovering over the plot?
In the [Custom Interactivity](13-Custom_Interactivity.ipynb) user guide, we will see how we can do exactly this by switching to the bokeh backend and using the linked streams system.
### Matching names to arguments
Note that in the example above, the key dimension names and the stream parameter names match the arguments to the callable. This *must* be true for stream parameters but this isn't a requirement for key dimensions: if you declare the key dimension as ``'lim'`` instead of ``'limit'`` in the example above, the example still works.
Here are the rules regarding the callback argument names:
* If your key dimensions and stream parameters match the callable argument names, the definition is valid.
* If your callable accepts mandatory positional arguments and their number matches the number of key dimensions, the names don't need to match and these arguments will be passed key dimensions values.
As stream parameters always need to match the argument names, there is a method to allow them to be easily renamed. Let's say you imported a stream class as shown in [Custom_Interactivity](13-Custom_Interactivity.ipynb) or for this example, reuse the existing ``XY`` stream class. You can then use the ``rename`` method allowing the following definition:
```
def integral2(lim, t):
    """Delegate to ``integral`` under renamed arguments (lim, t)."""
    return integral(lim, t)
dmap = hv.DynamicMap(integral2, kdims='limit', streams=[Time().rename(time='t')]).redim.range(limit=(-3.,3.))
dmap
```
Occasionally, it is useful to suppress some of the stream parameters of a stream class, especially when using the *linked streams* described in [Custom_Interactivity](13-Custom_Interactivity.ipynb). To do this you can rename the stream parameter to ``None`` so that you no longer need to worry about it being passed as an argument to the callable. To re-enable a stream parameter, it is sufficient to either give the stream parameter it's original string name or a new string name.
## Overlapping stream and key dimensions
In the example above, the stream parameters do not overlap with the declared key dimension. What happens if we add 'time' to the declared key dimensions?
```
dmap=hv.DynamicMap(integral, kdims=['time','limit'], streams=[Time()]).redim.range(limit=(-3.,3.))
dmap
```
First you might notice that the 'time' value is now shown in the title but that there is no corresponding time slider as its value is supplied by the stream.
The 'time' parameter is now an instance of what are called 'dimensioned streams' which reenable indexing of these dimensions:
```
dmap[1,0] + dmap.select(time=3,limit=1.5) + dmap[None,1.5]
```
In **A**, we supply our own values for the 'time and 'limit' parameters. This doesn't change the values of the 'time' parameters on the stream itself but it does allow us to see what would happen when the time value is one. Note the use of ``None`` in **C** as a way of leaving an explicit value unspecified, allowing the current stream value to be used.
This is one good reason to use dimensioned streams - it restores access to convenient indexing and selecting operation as a way of exploring your visualizations. The other reason it is useful is that if you keep all your parameters dimensioned, it re-enables the ``DynamicMap`` cache described in the [Live Data](07-Live_Data.ipynb), allowing you to record your interaction with streams and allowing you to cast to ``HoloMap`` for export:
```
dmap.reset() # Reset the cache, we don't want the values from the cell above
# TODO: redim the limit dimension to a default of 0
dmap.event(time=1)
dmap.event(time=1.5)
dmap.event(time=2)
hv.HoloMap(dmap)
```
One use of this would be to have a simulator drive a visualization forward using ``event`` in a loop. You could then stop your simulation and retain the recent history of the output as long as the allowed ``DynamicMap`` cache.
## Generators and argument-free callables
In addition to callables, Python supports [generators](https://docs.python.org/3/glossary.html#term-generator) that can be defined with the ``yield`` keyword. Calling a function that uses yield returns a [generator iterator](https://docs.python.org/3/glossary.html#term-generator-iterator) object that accepts no arguments but returns new values when iterated or when ``next()`` is applied to it.
HoloViews supports Python generators for completeness and [generator expressions](https://docs.python.org/3/glossary.html#term-generator-expression) can be a convenient way to define code inline instead of using lambda functions. As generators expressions don't accept arguments and can get 'exhausted' ***we recommend using callables with ``DynamicMap``*** - exposing the relevant arguments also exposes control over your visualization.
Unlike generators, callables that have arguments allow you to re-visit portions of your parameter space instead of always being forced in one direction via calls to ``next()``. With this caveat in mind, here is an example of a generator and the corresponding generator iterator that returns a ``BoxWhisker`` element:
```
def sample_distributions(samples=10, tol=0.04):
    """Yield ``BoxWhisker`` elements comparing two Gaussian groups.

    Each iteration draws ``samples`` points from two standard normal
    distributions (groups 'A' and 'B') and yields a ``BoxWhisker``
    element, then grows the sample size by one for the next draw.
    The seed is fixed so the sequence is reproducible.
    """
    np.random.seed(42)
    while True:
        group_a = np.random.normal(size=samples)
        group_b = np.random.normal(size=samples)
        labels = ['A'] * samples + ['B'] * samples
        values = np.hstack([group_a, group_b])
        yield hv.BoxWhisker((labels, values), 'Group', 'Value')
        samples += 1
sample_generator = sample_distributions()
```
This returns two box whiskers representing samples from two Gaussian distributions of 10 samples. Iterating over this generator simply resamples from these distributions using an additional sample each time.
As with a callable, we can pass our generator iterator to ``DynamicMap``:
```
hv.DynamicMap(sample_generator)
```
Without using streams, we now have a problem as there is no way to trigger the generator to view the next distribution in the sequence. We can solve this by defining a stream with no parameters:
```
dmap = hv.DynamicMap(sample_generator, streams=[Stream.define('Next')()])
dmap
```
### Stream event update loops
Now we can simply use ``event()`` to drive the generator forward and update the plot, showing how the two Gaussian distributions converge as the number of samples increase.
```
for i in range(40):
dmap.event()
```
Note that there is a better way to run loops that drive ``dmap.event()`` which supports a ``period`` (in seconds) between updates and a ``timeout`` argument (also in seconds):
```
dmap.periodic(0.1, 1000, timeout=3)
```
In this generator example, ``event`` does not require any arguments but you can set the ``param_fn`` argument to a callable that takes an iteration counter and returns a dictionary for setting the stream parameters. In addition you can use ``block=False`` to avoid blocking the notebook using a threaded loop. This can be very useful although it has two downsides 1. all running visualizations using non-blocking updates will be competing for computing resources 2. if you override a variable that the thread is actively using, there can be issues with maintaining consistent state in the notebook.
Generally, the ``periodic`` utility is recommended for all such event update loops and it will be used instead of explicit loops in the rest of the user guides involving streams.
### Using ``next()``
The approach shown above of using an empty stream works in an exactly analogous fashion for callables that take no arguments. In both cases, the ``DynamicMap`` ``next()`` method is enabled:
```
hv.HoloMap({i:next(dmap) for i in range(10)}, kdims='Iteration')
```
## Next steps
The streams system allows you to update plots in place making it possible to build live visualizations that update in response to incoming live data or any other type of event. As we have seen in this user guide, you can use streams together with key dimensions to add additional interactivity to your plots while retaining the familiar widgets.
This user guide used examples that work with either the matplotlib or bokeh backends. In the [Custom Interactivity](13-Custom_Interactivity.ipynb) user guide, you will see how you can directly interact with dynamic visualizations when using the bokeh backend.
## [Advanced] How streams work
This optional section is not necessary for users who simply want to use the streams system, but it does describe how streams actually work in more detail.
A stream class is one that inherits from ``Stream`` that typically defines some new parameters. We have already seen one convenient way of defining a stream class:
```
defineXY = Stream.define('defineXY', x=0.0, y=0.0)
```
This is equivalent to the following definition which would be more appropriate in library code or for complex stream class requiring lots of parameters that need to be documented:
```
class XY(Stream):
    """A stream with two constant numeric position parameters, ``x`` and ``y``."""
    x = param.Number(default=0.0, constant=True, doc='An X position.')
    y = param.Number(default=0.0, constant=True, doc='A Y position.')
```
As we have already seen, we can make an instance of ``XY`` with some initial values for ``x`` and ``y``.
```
xy = XY(x=2,y=3)
```
However, trying to modify these parameters directly will result in an exception as they have been declared constant (e.g ``xy.x=4`` will throw an error). This is because there are two allowed ways of modifying these parameters, the simplest one being ``update``:
```
xy.update(x=4,y=50)
xy.rename(x='xpos', y='ypos').contents
```
This shows how you can update the parameters and also shows the correct way to view the stream parameter values via the ``contents`` property as this will apply any necessary renaming.
So far, using ``update`` has done nothing but force us to access parameters in a certain way. What makes streams work are the side-effects you can trigger when changing a value via the ``event`` method. The relevant side-effect is to invoke callables called 'subscribers'.
### Subscribers
Without defining any subscribers, the ``event`` method is identical to ``update``:
```
xy = XY()
xy.event(x=4,y=50)
xy.contents
```
Now let's add a subscriber:
```
def subscriber(xpos, ypos):
    """Print the (renamed) stream parameter values received on an event."""
    message = 'The subscriber received xpos={xpos} and ypos={ypos}'.format(xpos=xpos, ypos=ypos)
    print(message)
xy = XY().rename(x='xpos', y='ypos')
xy.add_subscriber(subscriber)
xy.event(x=4,y=50)
```
As we can see, now when you call ``event``, our subscriber is called with the updated parameter values, renamed as appropriate. The ``event`` method accepts the original parameter names and the subscriber receives the new values after any renaming is applied. You can add as many subscribers as you want and you can clear them using the ``clear`` method:
```
xy.clear()
xy.event(x=0,y=0)
```
When you define a ``DynamicMap`` using streams, the HoloViews plotting system installs the necessary callbacks as subscribers to update the plot when the stream parameters change. The above example clears all subscribers (it is equivalent to ``clear('all')``). To clear only the subscribers you define yourself use ``clear('user')`` and to clear any subscribers installed by the HoloViews plotting system use ``clear('internal')``.
When using linked streams as described in the [Custom Interactivity](13-Custom_Interactivity.ipynb) user guide, the plotting system recognizes the stream class and registers the necessary machinery with Bokeh to update the stream values based on direct interaction with the plot.
| github_jupyter |
```
# random data with a normal distribution curve thrown on top
from scipy.stats import norm
x = np.random.rand(1000)
fig, ax = plt.subplots()
ax = sns.distplot(x, fit=norm, kde=False)
# normal distribution vs. standard normal distribution
# 3 flavors of code using numpy/scipy to get simulated normal distribution
# scipy needed for the fit=norm in the seaborns distplot
from scipy.stats import norm
# flavor 1: normal distribution providing a mean and standard deviation
mu, sigma = 0, 1 # mean and standard deviation
x1 = np.random.normal(mu, sigma, 4000)
# flavor 2: standard normal distribution with mean = 0 and standard deviation = 1
x2 = np.random.randn(4000)
# flavor 3: scipy way
x3_x = []
x3_y = []
for i in np.arange(-4,4,0.01):
x3_x.append(i)
x3_y.append(norm.pdf(i))
# output 3
fig, ax = plt.subplots()
ax = sns.distplot(x1, fit=norm, kde=False)
ax = sns.distplot(x2, fit=norm, kde=False)
ax = sns.lineplot(x=x3_x,y=x3_y, color='red',ls='--')
plt.tight_layout()
plt.savefig(fname='normal distribution.png', dpi=150)
plt.show()
# Normal Distribution Example
# https://towardsdatascience.com/understanding-the-normal-distribution-with-python-e70bb855b027
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
import seaborn as sns
# Starting data in inches
# Assume that the true mean height of a person is 5 feet 6 inches
# and the true standard deviation is 1 foot (12 inches)
# Also define a variable called “target” of 6 feet, which is the height that our friend inquired about
mean_height = 5.5*12
stdev_height = 1*12
target = 6*12
# make a 10,000 by 10 array to hold our survey results, where each row is a survey of people heights
# Could have given the random variable function a mean and a standard deviation,
# but manually customizing the mean and standard deviation of your random variable gives more intuition:
# A) You can shift the central location to where you want it
# B) You can shift the standard deviation of normally distributed random variable, by multiplying a constant
# np.random.normal generates a random number normally distributed with mean of 0 and a standard deviation of 1
# we customize the variable by multiplying volatility and then adding mean height to shift central location
random_variable = mean_height + np.random.normal()*stdev_height
# populate the array
height_surveys = np.zeros((10000,10))
for i in range(height_surveys.shape[0]):
for j in range(height_surveys.shape[1]):
height_surveys[i,j] = mean_height + np.random.normal()*stdev_height
print('Mean Height: ', round(np.mean(height_surveys)/12,1), ' feet')
print('Standard Deviation of Height: ', round(np.var(height_surveys)**0.5 / 12, 1), ' feet')
# Multi Plot using Seaborns subplot sns
# f, axes = plt.subplots(1, 2)
# sns.boxplot( y="b", x= "a", data=df, orient='v' , ax=axes[0])
# sns.boxplot( y="c", x= "a", data=df, orient='v' , ax=axes[1])
fig3 = plt.figure(figsize=(12,8))
gs = fig3.add_gridspec(ncols=4, nrows=5)
ax1 = fig3.add_subplot(gs[0:2, :])
ax1.set_title('Plot 1: Single Sample Distribution')
ax2 = fig3.add_subplot(gs[2:5, 0:2])
ax2.set_title('Plot 2: Population Distribution')
ax3 = fig3.add_subplot(gs[2:5, -2:])
ax3.set_title('Plot 3: Sample Generated Distribution vs Population Distribution')
plt.style.use('ggplot') # ggplot tableau-colorblind10 seaborn-dark-palette bmh default
# Plot 1 - One Sample
# randomly pick one sample and plot it out = single sample histogram analysis
# NOTE: this assignment must run before the lines below use sample_survey;
# it was previously commented out, which raised a NameError.
sample_survey = height_surveys[1][:]
ax1.hist(sample_survey, label='People Height')
# vertical line to show the mean
ax1.axvline(x=sample_survey.mean(), ymin=0, ymax=1, label=('single sample mean=' + str(round(sample_survey.mean(), 2))))
ax1.set_xlabel("Height in Inches", fontsize=12)
ax1.set_ylabel("Frequency", fontsize=12)
# randomly picked sample again #2
print('Sample Standard Deviation: ' , np.var(height_surveys[2])**0.5, '') # = 12.5
# Plot 2
# now look at all samples
# histogram of all samples to show true population mean across all surveys
sns.distplot(np.mean(height_surveys,axis=1),
kde=False, label='People Height', color='blue', ax=ax2)
ax2.set_xlabel("Height in Inches",fontsize=12)
ax2.set_ylabel("Frequency",fontsize=12)
ax2.axvline(x=target, color='red', label=('target=' + str(round(target,2))))
ax2.legend()
plt.tight_layout()
# Plot 3
# compare sample mean vs. true mean
# plot histogram to show all means for each survey, for all surveys
sns.distplot(np.mean(height_surveys,axis=1),
kde=False, label='True', color='blue', ax=ax3)
ax3.set_xlabel("Height in Inches",fontsize=14)
ax3.set_ylabel("Frequency",fontsize=14)
ax3.axvline(x=target, color='red', label=('target=' + str(round(target,2))))
# Calculate stats using single sample
sample_mean = np.mean(height_surveys[3])
sample_stdev = np.var(height_surveys[3])**0.5
# Calculate standard error = sample std dev / sqrt(N) where N = 10 in this example
std_error = sample_stdev/(height_surveys[3].shape[0])**0.5
########IMPORTANT#############################
# Infer distribution using single sample into 10000 samples
inferred_distribution = [sample_mean + np.random.normal()*\
std_error for i in range(10000)]
# Plot histogram of inferred distribution
sns.distplot(inferred_distribution, kde=False, label='Inferred', color='red', ax=ax3)
ax3.set_xlabel("Height in Inches",fontsize=12)
ax3.set_ylabel("Frequency",fontsize=12)
ax3.legend()
plt.tight_layout()
# If jupyter output starts to scroll, how to remove/undo/turn off the scroll output cell:
# I just placed my cursor in the grey box next to the output and clicked and then all of the output was displayed.
# true vs all surveys combined
fig, ax = plt.subplots(figsize=(12,8))
# True Population
sns.distplot(np.mean(height_surveys,axis=1), kde=False,
label='True', color='darkblue', hist_kws = {"alpha": 1.0}, ax=ax)
ax.axvline(x=target, color='red', label=('target=' + str(round(target,2))))
# If you unwrap all surveys, can make distribution wider than True
sns.distplot(height_surveys.flatten()[:height_surveys.shape[0]],
kde=False,
label='All Surveys Height Distribution', hist_kws = {"alpha": 0.2}, ax=ax)
ax.set_xlabel("Height in Inches",fontsize=14)
ax.set_ylabel("Frequency",fontsize=14)
plt.legend()
# Variance Analysis (x axis should be smaller now)
# The distribution of the sample standard deviations is also roughly normal (Plot it to prove it)
# Remember that a sample standard deviation is the standard deviation of a single survey of 10 people)
# Take each sample, calculate the standard deviation of each sample, plot it
# Then analyze the "Standard Deviation of Standard Deviations"
# How much does the standard deviation vary?
# Calculate the standard deviation for each sample (square root of the variance)
# np.var:
# axis = None is default: The default is to compute the variance of the flattened array.
# axis = 1 calculates each row (axis = 0: calculates down each of the columns)
volatility_dist = ( np.var(height_surveys,axis=1) )**0.5
# Histogram to show distribution of 10000 sample standard deviation
fig, ax = plt.subplots(figsize=(12,8))
sns.set_style('dark')
sns.distplot(volatility_dist, kde=False, label='The Distribution of the Sample Standard Deviations', color='navy',hist_kws = {"alpha": 0.8})
ax.set_xlabel("Inches",fontsize=12)
ax.set_ylabel("Frequency",fontsize=12)
ax.legend()
###################################################################################################
# Add more analysis
# How does standard deviation distribution magnitude compare to previous plot (sample distribution of heights)?
# standard error = standard deviation / sqrt(N)
SE_dist = volatility_dist/(height_surveys.shape[1]**0.5)
sns.distplot(SE_dist, kde=False, label='Sample Standard Error Distribution1', color='red', ax=ax)
ax.set_xlabel("Height in Inches",fontsize=12)
ax.set_ylabel("Frequency",fontsize=12)
ax.legend()
plt.tight_layout()
###################################################################################################
# Annotate with text + Arrow
# Label and coordinate
ax.annotate('This standard deviation is so wide!',
xy=(5, 50), xytext=(17, 50),
arrowprops={'arrowstyle': '<->','lw': 2, 'color': 'red'}, va='center') # Custom arrow
# regular text
ax.text(20,400, 'hello', fontsize=12)
# boxed text
ax.text(20, 550, 'boxed italics text in data coords', style='italic',
bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
# equation text
ax.text(20, 500, r'an equation: $E=mc^2$', fontsize=15)
plt.tight_layout()
# Increasing N observations to shrink the standard error
random_variable = mean_height + np.random.normal()*stdev_height
# populate the array
height_surveys = np.zeros((10000,30))
for i in range(height_surveys.shape[0]):
for j in range(height_surveys.shape[1]):
height_surveys[i,j] = mean_height + np.random.normal()*stdev_height
print('Mean Height: ', round(np.mean(height_surveys)/12,1), ' feet')
print('Standard Deviation of Height: ', round(np.var(height_surveys)**0.5 / 12, 1), ' feet')
# Variance Analysis (x axis should be smaller now)
# The distribution of the sample standard deviations is also roughly normal (Plot it to prove it)
# Remember that a sample standard deviation is the standard deviation of a single survey of 10 people)
# Take each sample, calculate the standard deviation of each sample, plot it
# Then analyze the "Standard Deviation of Standard Deviations"
# How much does the standard deviation vary?
# Calculate the standard deviation for each sample (square root of the variance)
# np.var:
# axis = None is default: The default is to compute the variance of the flattened array.
# axis = 1 calculates each row (axis = 0: calculates down each of the columns)
volatility_dist = ( np.var(height_surveys,axis=1) )**0.5
# Histogram to show distribution of 10000 sample standard deviation
fig, ax = plt.subplots(figsize=(12,8))
sns.set_style('dark')
sns.distplot(volatility_dist, kde=False, label='The Distribution of the Sample Standard Deviations',
color='navy',hist_kws = {"alpha": 0.8})
ax.set_xlabel("Inches",fontsize=12)
ax.set_ylabel("Frequency",fontsize=12)
ax.legend()
###################################################################################################
# Add more analysis
# How does standard deviation distribution magnitude compare to previous plot (sample distribution of heights)?
# standard error = standard deviation / sqrt(N)
SE_dist2 = volatility_dist/(height_surveys.shape[1]**0.5)
sns.distplot(SE_dist2 , kde=False, label='Sample Standard Error Distribution2', color='red', ax=ax)
sns.distplot(SE_dist , kde=False, label='Sample Standard Error Distribution1', color='orange', ax=ax)
ax.set_xlabel("Height in Inches",fontsize=12)
ax.set_ylabel("Frequency",fontsize=12)
ax.legend()
plt.tight_layout()
###################################################################################################
# Annotate with text + Arrow
# Label and coordinate
ax.annotate('This standard deviation is so wide!',
xy=(5, 50), xytext=(17, 50),
arrowprops={'arrowstyle': '<->','lw': 2, 'color': 'red'}, va='center') # Custom arrow
# regular text
ax.text(20,400, 'hello', fontsize=12)
# boxed text
ax.text(20, 550, 'boxed italics text in data coords', style='italic',
bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
# equation text
ax.text(20, 500, r'an equation: $E=mc^2$', fontsize=15)
plt.tight_layout()
# Calculate the probability of a person being 6 feet or taller
# Method 1: using simulation generating 10000 random variables
inferred_dist = [sample_mean + np.random.normal()*std_error for i in range(10000)]
probability1 = sum([1 for i in inferred_dist if i>=target])/len(inferred_dist)
print('The simulated probability is ', probability1)
# Method 2: using cumulative distribution function
probability2 = 1 - norm.cdf(target, loc=sample_mean, scale=std_error)
print('The calculated probability is ', round(probability2,5))
'''
Plot a normally distributed random variable - and samples of this process - using scipy's univariate probability distributions.
'''
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd
# Define parameters for normal distribution.
mu = 0
sigma = 5
rng = range(-30,30)
# Generate normal distribution with given mean and standard deviation.
dist = norm(mu, sigma)
# Plot probability density function and of this distribution.
# the pdf() method takes takes in a list x values and returns a list of y's.
plt.figure(figsize=(10, 10))
plt.subplot(311) # Creates a 3 row, 1 column grid of plots, and renders the following chart in slot 1.
plt.plot(rng, dist.pdf(rng), 'r', linewidth=2)
plt.title('Probability density function of normal distribution')
# Plot the cumulative distribution function of this distribution.
plt.subplot(312)
plt.plot(rng, dist.cdf(rng))
plt.title('Cumulative distribution function of normal distribution')
# Draw 10,000 samples from the random variable.
sample = dist.rvs(size=10000)
print ("Sample descriptive statistics:")
print (pd.DataFrame(sample).describe())
# Plot a histogram of the samples.
plt.subplot(313)
plt.hist(sample, bins=50, density=True)
plt.plot(rng, dist.pdf(rng), 'r--', linewidth=2)
plt.title('10,000 random samples from normal distribution')
# Show all plots.
plt.show()
print('x')
```
| github_jupyter |
```
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
## Install NeMo if using google collab or if its not installed locally
BRANCH = 'r1.7.0'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
## Install dependencies
!pip install wget
!pip install faiss-gpu
import faiss
import torch
import wget
import os
import numpy as np
import pandas as pd
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from IPython.display import display
from tqdm import tqdm
from nemo.collections import nlp as nemo_nlp
from nemo.utils.exp_manager import exp_manager
```
## Entity Linking
#### Task Description
[Entity linking](https://en.wikipedia.org/wiki/Entity_linking) is the process of connecting concepts mentioned in natural language to their canonical forms stored in a knowledge base. For example, say a knowledge base contained the entity 'ID3452 influenza' and we wanted to process some natural language containing the sentence "The patient has flu like symptoms". An entity linking model would match the word 'flu' to the knowledge base entity 'ID3452 influenza', allowing for disambiguation and normalization of concepts referenced in text. Entity linking applications range from helping automate data ingestion to assisting in real time dialogue concept normalization. We will be focusing on entity linking in the medical domain for this demo, but the entity linking model, dataset, and training code within NVIDIA NeMo can be applied to other domains like finance and retail.
Within NeMo and this tutorial we use the entity linking approach described in Liu et. al's NAACL 2021 "[Self-alignment Pre-training for Biomedical Entity Representations](https://arxiv.org/abs/2010.11784v2)". The main idea behind this approach is to reshape an initial concept embedding space such that synonyms of the same concept are pulled closer together and unrelated concepts are pushed further apart. The concept embeddings from this reshaped space can then be used to build a knowledge base embedding index. This index stores concept IDs mapped to their respective concept embeddings in a format conducive to efficient nearest neighbor search. We can link query concepts to their canonical forms in the knowledge base by performing a nearest neighbor search- matching concept query embeddings to the most similar concepts embeddings in the knowledge base index.
In this tutorial we will be using the [faiss](https://github.com/facebookresearch/faiss) library to build our concept index.
#### Self Alignment Pretraining
Self-Alignment pretraining is a second stage pretraining of an existing encoder (called second stage because the encoder model can be further finetuned after this more general pretraining step). The dataset used during training consists of pairs of concept synonyms that map to the same ID. At each training iteration, we only select *hard* examples present in the mini batch to calculate the loss and update the model weights. In this context, a hard example is an example where a concept is closer to an unrelated concept in the mini batch than it is to the synonym concept it is paired with by some margin. I encourage you to take a look at [section 2 of the paper](https://arxiv.org/pdf/2010.11784.pdf) for a more formal and in depth description of how hard examples are selected.
We then use a [metric learning loss](https://openaccess.thecvf.com/content_CVPR_2019/papers/Wang_Multi-Similarity_Loss_With_General_Pair_Weighting_for_Deep_Metric_Learning_CVPR_2019_paper.pdf) calculated from the hard examples selected. This loss helps reshape the embedding space. The concept representation space is rearranged to be more suitable for entity matching via embedding cosine similarity.
Now that we have an idea of what's going on, let's get started!
## Dataset Preprocessing
```
# Download data into project directory
PROJECT_DIR = "." #Change if you don't want the current directory to be the project dir
DATA_DIR = os.path.join(PROJECT_DIR, "tiny_example_data")
if not os.path.isdir(os.path.join(DATA_DIR)):
wget.download('https://dldata-public.s3.us-east-2.amazonaws.com/tiny_example_data.zip',
os.path.join(PROJECT_DIR, "tiny_example_data.zip"))
!unzip {PROJECT_DIR}/tiny_example_data.zip -d {PROJECT_DIR}
```
In this tutorial we will be using a tiny toy dataset to demonstrate how to use NeMo's entity linking model functionality. The dataset includes synonyms for 12 medical concepts. Entity phrases with the same ID are synonyms for the same concept. For example, "*chronic kidney failure*", "*gradual loss of kidney function*", and "*CKD*" are all synonyms of concept ID 5. Here's the dataset before preprocessing:
```
raw_data = pd.read_csv(os.path.join(DATA_DIR, "tiny_example_dev_data.csv"), names=["ID", "CONCEPT"], index_col=False)
print(raw_data)
```
We've already paired off the concepts for this dataset with the format `ID concept_synonym1 concept_synonym2`. Here are the first ten rows:
```
training_data = pd.read_table(os.path.join(DATA_DIR, "tiny_example_train_pairs.tsv"), names=["ID", "CONCEPT_SYN1", "CONCEPT_SYN2"], delimiter='\t')
print(training_data.head(10))
```
Use the [Unified Medical Language System (UMLS)](https://www.nlm.nih.gov/research/umls/index.html) dataset for full medical domain entity linking training. The data contains over 9 million entities and is a table of medical concepts with their corresponding concept IDs (CUI). After [requesting a free license and making a UMLS Terminology Services (UTS) account](https://www.nlm.nih.gov/research/umls/index.html), the [entire UMLS dataset](https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html) can be downloaded from the NIH's website. If you've cloned the NeMo repo you can run the data processing script located in `examples/nlp/entity_linking/data/umls_dataset_processing.py` on the full dataset. This script will take in the initial table of UMLS concepts and produce a .tsv file with each row formatted as `CUI\tconcept_synonym1\tconcept_synonym2`. Once the UMLS dataset .RRF file is downloaded, the script can be run from the `examples/nlp/entity_linking` directory like so:
```
python data/umls_dataset_processing.py
```
## Model Training
Second stage pretrain a BERT Base encoder on the self-alignment pretraining task (SAP) for improved entity linking. Using a GPU, the model should take 5 minutes or less to train on this example dataset and training progress will be output below the cell.
```
#Download config
wget.download("https://raw.githubusercontent.com/vadam5/NeMo/main/examples/nlp/entity_linking/conf/tiny_example_entity_linking_config.yaml",
os.path.join(PROJECT_DIR, "tiny_example_entity_linking_config.yaml"))
# Load in config file
cfg = OmegaConf.load(os.path.join(PROJECT_DIR, "tiny_example_entity_linking_config.yaml"))
# Set config file variables
cfg.project_dir = PROJECT_DIR
cfg.model.nemo_path = os.path.join(PROJECT_DIR, "tiny_example_sap_bert_model.nemo")
cfg.model.train_ds.data_file = os.path.join(DATA_DIR, "tiny_example_train_pairs.tsv")
cfg.model.validation_ds.data_file = os.path.join(DATA_DIR, "tiny_example_validation_pairs.tsv")
# remove distributed training flags
cfg.trainer.accelerator = None
# Initialize the trainer and model
trainer = Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = nemo_nlp.models.EntityLinkingModel(cfg=cfg.model, trainer=trainer)
# Train and save the model
trainer.fit(model)
model.save_to(cfg.model.nemo_path)
```
You can run the script at `examples/nlp/entity_linking/self_alignment_pretraining.py` to train a model on a larger dataset. Run
```
python self_alignment_pretraining.py project_dir=.
```
from the `examples/nlp/entity_linking` directory.
## Model Evaluation
Let's evaluate our freshly trained model and compare its performance with a BERT Base encoder that hasn't undergone self-alignment pretraining. We first need to restore our trained model and load our BERT Base Baseline model.
```
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Restore second stage pretrained model
sap_model_cfg = cfg
sap_model_cfg.index.index_save_name = os.path.join(PROJECT_DIR, "tiny_example_entity_linking_index")
sap_model_cfg.index.index_ds.data_file = os.path.join(DATA_DIR, "tiny_example_index_data.tsv")
sap_model = nemo_nlp.models.EntityLinkingModel.restore_from(sap_model_cfg.model.nemo_path).to(device)
# Load original model
base_model_cfg = OmegaConf.load(os.path.join(PROJECT_DIR, "tiny_example_entity_linking_config.yaml"))
# Set train/val datasets to None to avoid loading datasets associated with training
base_model_cfg.model.train_ds = None
base_model_cfg.model.validation_ds = None
base_model_cfg.index.index_save_name = os.path.join(PROJECT_DIR, "base_model_index")
base_model_cfg.index.index_ds.data_file = os.path.join(DATA_DIR, "tiny_example_index_data.tsv")
base_model = nemo_nlp.models.EntityLinkingModel(base_model_cfg.model).to(device)
```
We are going to evaluate our model on a nearest neighbor task using top 1 and top 5 accuracies as our metric. We will be using a tiny example test knowledge base and test queries. For this evaluation we are going to be comparing every test query with every concept vector in our test set knowledge base. We will rank each item in the knowledge base by its cosine similarity with the test query. We'll then compare the IDs of the predicted most similar test knowledge base concepts with our ground truth query IDs to calculate top 1 and top 5 accuracies. For this metric higher is better.
```
# Helper function to get data embeddings
def get_embeddings(model, dataloader):
    """Run *model* over every batch in *dataloader* and collect results.

    Returns a pair ``(embeddings, cids)`` where ``embeddings`` is a list of
    per-example embedding vectors (as numpy arrays) and ``cids`` the
    corresponding concept IDs, in dataloader order.
    """
    all_embs = []
    all_cids = []
    # Inference only: no gradients needed.
    with torch.no_grad():
        for input_ids, token_type_ids, attention_mask, batch_cids in tqdm(dataloader):
            batch_embs = model.forward(
                input_ids=input_ids.to(device),
                token_type_ids=token_type_ids.to(device),
                attention_mask=attention_mask.to(device),
            )
            all_embs.extend(batch_embs.cpu().detach().numpy())
            all_cids.extend(batch_cids)
    return all_embs, all_cids
def evaluate(model, test_kb, test_queries, ks):
    """Compute top-k retrieval accuracy of *model* linking queries to a knowledge base.

    Embeds both the knowledge base and the queries, scores every
    query/KB pair by inner product of their embeddings, and for each
    ``k`` in *ks* reports the fraction of queries whose true concept ID
    appears among the k highest-scoring KB entries.
    """
    # Build data loaders for the KB and the queries.
    kb_loader = model.setup_dataloader(test_kb, is_index_data=True)
    query_loader = model.setup_dataloader(test_queries, is_index_data=True)

    # Embed both sides once up front.
    kb_embs, kb_cids = get_embeddings(model, kb_loader)
    query_embs, query_cids = get_embeddings(model, query_loader)

    # score_matrix[i, j] = similarity between query i and KB concept j.
    score_matrix = np.matmul(np.array(query_embs), np.array(kb_embs).T)

    accs = dict.fromkeys(ks, 0)
    # Count, for each query, whether its true ID is among the top-k matches.
    for query_idx in tqdm(range(len(query_cids))):
        true_cid = query_cids[query_idx]
        scores = score_matrix[query_idx]
        for k in ks:
            # argpartition gives the indices of the k largest scores (unordered).
            topk_idxs = np.argpartition(scores, -k)[-k:]
            topk_cids = [kb_cids[idx] for idx in topk_idxs]
            # If the correct query ID is among the top k closest KB IDs
            # the model correctly linked the entity.
            if true_cid in topk_cids:
                accs[k] += 1

    # Convert counts to fractions.
    for k in ks:
        accs[k] /= len(query_cids)
    return accs
# Create configs for our test data
test_kb = OmegaConf.create({
"data_file": os.path.join(DATA_DIR, "tiny_example_test_kb.tsv"),
"max_seq_length": 128,
"batch_size": 10,
"shuffle": False,
})
test_queries = OmegaConf.create({
"data_file": os.path.join(DATA_DIR, "tiny_example_test_queries.tsv"),
"max_seq_length": 128,
"batch_size": 10,
"shuffle": False,
})
ks = [1, 5]
# Evaluate both models on our test data
base_accs = evaluate(base_model, test_kb, test_queries, ks)
base_accs["Model"] = "BERT Base Baseline"
sap_accs = evaluate(sap_model, test_kb, test_queries, ks)
sap_accs["Model"] = "BERT + SAP"
print("Top 1 and Top 5 Accuracy Comparison:")
results_df = pd.DataFrame([base_accs, sap_accs], columns=["Model", 1, 5])
results_df = results_df.style.set_properties(**{'text-align': 'left', }).set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
display(results_df)
```
The purpose of this section was to show an example of evaluating your entity linking model. This evaluation set contains very little data, and no serious conclusions should be drawn about model performance. Top 1 accuracy should be between 0.7 and 1.0 for both models and top 5 accuracy should be between 0.8 and 1.0. When evaluating a model trained on a larger dataset, you can use a nearest neighbors index to speed up the evaluation time.
## Building an Index
To qualitatively observe the improvement we gain from the second stage pretraining, let's build two indices. One will be built with BERT base embeddings before self-alignment pretraining and one will be built with the model we just trained. Our knowledge base in this tutorial will be in the same domain and have some overlapping concepts as the training set. This data file is formatted as `ID\tconcept`.
The `EntityLinkingDataset` class can load the data used for training the entity linking encoder as well as for building the index if the `is_index_data` flag is set to true.
```
def build_index(cfg, model):
    """Embed the knowledge base with `model`, train a faiss IVFFlat index, and save it."""
    # Load the index dataset through the model's own dataloader.
    loader = model.setup_dataloader(cfg.index.index_ds, is_index_data=True)

    # Embed every knowledge-base concept.
    concept_embs, _ = get_embeddings(model, loader)
    concept_embs = np.array(concept_embs)

    # Train an IVFFlat index (L2 quantizer) on the concept embeddings, on GPU.
    quantizer = faiss.IndexFlatL2(cfg.index.dims)
    ivf_index = faiss.IndexIVFFlat(quantizer, cfg.index.dims, cfg.index.nlist)
    ivf_index = faiss.index_cpu_to_all_gpus(ivf_index)
    ivf_index.train(concept_embs)

    # Add the embeddings in batches, then persist the index to disk in CPU form.
    step = cfg.index.index_batch_size
    for start in tqdm(range(0, concept_embs.shape[0], step)):
        ivf_index.add(concept_embs[start:start + step])
    faiss.write_index(faiss.index_gpu_to_cpu(ivf_index), cfg.index.index_save_name)

build_index(sap_model_cfg, sap_model.to(device))
build_index(base_model_cfg, base_model.to(device))
```
## Entity Linking via Nearest Neighbor Search
Now it's time to query our indices! We are going to query both our index built with embeddings from BERT Base, and our index with embeddings built from the SAP BERT model we trained. Our sample query phrases will be "*high blood sugar*" and "*head pain*".
To query our indices, we first need to get the embedding of each query from the corresponding encoder model. We can then pass these query embeddings into the faiss index which will perform a nearest neighbor search, using cosine distance to compare the query embedding with embeddings present in the index. Once we get a list of knowledge base index concept IDs most closely matching our query, all that is left to do is map the IDs to a representative string describing the concept.
```
def query_index(cfg, model, index, queries, id2string):
    """Embed each query, search the faiss index, and print the nearest KB concepts."""
    # Encode the queries with the entity-linking encoder.
    query_embs = get_query_embedding(queries, model).cpu().detach().numpy()

    # Nearest-neighbor search against the knowledge-base embeddings.
    distances, neighbors = index.search(query_embs, cfg.index.top_n)

    # Translate neighbor concept IDs back into canonical strings.
    neighbor_concepts = [
        [id2string[concept_id] for concept_id in query_neighbors]
        for query_neighbors in neighbors
    ]

    # Report the closest concepts for every query.
    for q_idx, query in enumerate(queries):
        print(f"\nThe most similar concepts to {query} are:")
        for cid, concept, dist in zip(neighbors[q_idx], neighbor_concepts[q_idx], distances[q_idx]):
            print(cid, concept, 1 - dist)
def get_query_embedding(queries, model):
    """Tokenize `queries` and return their embeddings from the encoder model."""
    # Tokenize with the model's own tokenizer (padded/truncated to 512 tokens).
    tokenized = model.tokenizer(
        queries,
        add_special_tokens=True,
        padding=True,
        truncation=True,
        max_length=512,
        return_token_type_ids=True,
        return_attention_mask=True,
    )

    # Forward pass on `device`; tokenizer list outputs become LongTensors.
    def as_tensor(key):
        return torch.LongTensor(tokenized[key]).to(device)

    return model.forward(
        input_ids=as_tensor("input_ids"),
        token_type_ids=as_tensor("token_type_ids"),
        attention_mask=as_tensor("attention_mask"),
    )
# Load the saved faiss indices for both encoders.
sap_index = faiss.read_index(sap_model_cfg.index.index_save_name)
base_index = faiss.read_index(base_model_cfg.index.index_save_name)

# Map concept IDs to one canonical string. Use a context manager so the data
# file is closed after parsing (the original leaked the file handle).
id2string = {}
with open(sap_model_cfg.index.index_ds.data_file, "r", encoding='utf-8-sig') as index_data:
    for line in index_data:
        cid, concept = line.split("\t")
        # File IDs appear to be 1-based while faiss vectors are 0-based — TODO confirm
        id2string[int(cid) - 1] = concept.strip()
id2string

# Some sample queries
queries = ["high blood sugar", "head pain"]

# Query BERT Base
print("BERT Base output before Self Alignment Pretraining:")
query_index(base_model_cfg, base_model, base_index, queries, id2string)
print("\n" + "-" * 50 + "\n")

# Query SAP BERT
print("SAP BERT output after Self Alignment Pretraining:")
query_index(sap_model_cfg, sap_model, sap_index, queries, id2string)
print("\n" + "-" * 50 + "\n")
```
Even after only training on this tiny amount of data, the qualitative performance boost from self-alignment pretraining is visible. The baseline model links "*high blood sugar*" to the entity "*6 diabetes*" while our SAP BERT model accurately links "*high blood sugar*" to "*Hyperinsulinemia*". Similarly, "*head pain*" and "*Myocardial infraction*" are not the same concept, but "*head pain*" and "*Headache*" are.
For larger knowledge bases keeping the default embedding size might be too large and cause out of memory issues. You can apply PCA or some other dimensionality reduction method to your data to reduce its memory footprint. Code for creating a text file of all the UMLS entities in the correct format needed to build an index and creating a dictionary mapping concept ids to canonical concept strings can be found here `examples/nlp/entity_linking/data/umls_dataset_processing.py`.
The code for extracting knowledge base concept embeddings, training and applying a PCA transformation to the embeddings, building a faiss index and querying the index from the command line is located at `examples/nlp/entity_linking/build_index.py` and `examples/nlp/entity_linking/query_index.py`.
If you've cloned the NeMo repo, both of these steps can be run as follows on the command line from the `examples/nlp/entity_linking/` directory.
```
python data/umls_dataset_processing.py --index
python build_index.py --restore
python query_index.py --restore
```
By default the project directory will be "." but can be changed by adding the flag `--project_dir=<PATH>` after each of the above commands. Intermediate steps of the index building process are saved. In the occurrence of an error, previously completed steps do not need to be rerun.
## Command Recap
Here is a recap of the commands and steps to repeat this process on the full UMLS dataset.
1) Download the UMLS dataset file `MRCONSO.RRF` from the NIH website and place it in the `examples/nlp/entity_linking/data` directory.
2) Run the following commands from the `examples/nlp/entity_linking` directory
```
python data/umls_dataset_processing.py
python self_alignment_pretraining.py project_dir=.
python data/umls_dataset_processing.py --index
python build_index.py --restore
python query_index.py --restore
```
The model will take ~24hrs to train on two GPUs and ~48hrs to train on one GPU. By default the project directory will be "." but can be changed by adding the flag `--project_dir=<PATH>` after each of the above commands and changing `project_dir=<PATH>` in the `self_alignment_pretraining.py` command. If you change the project directory, you should also move the `MRCONSO.RRF` file to a `data` sub directory within the one you've specified.
As mentioned in the introduction, entity linking within NVIDIA NeMo is not limited to the medical domain. The same data processing and training steps can be applied to a variety of domains and use cases. You can edit the datasets used as well as training and loss function hyperparameters within your config file to better suit your domain.
| github_jupyter |
# Building your Deep Neural Network: Step by Step
You will implement all the building blocks of a neural network and use these building blocks to build a neural network of any architecture you want. By completing this assignment you will:
- Develop an intuition of the over all structure of a neural network.
- Write functions (e.g. forward propagation, backward propagation, logistic loss, etc...) that would help you decompose your code and ease the process of building a neural network.
- Initialize/update parameters according to your desired structure.
<!-- - In this notebook, you will implement all the functions required to build a deep neural network. -->
<!-- - In the next assignment, you will use these functions to build a deep neural network for image classification. -->
**After this assignment you will be able to:**
- Use non-linear units like ReLU to improve your model
- Build a deeper neural network (with more than 1 hidden layer)
- Implement an easy-to-use neural network class
**Notation**:
- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer.
- Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.
- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example.
- Example: $x^{(i)}$ is the $i^{th}$ training example.
- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
- Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).
Let's get started!
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](https://www.numpy.org) is the main package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- dnn_utils provides some necessary functions for this notebook.
- testCases provides some test cases to assess the correctness of your functions
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed.
```
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v4a import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
```
## 2 - Outline of the Assignment
To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the notebook "HW1_Part2_Deep_Neural_Network_Application" to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:
- Initialize the parameters for a two-layer network and for an $L$-layer neural network.
- Implement the forward propagation module (shown in purple in the figure below).
- Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).
- We give you the ACTIVATION function (relu/sigmoid).
- Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.
- Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.
- Compute the loss.
- Implement the backward propagation module (denoted in red in the figure below).
- Complete the LINEAR part of a layer's backward propagation step.
- We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward)
- Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function.
- Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function
- Finally update the parameters.
<img src="images/final outline.png" style="width:800px;height:500px;">
<caption><center> **Figure 1**</center></caption><br>
**Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps.
## 3 - Initialization
You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.
### 3.1 - 2-layer Neural Network
**Exercise**: Create and initialize the parameters of the 2-layer neural network.
**Instructions**:
- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*.
- Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.
- Use zero initialization for the biases. Use `np.zeros(shape)`.
```
def initialize_parameters(n_x, n_h, n_y):
    """
    Create and initialize the parameters of a 2-layer network
    (LINEAR -> RELU -> LINEAR -> SIGMOID).

    Argument:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- python dictionary containing your parameters:
                    W1 -- weight matrix of shape (n_h, n_x)
                    b1 -- bias vector of shape (n_h, 1)
                    W2 -- weight matrix of shape (n_y, n_h)
                    b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(1)  # fixed seed so results are reproducible/gradable

    # Small random weights break symmetry between hidden units; zero biases.
    # NOTE: dict literals evaluate in order, so the randn calls consume the
    # RNG stream in the same W1-then-W2 order as a line-by-line version.
    parameters = {
        "W1": np.random.randn(n_h, n_x) * 0.01,
        "b1": np.zeros((n_h, 1)),
        "W2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros((n_y, 1)),
    }

    # Sanity-check all shapes before handing the parameters back.
    assert parameters["W1"].shape == (n_h, n_x)
    assert parameters["b1"].shape == (n_h, 1)
    assert parameters["W2"].shape == (n_y, n_h)
    assert parameters["b2"].shape == (n_y, 1)

    return parameters
# Smoke test: a tiny net with 3 inputs, 2 hidden units, and 1 output.
parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td> **W1** </td>
<td> [[ 0.01624345 -0.00611756 -0.00528172]
[-0.01072969 0.00865408 -0.02301539]] </td>
</tr>
<tr>
<td> **b1**</td>
<td>[[ 0.]
[ 0.]]</td>
</tr>
<tr>
<td>**W2**</td>
<td> [[ 0.01744812 -0.00761207]]</td>
</tr>
<tr>
<td> **b2** </td>
<td> [[ 0.]] </td>
</tr>
</table>
### 3.2 - L-layer Neural Network
The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:
<table style="width:100%">
<tr>
<td> </td>
<td> **Shape of W** </td>
<td> **Shape of b** </td>
<td> **Activation** </td>
<td> **Shape of Activation** </td>
<tr>
<tr>
<td> **Layer 1** </td>
<td> $(n^{[1]},12288)$ </td>
<td> $(n^{[1]},1)$ </td>
<td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td>
<td> $(n^{[1]},209)$ </td>
<tr>
<tr>
<td> **Layer 2** </td>
<td> $(n^{[2]}, n^{[1]})$ </td>
<td> $(n^{[2]},1)$ </td>
<td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td>
<td> $(n^{[2]}, 209)$ </td>
<tr>
<tr>
<td> $\vdots$ </td>
<td> $\vdots$ </td>
<td> $\vdots$ </td>
<td> $\vdots$</td>
<td> $\vdots$ </td>
<tr>
<tr>
<td> **Layer L-1** </td>
<td> $(n^{[L-1]}, n^{[L-2]})$ </td>
<td> $(n^{[L-1]}, 1)$ </td>
<td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td>
<td> $(n^{[L-1]}, 209)$ </td>
<tr>
<tr>
<td> **Layer L** </td>
<td> $(n^{[L]}, n^{[L-1]})$ </td>
<td> $(n^{[L]}, 1)$ </td>
<td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>
<td> $(n^{[L]}, 209)$ </td>
<tr>
</table>
Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if:
$$ W = \begin{bmatrix}
j & k & l\\
m & n & o \\
p & q & r
\end{bmatrix}\;\;\; X = \begin{bmatrix}
a & b & c\\
d & e & f \\
g & h & i
\end{bmatrix} \;\;\; b =\begin{bmatrix}
s \\
t \\
u
\end{bmatrix}\tag{2}$$
Then $WX + b$ will be:
$$ WX + b = \begin{bmatrix}
(ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\
(ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\
(pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u
\end{bmatrix}\tag{3} $$
**Exercise**: Implement initialization for an L-layer Neural Network.
**Instructions**:
- The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.
- Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.
- Use zeros initialization for the biases. Use `np.zeros(shape)`.
- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Multi-layer Perceptron Model" from Lecture 04 would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. This means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers!
- Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).
```python
if L == 1:
parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01
parameters["b" + str(L)] = np.zeros((layer_dims[1], 1))
```
```
def initialize_parameters_deep(layer_dims):
    """
    Initialize the parameters of an L-layer network.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer
                  in our network; layer_dims[0] is the input dimension

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed for reproducibility
    parameters = {}
    num_layers = len(layer_dims)  # counts the input "layer" as well

    for layer in range(1, num_layers):
        fan_out, fan_in = layer_dims[layer], layer_dims[layer - 1]
        # Small random weights to break symmetry; biases start at zero.
        parameters["W" + str(layer)] = np.random.randn(fan_out, fan_in) * 0.01
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))
        assert parameters["W" + str(layer)].shape == (fan_out, fan_in)
        assert parameters["b" + str(layer)].shape == (fan_out, 1)

    return parameters
# Smoke test: deep init for layer sizes [5, 4, 3] (input dim 5).
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected output**:
<table style="width:80%">
<tr>
<td> **W1** </td>
<td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]
[-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]
[-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]
[-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td>
</tr>
<tr>
<td>**b1** </td>
<td>[[ 0.]
[ 0.]
[ 0.]
[ 0.]]</td>
</tr>
<tr>
<td>**W2** </td>
<td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]
[-0.01023785 -0.00712993 0.00625245 -0.00160513]
[-0.00768836 -0.00230031 0.00745056 0.01976111]]</td>
</tr>
<tr>
<td>**b2** </td>
<td>[[ 0.]
[ 0.]
[ 0.]]</td>
</tr>
</table>
## 4 - Forward propagation module
### 4.1 - Linear Forward
Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:
- LINEAR
- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid.
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model)
The linear forward module (vectorized over all the examples) computes the following equations:
$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$
where $A^{[0]} = X$.
**Exercise**: Build the linear part of forward propagation.
**Reminder**:
The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.
```
def linear_forward(A, W, b):
    """
    Implement the linear part of a layer's forward propagation: Z = W A + b.

    Arguments:
    A -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)

    Returns:
    Z -- the input of the activation function, also called pre-activation parameter
    cache -- a python tuple containing "A", "W" and "b" ; stored for computing the backward pass efficiently
    """
    # Broadcasting adds the (n, 1) bias to every column of W A.
    Z = W @ A + b

    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
# Smoke test with fixture data from testCases_v4a.
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
```
**Expected output**:
<table style="width:35%">
<tr>
<td> **Z** </td>
<td> [[ 3.26295337 -1.23429987]] </td>
</tr>
</table>
### 4.2 - Linear-Activation Forward
In this notebook, you will use two activation functions:
- **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
``` python
A, activation_cache = sigmoid(Z)
```
- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call:
``` python
A, activation_cache = relu(Z)
```
For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.
**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.
```
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python tuple containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently
    """
    # The linear step is identical for both activations, so compute it once.
    Z, linear_cache = linear_forward(A_prev, W, b)

    # Dispatch on the requested non-linearity; both helpers return (A, cache-of-Z).
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)

    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, activation_cache)
# Smoke test: run the fused layer with both activations on the same inputs.
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
```
**Expected output**:
<table style="width:35%">
<tr>
<td> **With sigmoid: A ** </td>
<td > [[ 0.96890023 0.11013289]]</td>
</tr>
<tr>
<td> **With ReLU: A ** </td>
<td > [[ 3.43896131 0. ]]</td>
</tr>
</table>
**Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers.
### d) L-Layer Model
For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.
<img src="images/model_architecture_kiank.png" style="width:600px;height:300px;">
<caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>
**Exercise**: Implement the forward propagation of the above model.
**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.)
**Tips**:
- Use the functions you had previously written
- Use a for loop to replicate [LINEAR->RELU] (L-1) times
- Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`.
```
# GRADED FUNCTION: L_model_forward

def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value (the prediction Yhat)
    caches -- list of the L caches from linear_activation_forward(),
              indexed from 0 to L-1
    """
    caches = []
    A = X
    L = len(parameters) // 2  # each layer contributes one W and one b

    # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation="relu")
        caches.append(cache)

    # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
    # Index with L rather than l+1: the loop variable is undefined when L == 1
    # (a network with no hidden layers), and L is the final-layer index in
    # every case.
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation="sigmoid")
    caches.append(cache)

    assert AL.shape == (1, X.shape[1])
    return AL, caches
# Smoke test: full forward pass on a 2-hidden-layer fixture network.
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
```
<table style="width:50%">
<tr>
<td> **AL** </td>
<td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td>
</tr>
<tr>
<td> **Length of caches list ** </td>
<td > 3 </td>
</tr>
</table>
Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions.
## 5 - Cost function
Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.
**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$
```
def compute_cost(AL, Y):
    """
    Implement the cross-entropy cost function defined by equation (7).

    Arguments:
    AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
    Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost
    """
    m = Y.shape[1]

    # Per-example log-likelihood terms, then the negative mean over the batch.
    log_terms = Y * np.log(AL) + (1 - Y) * np.log(1 - AL)
    cost = -np.sum(log_terms) / m

    cost = np.squeeze(cost)  # e.g. turns [[17]] into 17
    assert cost.shape == ()
    return cost
# Smoke test with fixture labels and predictions.
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
```
**Expected Output**:
<table>
<tr>
<td>**cost** </td>
<td> 0.2797765635793422</td>
</tr>
</table>
## 6 - Backward propagation module
Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters.
**Reminder**:
<img src="images/backprop_kiank.png" style="width:650px;height:250px;">
<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>
<!--
For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:
$$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$
In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.
Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$.
This is why we talk about **backpropagation**.
-->
Now, similar to forward propagation, you are going to build the backward propagation in three steps:
- LINEAR backward
- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation
- [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)
### 6.1 - Linear backward
For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).
Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.
<img src="images/linearback_kiank.png" style="width:250px;height:300px;">
<caption><center> **Figure 4** </center></caption>
The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:
$$ dW^{[l]} = \frac{\partial \mathcal{J} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$
$$ db^{[l]} = \frac{\partial \mathcal{J} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$
$$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$
**Exercise**: Use the 3 formulas above to implement linear_backward().
```
def linear_backward(dZ, cache):
    """
    Implement the linear portion of backward propagation for a single layer (layer l).

    Arguments:
    dZ -- Gradient of the cost with respect to the linear output (of current layer l)
    cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer

    Returns:
    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]  # number of examples in the batch

    dW = (dZ @ A_prev.T) / m                 # eq. (8): average over the batch
    db = dZ.sum(axis=1, keepdims=True) / m   # eq. (9): row-wise mean of dZ
    dA_prev = W.T @ dZ                       # eq. (10): propagate to layer l-1

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
# Smoke test: run the linear backward step on fixture gradients/cache.
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```
** Expected Output**:
```
dA_prev =
[[-1.15171336 0.06718465 -0.3204696 2.09812712]
[ 0.60345879 -3.72508701 5.81700741 -3.84326836]
[-0.4319552 -1.30987417 1.72354705 0.05070578]
[-0.38981415 0.60811244 -1.25938424 1.47191593]
[-2.52214926 2.67882552 -0.67947465 1.48119548]]
dW =
[[ 0.07313866 -0.0976715 -0.87585828 0.73763362 0.00785716]
[ 0.85508818 0.37530413 -0.59912655 0.71278189 -0.58931808]
[ 0.97913304 -0.24376494 -0.08839671 0.55151192 -0.10290907]]
db =
[[-0.14713786]
[-0.11313155]
[-0.13209101]]
```
### 6.2 - Linear-Activation backward
Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**.
To help you implement `linear_activation_backward`, we provided two backward functions:
- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:
```python
dZ = sigmoid_backward(dA, activation_cache)
```
- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:
```python
dZ = relu_backward(dA, activation_cache)
```
If $g(.)$ is the activation function,
`sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$.
**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.
```
def linear_activation_backward(dA, cache, activation):
    """
    Backward propagation through a single LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer l
    cache -- (linear_cache, activation_cache) pair stored during the forward pass
    activation -- activation used in this layer: "sigmoid" or "relu"

    Returns:
    dA_prev -- gradient of the cost w.r.t. the previous layer's activation, same shape as A_prev
    dW -- gradient of the cost w.r.t. W of the current layer, same shape as W
    db -- gradient of the cost w.r.t. b of the current layer, same shape as b
    """
    linear_cache, activation_cache = cache
    # Step 1: undo the nonlinearity to recover dZ = dA * g'(Z).
    if activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    elif activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    # Step 2: backprop through the affine transform Z = W.A_prev + b
    # (shared by both activations, so it is done once here).
    dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
# Exercise linear_activation_backward with both supported activations on the
# same fixture; outputs should match the expected tables below.
dAL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(dAL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
```
**Expected output with sigmoid:**
<table style="width:100%">
<tr>
<td > dA_prev </td>
<td >[[ 0.11017994 0.01105339]
[ 0.09466817 0.00949723]
[-0.05743092 -0.00576154]] </td>
</tr>
<tr>
<td > dW </td>
<td > [[ 0.10266786 0.09778551 -0.01968084]] </td>
</tr>
<tr>
<td > db </td>
<td > [[-0.05729622]] </td>
</tr>
</table>
**Expected output with relu:**
<table style="width:100%">
<tr>
<td > dA_prev </td>
<td > [[ 0.44090989 0. ]
[ 0.37883606 0. ]
[-0.2298228 0. ]] </td>
</tr>
<tr>
<td > dW </td>
<td > [[ 0.44513824 0.37371418 -0.10478989]] </td>
</tr>
<tr>
<td > db </td>
<td > [[-0.20837892]] </td>
</tr>
</table>
### 6.3 - L-Model Backward
Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass.
<img src="images/mn_backward.png" style="width:450px;height:300px;">
<caption><center> **Figure 5** : Backward pass </center></caption>
**Initializing backpropagation**:
To backpropagate through this network, we know that the output is,
$A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$.
To do so, use this formula (derived using calculus which you don't need in-depth knowledge of):
```python
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL
```
You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula :
$$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$
For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`.
**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model.
```
def L_model_backward(AL, Y, caches):
    """
    Backward pass for the whole [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID network.

    Arguments:
    AL -- probability vector, output of L_model_forward()
    Y -- true "label" vector (0 = non-cat, 1 = cat)
    caches -- list of caches: caches[l] for the relu layers (l = 0..L-2),
              caches[L-1] for the final sigmoid layer

    Returns:
    grads -- dictionary mapping "dA<l>", "dW<l>", "db<l>" to their gradients
    """
    grads = {}
    num_layers = len(caches)
    Y = Y.reshape(AL.shape)  # make the labels broadcast-compatible with AL

    # Derivative of the cross-entropy cost with respect to the final activation.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Output layer (SIGMOID -> LINEAR).
    dA_prev, dW, db = linear_activation_backward(dAL, caches[num_layers - 1], activation="sigmoid")
    grads["dA" + str(num_layers - 1)] = dA_prev
    grads["dW" + str(num_layers)] = dW
    grads["db" + str(num_layers)] = db

    # Hidden layers (RELU -> LINEAR), walking from layer L-1 down to layer 1.
    for layer in range(num_layers - 1, 0, -1):
        dA_prev, dW, db = linear_activation_backward(
            grads["dA" + str(layer)], caches[layer - 1], activation="relu")
        grads["dA" + str(layer - 1)] = dA_prev
        grads["dW" + str(layer)] = dW
        grads["db" + str(layer)] = db
    return grads
# Run the full backward pass on the course-provided fixture and print the
# gradient dictionary for comparison with the expected output below.
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
```
**Expected Output**
<table style="width:60%">
<tr>
<td > dW1 </td>
<td > [[ 0.41010002 0.07807203 0.13798444 0.10502167]
[ 0. 0. 0. 0. ]
[ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td>
</tr>
<tr>
<td > db1 </td>
<td > [[-0.22007063]
[ 0. ]
[-0.02835349]] </td>
</tr>
<tr>
<td > dA1 </td>
<td > [[ 0.12913162 -0.44014127]
[-0.14175655 0.48317296]
[ 0.01663708 -0.05670698]] </td>
</tr>
</table>
### 6.4 - Update Parameters
In this section you will update the parameters of the model, using gradient descent:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$
where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary.
**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.
**Instructions**:
Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$.
```
def update_parameters(parameters, grads, learning_rate):
    """
    Apply one step of gradient descent to every layer's weights and biases.

    Arguments:
    parameters -- dict with keys "W1", "b1", ..., "WL", "bL"
    grads -- dict with keys "dW1", "db1", ..., output of L_model_backward
    learning_rate -- step size alpha

    Returns:
    parameters -- the same dict, with each entry rebound to its updated value
    """
    num_layers = len(parameters) // 2  # each layer contributes one W and one b
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            # theta := theta - alpha * d_theta; a fresh array is bound so the
            # caller's original arrays are not mutated in place.
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]
    return parameters
# Apply one gradient-descent step on the fixture and print the updated
# parameters for comparison with the expected output below.
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
```
**Expected Output**:
<table style="width:100%">
<tr>
<td > W1 </td>
<td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
[-1.76569676 -0.80627147 0.51115557 -1.18258802]
[-1.0535704 -0.86128581 0.68284052 2.20374577]] </td>
</tr>
<tr>
<td > b1 </td>
<td > [[-0.04659241]
[-1.28888275]
[ 0.53405496]] </td>
</tr>
<tr>
<td > W2 </td>
<td > [[-0.55569196 0.0354055 1.32964895]]</td>
</tr>
<tr>
<td > b2 </td>
<td > [[-0.84610769]] </td>
</tr>
</table>
<!-- ## 7 - Conclusion
Congrats on implementing all the functions required for building a deep neural network!
We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier.
In the next assignment you will put all these together to build two models:
- A two-layer neural network
- An L-layer neural network
-->
| github_jupyter |
# Dissecting Nipype Workflows
<center>
Nipype team | contact: satra@mit.edu | nipy.org/nipype
<br>
(Hit Esc to get an overview)
</center>[Latest version][notebook] | [Latest slideshow][slideshow]
[notebook]: http://nbviewer.ipython.org/urls/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb
[slideshow]: http://slideviewer.herokuapp.com/url/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb
# Contributors
http://nipy.org/nipype/about.html#code-contributors
# Funding
- 1R03EB008673-01 from NIBIB, Satrajit Ghosh, Susan Whitfield-Gabrieli
- 5R01MH081909-02 from NIMH, Mark D'Esposito
- INCF
# Conflict of interest
<center>
Satrajit Ghosh: TankThink Labs, LLC
</center>
# What is Nipype?
<center>
<img src="https://raw.github.com/satra/intro2nipype/master/images/nipype.png" width="40%" />
<br>
Figure designed and created by: Arno Klein (www.mindboggle.info)
</center>
# Make life a little easier
<img src="https://raw.github.com/satra/intro2nipype/master/images/EDC.png" />
Poline _et al._ (2012)
# Many workflow systems out there
- [BioImage Suite](http://www.bioimagesuite.org/)
- [BIRN Tools](https://wiki.birncommunity.org/x/LgFrAQ)
- [BrainVisa](http://brainvisa.info)
- [CambaFX](http://www-bmu.psychiatry.cam.ac.uk/software/)
- [JIST for MIPAV](http://www.nitrc.org/projects/jist/)
- [LONI pipeline](http://pipeline.loni.ucla.edu)
- [MEVIS Lab](http://www.mevislab.de)
- [PSOM](http://code.google.com/p/psom/)
# Solution requirements
Coming at it from a developer's perspective, we needed something
- lightweight
- scriptable
- provided formal, common semantics
- allowed interactive exploration
- supported efficient batch processing
- enabled rapid algorithm prototyping
- was flexible and adaptive
- part of an ecosystem
# Python ecosystem
<table width="1024px">
<tr>
<td colspan="2"><a href="http://ipython.org/"><img src="http://ipython.org/_static/IPy_header.png"></a></td>
<td colspan="2"><a href="http://nipy.org/"><img src="http://nipy.org/img/nipy.svg"></a></td>
</tr>
<tr>
<td><a href="http://scipy.org/"><img src="http://www.scipy.org/_static/images/tutorial.png"></a></td>
<td><a href="http://numpy.org/"><img src="http://www.numpy.org/_static/numpy_logo.png"></a></td>
<td><a href="http://pymvpa.org/"><img src="http://www.pymvpa.org/_static/pymvpa_logo.jpg" width="256"></a></td>
<td><a href="http://scikit-learn.org/"><img src="http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png"></a></td>
</tr>
<tr>
<td><a href="http://networkx.github.io/"><img src="https://raw.github.com/networkx/networkx/master/doc/source/static/art1.png" width="256"></a></td>
<td><a href="http://matplotlib.org/"><img src="http://matplotlib.org/_static/logo2.png" width="256"></a></td>
<td><a href="http://code.enthought.com/projects/mayavi/"><img src="http://code.enthought.com/img/mayavi-samp.png" width="256"></a></td>
<td><a href="http://neuro.debian.net/"><img src="http://neuro.debian.net/_files/neurodebian_logo_posters_banner.svg" width="256"></a></td>
</tr>
</table>
# Existing technologies
**shell scripting**:
Can be quick to do, and powerful, but only provides application specific
scalability, and not easy to port across different architectures.
**make/CMake**:
Similar in concept to workflow execution in Nipype, but again limited by the
need for command line tools and flexibility in terms of scaling across
hardware architectures (although see [makeflow](http://nd.edu/~ccl/software/makeflow).
# Existing technologies
**Octave/MATLAB**:
Integration with other tools is *ad hoc* (i.e., system call) and dataflow is
managed at a programmatic level. However, see [PSOM](http://code.google.com/p/psom/) which offers a nice
alternative to some aspects of Nipype for Octave/Matlab users.
**Graphical options**: (e.g., [LONI Pipeline](http://pipeline.loni.ucla.edu), [VisTrails](http://www.vistrails.org/))
Are easy to use but reduces flexibility relative to scripting options.
# Nipype architecture
<img src="https://raw.github.com/satra/intro2nipype/master/images/arch.png" width="100%">
## Concepts
* **Interface**: Wraps a program or function
- **Node/MapNode**: Wraps an `Interface` for use in a Workflow that provides
caching and other goodies (e.g., pseudo-sandbox)
- **Workflow**: A *graph* or *forest of graphs* whose nodes are of type `Node`,
`MapNode` or `Workflow` and whose edges represent data flow
* **Plugin**: A component that describes how a `Workflow` should be executed
# Software interfaces
Currently supported (5-2-2013). [Click here for latest](http://www.mit.edu/~satra/nipype-nightly/documentation.html)
<style>
.rendered_html table{border:0px}
.rendered_html tr{border:0px}
.rendered_html td{border:0px}
</style>
<table>
<tr>
<td>
<ul>
<li><a href="http://afni.nimh.nih.gov/afni">AFNI</a></li>
<li><a href="http://www.picsl.upenn.edu/ANTS">ANTS</a></li>
<li><a href="http://www.psychiatry.uiowa.edu/mhcrc/IPLpages/BRAINS.htm">BRAINS</a></li>
<li><a href="http://www.cs.ucl.ac.uk/research/medic/camino">Camino</a></li>
<li><a href="http://www.nitrc.org/projects/camino-trackvis">Camino-TrackVis</a></li>
<li><a href="http://www.connectomeviewer.org">ConnectomeViewerToolkit</a></li>
<li><a href="http://www.cabiatl.com/mricro/mricron/dcm2nii.html">dcm2nii</a></li>
<li><a href="http://www.trackvis.org/dtk">Diffusion Toolkit</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="http://freesurfer.net">FreeSurfer</a></li>
<li><a href="http://www.fmrib.ox.ac.uk/fsl">FSL</a></li>
<li><a href="http://www.brain.org.au/software/mrtrix/index.html">MRtrix</a></li>
<li><a href="http://nipy.org/nipy">Nipy</a></li>
<li><a href="http://nipy.org/nitime">Nitime</a></li>
<li><a href="http://github.com/pyxnat">PyXNAT</a></li>
<li><a href="http://www.slicer.org">Slicer</a></li>
<li><a href="http://www.fil.ion.ucl.ac.uk/spm">SPM</a></li>
</ul>
</td>
</tr>
</table>
Most used/contributed policy!
Not all components of these packages are available.
# Workflows
- Properties:
- processing pipeline is a directed acyclic graph (DAG)
- nodes are processes
- edges represent data flow
- compact representation for any process
- code and data separation
# Execution Plugins
Allows seamless execution across many architectures
- Local
- Serial
- Multicore
- Clusters
- HTCondor
- PBS/Torque/SGE/LSF (native and via IPython)
- SSH (via IPython)
- Soma Workflow
# Learn Nipype concepts in 10 easy steps
1. Installing and testing the installation
2. Working with interfaces
3. Using Nipype caching
4. Creating Nodes, MapNodes and Workflows
5. Getting and saving data
6. Using Iterables
7. Function nodes
8. Distributed computation
9. Connecting to databases
10. Execution configuration options
# Step 1. Installing Nipype
## Python environment:
* Debian/Ubuntu/Scientific Fedora
* [Canopy from Enthought](https://www.enthought.com/products/canopy/)
* [Anaconda from Continuum Analytics](https://store.continuum.io/cshop/anaconda/)
## Installing Nipype:
* Available from [@NeuroDebian](http://neuro.debian.net/pkgs/python-nipype.html),
[@PyPI](http://pypi.python.org/pypi/nipype/), and
[@GitHub](http://github.com/nipy/nipype)
- pip install nipype
- easy_install nipype
- sudo apt-get install python-nipype
* Dependencies: networkx, nibabel, numpy, scipy, traits
## Running Nipype ([Quickstart](http://nipy.org/nipype/quickstart.html)):
* Ensure underlying tools are installed and accessible
* Nipype **is a wrapper, not a substitute** for AFNI, ANTS, FreeSurfer, FSL, SPM,
NiPy, etc.,.
# Step 2. Testing nipype
```
$ jupyter notebook
```
```
import nipype
# Comment the following section to increase verbosity of output
nipype.config.set('logging', 'workflow_level', 'CRITICAL')
nipype.config.set('logging', 'interface_level', 'CRITICAL')
nipype.logging.update_logging(nipype.config)
nipype.test(verbose=0) # Increase verbosity parameter for more info
nipype.get_info()
```
# Step 3: Environment and setup
```
%matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
# download the files from: https://github.com/Neurita/nipype-tutorial-data/archive/master.zip
import os.path as op
tutorial_dir = '/Users/alexandre/nipype-tutorial'
data_dir = op.join(tutorial_dir, 'ds107')
required_files = [op.join(data_dir, 'sub001', 'BOLD', 'task001_run001', 'bold.nii.gz'),
op.join(data_dir, 'sub001', 'BOLD', 'task001_run002', 'bold.nii.gz'),
op.join(data_dir, 'sub044', 'BOLD', 'task001_run001', 'bold.nii.gz'),
op.join(data_dir, 'sub044', 'BOLD', 'task001_run002', 'bold.nii.gz'),
op.join(data_dir, 'sub001', 'anatomy', 'highres001.nii.gz'),
op.join(data_dir, 'sub044', 'anatomy', 'highres001.nii.gz'),
]
print(required_files)
```
# Step 4. Working with interfaces
```
import nipype.algorithms
from nipype.interfaces.fsl import DTIFit
from nipype.interfaces.spm import Realign
```
### Finding interface inputs and outputs and examples
```
DTIFit.help()
Realign.help()
```
### Creating a directory for running interfaces
```
import os
library_dir = os.path.join(tutorial_dir, 'results')
if not os.path.exists(library_dir):
os.mkdir(library_dir)
os.chdir(library_dir)
# pick a demo file
demo_file = required_files[0]
print(demo_file)
# check the current folder
print(op.abspath('.'))
```
## Executing interfaces
```
from nipype.algorithms.misc import Gunzip
convert = Gunzip()
convert.inputs.in_file = demo_file
results = convert.run()
results.outputs
```
## Other ways
```
from nipype.algorithms.misc import Gunzip
convert = Gunzip()
convert.inputs.in_file = demo_file
results = convert.run()
uzip_bold = results.outputs.out_file
print(uzip_bold)
convert = Gunzip()
results = convert.run(in_file=demo_file)
print(results.outputs)
convert.inputs
```
#### Look at only the defined inputs
```
results.inputs
```
### Experiment with other interfaces
For example, run realignment with SPM
```
from nipype.interfaces.spm import Realign
realign = Realign(in_files=uzip_bold, register_to_mean=False)
results1 = realign.run()
#print(os.listdir())
```
And now use FSL
```
from nipype.interfaces.fsl import MCFLIRT
mcflirt = MCFLIRT(in_file=uzip_bold, ref_vol=0, save_plots=True)
results2 = mcflirt.run()
```
### Now we can look at some results
```
print('SPM realign execution time:', results1.runtime.duration)
print('Flirt execution time:', results2.runtime.duration)
!ls
!fslinfo bold.nii
!cat bold_mcf.nii.gz.par
!wc -l bold_mcf.nii.gz.par
!cat rp_bold.txt
!wc -l rp_bold.txt
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2)
ax[0].plot(np.genfromtxt('bold_mcf.nii.gz.par')[:, 3:])
ax[0].set_title('FSL')
ax[1].plot(np.genfromtxt('rp_bold.txt')[:, :3])
ax[1].set_title('SPM')
```
#### if i execute the MCFLIRT line again, well, it runs again!
# Step 3. Nipype caching
```
from nipype.caching import Memory
mem = Memory('.')
```
### Create `cacheable` objects
```
spm_realign = mem.cache(Realign)
fsl_realign = mem.cache(MCFLIRT)
```
### Execute interfaces
```
spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)
fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)
fig, ax = plt.subplots(2)
ax[0].plot(np.genfromtxt(fsl_results.outputs.par_file)[:, 3:])
ax[1].plot(np.genfromtxt(spm_results.outputs.realignment_parameters)[:,:3])
```
# More caching
```
files = required_files[0:2]
print(files)
converter = mem.cache(Gunzip)
newfiles = []
for idx, fname in enumerate(files):
results = converter(in_file=fname)
newfiles.append(results.outputs.out_file)
print(newfiles)
os.chdir(tutorial_dir)
```
# Step 4: Nodes, Mapnodes and workflows
```
from nipype.pipeline.engine import Node, MapNode, Workflow
```
**Node**:
```
realign_spm = Node(Realign(), name='motion_correct')
```
**Mapnode**:
<img src="https://raw.github.com/satra/intro2nipype/master/images/mapnode.png" width="30%">
```
convert2nii = MapNode(Gunzip(), iterfield=['in_file'],
name='convert2nii')
```
# "Hello World" of Nipype workflows
```
realignflow = Workflow(name='realign_with_spm')
#realignflow.connect(convert2nii, 'out_file', realign_spm, 'in_files')
realignflow.connect([(convert2nii, realign_spm, [('out_file', 'in_files')]) ])
convert2nii.inputs.in_file = required_files
realign_spm.inputs.register_to_mean = False
realignflow.base_dir = '.'
realignflow.run()
```
# Visualize the workflow
```
realignflow.write_graph()
from IPython.core.display import Image
Image('realign_with_spm/graph.dot.png')
realignflow.write_graph(graph2use='orig')
Image('realign_with_spm/graph_detailed.dot.png')
```
# Step 5. Getting and saving data
### Let's use *glob*
```
cd $tutorial_dir
from nipype.interfaces.io import DataGrabber, DataFinder
ds = Node(DataGrabber(infields=['subject_id'],
outfields=['func']),
name='datasource')
ds.inputs.base_directory = op.join(tutorial_dir, 'ds107')
ds.inputs.template = '%s/BOLD/task001*/bold.nii.gz'
ds.inputs.sort_filelist = True
ds.inputs.subject_id = 'sub001'
print(ds.run().outputs)
ds.inputs.subject_id = ['sub001', 'sub044']
print(ds.run().outputs)
```
# Multiple files per subject
```
ds = Node(DataGrabber(infields=['subject_id', 'task_id'],
outfields=['func', 'anat']),
name='datasource')
ds.inputs.base_directory = os.path.abspath('ds107')
ds.inputs.template = '*'
ds.inputs.template_args = {'func': [['subject_id', 'task_id']],
'anat': [['subject_id']]}
ds.inputs.field_template = {'func': '%s/BOLD/task%03d*/bold.nii.gz',
'anat': '%s/anatomy/highres001.nii.gz'}
ds.inputs.sort_filelist = True
ds.inputs.subject_id = ['sub001', 'sub044']
ds.inputs.task_id = 1
ds_out = ds.run()
print(ds_out.outputs)
```
# Connecting to computation
```
convert2nii = MapNode(Gunzip(), iterfield=['in_file'],
name='convert2nii')
realign_spm = Node(Realign(), name='motion_correct')
realign_spm.inputs.register_to_mean = False
connectedworkflow = Workflow(name='connectedtogether')
connectedworkflow.base_dir = os.path.abspath('working_dir')
connectedworkflow.connect([(ds, convert2nii, [('func', 'in_file')]),
(convert2nii, realign_spm, [('out_file', 'in_files')]),
])
```
# Data sinking
### Take output computed in a workflow out of it.
```
from nipype.interfaces import DataSink
sinker = Node(DataSink(), name='sinker')
sinker.inputs.base_directory = os.path.abspath('output')
connectedworkflow.connect([(realign_spm, sinker, [('realigned_files', 'realigned'),
('realignment_parameters', 'realigned.@parameters'),
]),
])
```
### How to determine output location
'base_directory/container/parameterization/destloc/filename'
destloc = [@]string[[.[@]]string[[.[@]]string]...] and
destloc = realigned.@parameters --> 'realigned'
destloc = realigned.parameters.@1 --> 'realigned/parameters'
destloc = realigned.parameters.@2 --> 'realigned/parameters'
filename comes from the input to the connect statement.
```
connectedworkflow.run?
```
# Step 6: *iterables* - parametric execution
**Workflow + iterables**: runs subgraph several times, attribute not input
<img src="https://raw.github.com/satra/intro2nipype/master/images/iterables.png" width="30%">
```
ds.iterables = ('subject_id', ['sub001', 'sub044'])
connectedworkflow.run()
```
# Putting it all together
### iterables + MapNode + Node + Workflow + DataGrabber + DataSink
```
connectedworkflow.write_graph()
Image('working_dir/connectedtogether/graph.dot.png')
```
# Step 7: The Function interface
### The do anything you want card
```
from nipype.interfaces.utility import Function

def myfunc(input1, input2):
    """Add and subtract two inputs."""
    return input1 + input2, input1 - input2

# Wrap the pure-Python function as a nipype node; input/output names must
# match the function's parameters and returned tuple.
calcfunc = Node(Function(input_names=['input1', 'input2'],
                         output_names=['sum', 'difference'],
                         function=myfunc),
                name='mycalc')
calcfunc.inputs.input1 = 1
calcfunc.inputs.input2 = 2
res = calcfunc.run()
# BUG FIX: the original used the Python 2 print statement (`print res.outputs`),
# a syntax error under Python 3, which the rest of this notebook targets.
print(res.outputs)
```
# Step 8: Distributed computing
### Normally calling run executes the workflow in series
```
connectedworkflow.run()
```
### but you can scale very easily
For example, to use multiple cores on your local machine
```
connectedworkflow.run('MultiProc', plugin_args={'n_procs': 4})
```
### Or to other job managers
```python
connectedworkflow.run('PBS', plugin_args={'qsub_args': '-q many'})
connectedworkflow.run('SGE', plugin_args={'qsub_args': '-q many'})
connectedworkflow.run('LSF', plugin_args={'qsub_args': '-q many'})
connectedworkflow.run('Condor')
connectedworkflow.run('IPython')
```
### or submit graphs as a whole
```python
connectedworkflow.run('PBSGraph', plugin_args={'qsub_args': '-q many'})
connectedworkflow.run('SGEGraph', plugin_args={'qsub_args': '-q many'})
connectedworkflow.run('CondorDAGMan')
```
### Current Requirement: **SHARED FILESYSTEM**
### You can also set node specific plugin arguments
```python
node.plugin_args = {'qsub_args': '-l nodes=1:ppn=3', 'overwrite': True}
```
# Step 9: Connecting to Databases
```
from os.path import abspath as opap
from nipype.interfaces.io import XNATSource
from nipype.pipeline.engine import Node, Workflow
from nipype.interfaces.fsl import BET
subject_id = 'xnat_S00001'
dg = Node(XNATSource(infields=['subject_id'],
outfields=['struct'],
config='/Users/satra/xnat_configs/nitrc_ir_config'),
name='xnatsource')
dg.inputs.query_template = ('/projects/fcon_1000/subjects/%s/experiments/xnat_E00001'
'/scans/%s/resources/NIfTI/files')
dg.inputs.query_template_args['struct'] = [['subject_id', 'anat_mprage_anonymized']]
dg.inputs.subject_id = subject_id
bet = Node(BET(), name='skull_stripper')
wf = Workflow(name='testxnat')
wf.base_dir = opap('xnattest')
wf.connect(dg, 'struct', bet, 'in_file')
from nipype.interfaces.io import XNATSink
ds = Node(XNATSink(config='/Users/satra/xnat_configs/central_config'),
name='xnatsink')
ds.inputs.project_id = 'NPTEST'
ds.inputs.subject_id = 'NPTEST_xnat_S00001'
ds.inputs.experiment_id = 'test_xnat'
ds.inputs.reconstruction_id = 'bet'
ds.inputs.share = True
wf.connect(bet, 'out_file', ds, 'brain')
wf.run()
```
# Step 10: Configuration options
[Configurable options](http://nipy.org/nipype/users/config_file.html) control workflow and node execution options
At the global level:
```
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
config.set('execution', 'stop_on_first_crash', 'true')
```
At the workflow level:
```
wf.config['execution']['hash_method'] = 'content'
```
Configurations can also be set at the node level.
```
bet.config = {'execution': {'keep_unnecessary_outputs': 'true'}}
wf.run()
```
# Reusable workflows
```
config.set_default_config()
logging.update_logging(config)

# Reuse a canned FSL SUSAN smoothing workflow shipped with nipype.
from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth

smooth = create_susan_smooth()
smooth.inputs.inputnode.in_files = opap('output/realigned/_subject_id_sub044/rbold_out.nii')
smooth.inputs.inputnode.fwhm = 5
smooth.inputs.inputnode.mask_file = 'mask.nii'
smooth.run()  # Will error because mask.nii does not exist

from nipype.interfaces.fsl import BET, MeanImage, ImageMaths
from nipype.pipeline.engine import Node

# Build the missing mask inside the workflow instead: strip NaNs, take the
# temporal mean, then BET skull-strip to obtain a brain mask.
remove_nan = Node(ImageMaths(op_string= '-nan'), name='nanremove')
remove_nan.inputs.in_file = op.abspath('output/realigned/_subject_id_sub044/rbold_out.nii')
mi = Node(MeanImage(), name='mean')
mask = Node(BET(mask=True), name='mask')
wf = Workflow('reuse')
wf.base_dir = op.abspath(op.curdir)
# BUG FIX: Workflow.connect expects, for each node pair, a LIST OF
# (source_field, dest_field) TUPLES; the original passed flat two-string
# lists such as ['out_file', 'in_file'], which nipype rejects.
wf.connect([(remove_nan, mi, [('out_file', 'in_file')]),
            (mi, mask, [('out_file', 'in_file')]),
            (mask, smooth, [('out_file', 'inputnode.mask_file')]),
            (remove_nan, smooth, [('out_file', 'inputnode.in_files')]),
            ])
wf.run()
```
## Setting internal parameters of workflows
```
print(smooth.list_node_names())
median = smooth.get_node('median')
median.inputs.op_string = '-k %s -p 60'
wf.run()
```
# Summary
- This tutorial covers the concepts of Nipype
1. Installing and testing the installation
2. Working with interfaces
3. Using Nipype caching
4. Creating Nodes, MapNodes and Workflows
5. Getting and saving data
6. Using Iterables
7. Function nodes
8. Distributed computation
9. Connecting to databases
10. Execution configuration options
- It will allow you to reuse and debug the various workflows available in Nipype, BIPS and CPAC
- Please contribute new interfaces and workflows!
| github_jupyter |
```
# Mount Google Drive so the notebook can read the project folder.
from google.colab import drive
drive.mount('/content/drive')
import os
print(os.getcwd())
# Work inside the summarization project directory on Drive.
os.chdir('/content/drive/My Drive/Colab Notebooks/summarization')
print(os.listdir())
import os
import numpy as np
import pandas as pd
import sys
import os
# Silence TensorFlow's C++ log output (3 = errors only); set before the
# tensorflow import below so it takes effect.  NOTE(review): `os` is
# imported three times in this cell; harmless but redundant.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.python.client import device_lib
# List available devices (confirms whether a GPU is attached to the runtime).
print(device_lib.list_local_devices())
import keras
# Remember the current working directory for later use.
orig = os.getcwd()
print(orig)
# Loading data and preparing the vocabulary.
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer

# Keep every token (num_words=sys.maxsize), apply no character filtering or
# lowercasing, and map out-of-vocabulary words to a dedicated '<OOV>' token.
tokenizer = Tokenizer(num_words=sys.maxsize,filters ='', lower=False, oov_token = '<OOV>')
print(dir(tokenizer))

# Fit the tokenizer on both the articles and the summaries so that encoder
# and decoder share a single vocabulary.
# BUG FIX: use context managers so the file handles are closed
# deterministically; the original open(...).readlines() leaked both handles.
with open('sent') as sent_file:
    data1 = sent_file.readlines()
with open('summ2') as summ_file:
    data2 = summ_file.readlines()
tokenizer.fit_on_texts(data1)
tokenizer.fit_on_texts(data2)
print("No. of articles and summ",len(data2),len(data1))

dictionary = tokenizer.word_index
# +1 because Keras reserves index 0 for padding.
num_encoder_tokens = len(tokenizer.word_index)+1
num_decoder_tokens = len(tokenizer.word_index)+1
# Forward and inverse lookup tables over the shared vocabulary.
word2idx = dict(dictionary)
idx2word = {v: k for k, v in dictionary.items()}
#Encoding data to integers
sent = tokenizer.texts_to_sequences(data1)
summ = tokenizer.texts_to_sequences(data2)
#padding sequences
#Finding the maximum sequence length
# Lengths are measured on the raw text; this should match the tokenizer's
# token count because filters='' was used above — TODO confirm.
MAX_INPUT_LENGTH = max(len(i.split()) for i in data1)
print(MAX_INPUT_LENGTH)
MAX_TARGET_LENGTH = max(len(j.split()) for j in data2)
print(MAX_TARGET_LENGTH)
# Pad at the end ('post') so every sequence has a fixed length; 0 is the pad id.
padded_sent = pad_sequences(sent, maxlen = MAX_INPUT_LENGTH,padding = 'post')
padded_summ = pad_sequences(summ, maxlen = MAX_TARGET_LENGTH,padding = 'post')
print(padded_sent.shape,padded_summ.shape,type(padded_sent))
#preparing training data
encoder_input_data = padded_sent.copy()
decoder_input_data = padded_summ.copy()
# print(decoder_input_data[0],decoder_input_data[1])
# Teacher forcing: targets are the decoder inputs shifted left by one step
# (np.roll along the last axis); the wrapped-around final position is zeroed
# so it reads as padding instead of the sequence's first token.
decoder_target_data = np.roll(decoder_input_data, -1, axis = -1)
decoder_target_data[:,-1] = 0
# encoder_input_data.reshape(-1,1,MAX_INPUT_LENGTH)
# decoder_input_data = decoder_input_data.reshape(-1,1,MAX_TARGET_LENGTH)
# Add a trailing singleton dimension: (samples, timesteps, 1), the target
# shape used with sparse_categorical_crossentropy.
decoder_target_data = decoder_target_data.reshape(-1,MAX_TARGET_LENGTH,1)
# encoder_input_data = tf.one_hot(encoder_input_data, len(tokenizer.word_index))
# decoder_input_data = tf.one_hot(decoder_input_data, len(tokenizer.word_index))
# decoder_target_data = tf.one_hot(decoder_target_data, len(tokenizer.word_index))
print(encoder_input_data.shape,decoder_input_data.shape,decoder_target_data.shape)
# print(decoder_input_data[0],decoder_target_data[0])
# Preparing GloVe: load the pre-trained 300-d word vectors into a lookup dict.
EMBEDDING_DIM = 300
embeddings_index = {}
# Each line of the GloVe file is: word v1 v2 ... v300
# BUG FIX: use a context manager instead of manual open()/close() so the
# handle is released even if parsing raises mid-file.
with open(os.path.join('', 'glove.6B.{}d.txt'.format(EMBEDDING_DIM))) as glove_file:
    for line in glove_file:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
# Notebook-cell sanity check: displays whether a rare word made it in.
"fishtailed" in embeddings_index
# Embedding matrix: row i holds the GloVe vector for the word with index i.
# Row 0 (padding) and any word missing from GloVe remain all-zero.
embedding_matrix = np.zeros((len(tokenizer.word_index)+1, EMBEDDING_DIM),dtype='float32')
for word,i in tokenizer.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
print(embedding_matrix.shape)
#Creating the Bidirectional model
from keras.layers import Embedding
from keras.layers import Dense, LSTM, Input, concatenate
from keras.models import Model
batch_size = 32
epochs = 10
HIDDEN_UNITS_ENC = 256
num_samples = 10000  # NOTE(review): defined but never used below
# --- Encoder: two unidirectional LSTMs over the same (frozen, GloVe-
# initialised) embedding, the second one reading the sequence backwards.
encoder_inputs = Input(shape=(MAX_INPUT_LENGTH,), name='encoder_inputs')
embedding_layer = Embedding(num_encoder_tokens, EMBEDDING_DIM, weights=[embedding_matrix],
                            input_length=MAX_INPUT_LENGTH, trainable=False, name='embedding_layer')
# NOTE(review): embedding_matrix has len(tokenizer.word_index)+1 rows, so
# num_encoder_tokens must equal that for weights=[...] to load -- confirm.
encoder_rnn = LSTM(units=HIDDEN_UNITS_ENC, return_state=True, dropout=0.5, recurrent_dropout=0.5,name='encoder_lstm')
encoder_output, state_h_f, state_c_f = encoder_rnn(embedding_layer(encoder_inputs))
encoder_rnn2 = LSTM(units=HIDDEN_UNITS_ENC, return_state=True, dropout=0.5, recurrent_dropout=0.5,
                    go_backwards=True,name='encoder_lstm_backward')
encoder_output, state_h_b, state_c_b = encoder_rnn2(embedding_layer(encoder_inputs))
# Concatenated forward+backward states initialise the (2x wide) decoder LSTM.
state_h = concatenate([state_h_f, state_h_b])
state_c = concatenate([state_c_f, state_c_b])
encoder_states = [state_h, state_c]
# --- Decoder ---
decoder_inputs = Input(shape=(None,), name='decoder_inputs')
embedding_layer = Embedding(num_decoder_tokens, EMBEDDING_DIM, weights=[embedding_matrix], trainable=False, name='emb_2')
decoder_lstm = LSTM(HIDDEN_UNITS_ENC * 2, return_sequences=True, return_state=True, dropout=0.5,
                    recurrent_dropout=0.5, name='decoder_lstm')
decoder_outputs, state_h, state_c = decoder_lstm(embedding_layer(decoder_inputs), initial_state=encoder_states)
# BUGFIX: the Dense layer had no activation (linear outputs) while the loss
# below is sparse_categorical_crossentropy with its default
# from_logits=False, which expects a probability distribution -- add softmax.
decoder_dense = Dense(num_decoder_tokens, activation='softmax', name='decoder_dense')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()  # summary() prints itself; print(model.summary()) also printed "None"
# visualize model structure
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model, show_shapes=True, show_layer_names=False,
                 rankdir='TB',dpi=65).create(prog='dot', format='svg'))
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['acc'])
# BUGFIX: validation_split=0.9 held out 90% of the data and trained on only
# the remaining 10%; hold out the conventional 10% instead.
model.fit([encoder_input_data,decoder_input_data],decoder_target_data,batch_size = batch_size, epochs = epochs,validation_split=0.1)
model.save('s2s.h5')
from keras.models import load_model
# Reload the trained weights from disk (the layer objects created above
# already hold the trained weights; this also checks the save round-trips).
model = load_model('s2s.h5')
#inference step
# Encoder model: padded article in -> concatenated bidirectional states out.
encoder_model = Model(encoder_inputs, encoder_states)
# encoder_model.summary()
# Decoder model: one token + previous LSTM states in -> vocab scores + new states.
decoder_state_input_h = Input(shape = (HIDDEN_UNITS_ENC*2,))
decoder_state_input_c = Input(shape = (HIDDEN_UNITS_ENC*2,))
decoder_states_inputs = [decoder_state_input_h,decoder_state_input_c]
# Note: embedding_layer here refers to the decoder embedding ('emb_2'),
# the last value bound to that name above.
decoder_output, state_h, state_c = decoder_lstm(embedding_layer(decoder_inputs), initial_state = decoder_states_inputs)
decoder_states = [state_h,state_c]
decoder_outputs = decoder_dense(decoder_output)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
decoder_model.summary()
# visualize model structure
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(decoder_model, show_shapes=True, show_layer_names=False,
                 rankdir='TB',dpi = 70).create(prog='dot', format='svg'))
#decoding sequences
def decode_sequence(input_seq):
    """Greedily decode a summary for one encoded article.

    Parameters
    ----------
    input_seq : array of shape (1, MAX_INPUT_LENGTH)
        A single padded, integer-encoded article.

    Returns
    -------
    str
        The decoded summary as a space-separated word string (may end
        with the '<EOS>' marker word).
    """
    # Encode the input article into the initial decoder state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate an empty target sequence of length 1, seeded with <BOS>.
    target_seq = np.zeros((1, 1))
    target_seq[0, 0] = tokenizer.word_index["<BOS>"]
    # Sampling loop for a batch of size 1: feed back the previously
    # sampled token and the updated LSTM states at every step.
    stop_condition = False
    decoded_sentence = ''
    num_decoded_words = 0
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Greedily sample the most probable next token.
        sampled_token_index = np.argmax(output_tokens[0, 0])
        sampled_char = idx2word[sampled_token_index]
        decoded_sentence += sampled_char + " "
        num_decoded_words += 1
        # Exit condition: <EOS> sampled, or maximum summary length reached.
        # BUGFIX: the original compared the *character* length of the
        # decoded string against MAX_TARGET_LENGTH (a *word* count),
        # which truncated summaries far too early.
        if (sampled_char == '<EOS>' or
                num_decoded_words >= MAX_TARGET_LENGTH):
            stop_condition = True
        # Update the target sequence (of length 1) and the decoder states.
        target_seq[0, 0] = sampled_token_index
        states_value = [h, c]
    return decoded_sentence
# Decode one training example and compare against its reference summary.
seq = 1
input_seq = encoder_input_data[seq:seq+1]
decoded_sentence = decode_sequence(input_seq)
print('-')
print('Article:', data1[seq].strip())
print('Actual Summary:', data2[seq][5:-5])  # slices off the "<BOS> "/" <EOS>" markers
print('Predicted Summary:', decoded_sentence)
```
| github_jupyter |
## The goals / steps of this project are the following:
1) Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
2) Apply a distortion correction to raw images.
3) Use color transforms, gradients, etc., to create a thresholded binary image.
4) Apply a perspective transform to rectify binary image ("birds-eye view").
5) Detect lane pixels and fit to find the lane boundary.
6) Determine the curvature of the lane and vehicle position with respect to center.
7) Warp the detected lane boundaries back onto the original image.
8) Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
Strategy:
- Generate and save transformation matrices to undistort images
- Create undistort function
# 1) Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
The images for camera calibration are stored in the folder called camera_cal. The images in test_images are for testing your pipeline on single frames. If you want to extract more test images from the videos, you can simply use an image writing method like cv2.imwrite(), i.e., you can read the video in frame by frame as usual, and for frames you want to save for later you can write to an image file.
## Saves the relevant transformation matrices in a pickle file for saving time in the next set of codes
```
"""
import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt
#%matplotlib notebook
"""A: Finding image and object points"""
def undistort(test_img):
    """Calibrate the camera from the chessboard images in camera_cal/ and
    return the undistorted version of *test_img*.

    Side effects: writes corner-annotated calibration images and the
    undistorted test image under output_files/, and pickles the camera
    matrix (mtx) and distortion coefficients (dist) for later reuse.
    NOTE(review): the full calibration is recomputed on every call; the
    pickle it writes exists precisely so later code can skip this step.
    """
    # prepare object points (our ideal reference), like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    # Stores mtx and dist coefficients in a pickle file to use later
    nx=9 # Number of inner corners of our chessboard along x axis (or columns)
    ny=6 # Number of inner corners of our chessboard along y axis (or rows)
    objp = np.zeros((ny*nx,3), np.float32) #We have 9 corners on X axis and 6 corners on Y axis
    objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2) # Gives us coordinate points in pairs as a list of 54 items. Its shape will be (54,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space. These are the points for our ideal chessboard which we are using as a reference.
    imgpoints = [] # 2d points in image plane. We'll extract these from the images given for calibrating the camera
    # Make a list of calibration images
    images = glob.glob('camera_cal/calibration*.jpg')
    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        calib_img = cv2.imread(fname)
        gray = cv2.cvtColor(calib_img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        # Grayscale conversion ensures an 8bit image as input; findChessboardCorners needs that. Colour images are generally 24 bit.
        ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp) # The same ideal points get appended for every successful calibration image
            imgpoints.append(corners) # Corners
            # Draw and display the corners #This step can be completely skipped
            cv2.drawChessboardCorners(calib_img, (nx,ny), corners, ret)
            write_name = 'corners_found'+str(idx)+'.jpg'
            cv2.imwrite('output_files/corners_found_for_calib/'+write_name, calib_img)
            cv2.imshow(write_name, calib_img) # NOTE(review): these 3 display lines are active; comment them out to run headless
            cv2.waitKey(500)
            cv2.destroyAllWindows()
    # Test undistortion on an image
    test_img_size = (test_img.shape[1], test_img.shape[0])
    # Do camera calibration given object points and image points
    # NOTE(review): passes the *test* image's size to calibrateCamera;
    # assumes the calibration images share this size -- confirm.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, test_img_size,None,None)
    # Use the above obtained results to undistort
    undist_img = cv2.undistort(test_img, mtx, dist, None, mtx)
    cv2.imwrite('output_files/test_undist.jpg',undist_img)
    # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    pickle.dump( dist_pickle, open( "output_files/calib_pickle_files/dist_pickle.p", "wb" ) )
    #undist_img = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
    return undist_img
# Quick visual check: calibrate from scratch and show original vs undistorted.
test_img= cv2.imread('camera_cal/calibration1.jpg') #Note: Your image will be in BGR format
output=undistort(test_img)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10)) #Refer subplots in python libraries
# NOTE(review): imshow is handed BGR images, so colours display channel-swapped.
ax1.imshow(test_img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(output)
ax2.set_title('Undistorted Image', fontsize=30)
cv2.waitKey(500)
cv2.destroyAllWindows()
"""
```
# 2) Apply a distortion correction to raw images
Now we'll use the transformation matrices stored in the pickle file above and try undistorting example images
Precaution: If you're reading colored image with cv2, convert it to RGB from BGR before using ax.imshow().
Reason: It requires an RGB image if it is 3D
So I'm leaving a comment in my *"cal_undistort function"* to do the conversion in case you use cv2 to read frames and plan to output using ax.imshow()
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def cal_undistort(img):
    """Undistort *img* using the camera matrix and distortion coefficients
    saved by the earlier calibration step."""
    import pickle
    # Load the saved calibration result.
    with open("output_files/calib_pickle_files/dist_pickle.p", "rb") as pickle_file:
        calibration = pickle.load(pickle_file)
    camera_matrix = calibration["mtx"]
    dist_coeffs = calibration["dist"]
    # Apply the distortion correction.
    undistorted_img = cv2.undistort(img, camera_matrix, dist_coeffs, None, camera_matrix)
    #undistorted_img = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2RGB) #Use if you use cv2 to import image. ax.imshow() needs RGB image
    return undistorted_img
def draw_subplot(img1,name1,img2,name2):
    """Display two images side by side with large titles."""
    fig, axes = plt.subplots(1, 2, figsize=(24, 9))
    fig.tight_layout()
    # imshow needs RGB for 3-channel images; 2-D images are auto-colourmapped
    # (pass cmap='gray' there if grayscale output is wanted).
    for axis, picture, title in zip(axes, (img1, img2), (name1, name2)):
        axis.imshow(picture)
        axis.set_title(title, fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Read in an image (mpimg gives RGB, so no channel conversion is needed)
img = mpimg.imread('test_images/test2.jpg') # highway image
#img = mpimg.imread('camera_cal/calibration3.jpg') # chessboard image
undistorted = cal_undistort(img)
draw_subplot(img,"OG image",undistorted,"Undist image")
print("To note the changes, look carefully at the outer boundary of both the images")
```
# 3) Use color transforms, gradients, etc., to create a thresholded binary image.
Caution: In the thresh_img() function, we begin by coverting our color space from RGB to HLS. We need to check whether our image was RGB or BGR when it was extracted from the frame?
Note: Put undistorted RGB images in this function
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def thresh_img(img):
    """
    x gradient will identify lanes far away from us
    Saturation channel will help us with the lanes near us. This will help if there's a lot of light
    """
    # Input: undistorted RGB image.  Output: 3-channel 0/255 mask image.
    """Starting with color channel"""
    # Convert to HLS color space and separate the S channel
    # Note: img is the undistorted image
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:,:,2]
    h_channel = hls[:,:,0]
    # Threshold color channel
    s_thresh_min = 170
    s_thresh_max = 255
    # Very narrow hue band targeting yellow lane paint
    h_thresh_min = 21
    h_thresh_max = 22
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
    h_binary = np.zeros_like(h_channel)
    h_binary[(h_channel >= h_thresh_min) & (h_channel <= h_thresh_max)] = 1
    """Now handling the x gradient"""
    # Grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Sobel x
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient
    thresh_min = 20
    thresh_max = 100
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # Combine the two binary thresholds
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[((s_binary == 1) & (h_binary == 1)) | (sxbinary == 1)] = 1
    #Used h as well so as to reduce noise in the image
    # Stack to 3 channels and scale to 0/255 so it displays as an image
    out_img = np.dstack((combined_binary, combined_binary, combined_binary))*255
    #return combined_binary
    return out_img
```
# 4) Apply a perspective transform to rectify binary image ("birds-eye view")
```
def perspective_transform(img):
    # Warp the road to a bird's-eye view; returns (warped, src, dst).
    # Define calibration box in source (original) and destination (desired or warped) coordinates
    img_size = (img.shape[1], img.shape[0])
    """Note the format used for img_size: it is (width, height), i.e. the
    x axis first and then the y axis, whereas NumPy indexing is rows
    (y axis) first and then columns (x axis)."""
    # Four source coordinates
    # Coordinates are scaled from a 960x540 reference frame to the actual image size
    src = np.array(
        [[437*img.shape[1]/960, 331*img.shape[0]/540],
         [523*img.shape[1]/960, 331*img.shape[0]/540],
         [850*img.shape[1]/960, img.shape[0]],
         [145*img.shape[1]/960, img.shape[0]]], dtype='f')
    # Next, we'll define a desired rectangle plane for the warped image.
    # We'll choose 4 points where we want source points to end up
    # This time we'll choose our points by eyeballing a rectangle
    dst = np.array(
        [[290*img.shape[1]/960, 0],
         [740*img.shape[1]/960, 0],
         [740*img.shape[1]/960, img.shape[0]],
         [290*img.shape[1]/960, img.shape[0]]], dtype='f')
    #Compute the perspective transform, M, given source and destination points:
    M = cv2.getPerspectiveTransform(src, dst)
    #Warp an image using the perspective transform, M; using linear interpolation
    #Interpolating points is just filling in missing points as it warps an image
    # The input image for this function can be a colored image too
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    return warped,src,dst
```
# Master Pipeline
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(frame):
    """Full lane-finding pipeline for a single RGB video frame.

    Steps: undistort -> colour/gradient thresholding -> perspective warp
    ("bird's-eye view") -> sliding-window lane-pixel search.

    Returns (warped_img, lane_identified): the warped thresholded frame
    and the same frame with the sliding-window search visualised.
    NOTE(review): moviepy's clip.fl_image() expects a function returning
    a single frame; the two-value return here will break the video cells
    below -- confirm which output is wanted before rendering videos.
    """
    def cal_undistort(img):
        # Reads mtx and dist matrices, performs image distortion correction and returns the undistorted image
        import pickle
        # Read in the saved matrices
        my_dist_pickle = pickle.load( open( "output_files/calib_pickle_files/dist_pickle.p", "rb" ) )
        mtx = my_dist_pickle["mtx"]
        dist = my_dist_pickle["dist"]
        undistorted_img = cv2.undistort(img, mtx, dist, None, mtx)
        #undistorted_img = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2RGB) #Use if you use cv2 to import image. ax.imshow() needs RGB image
        return undistorted_img
    def yellow_threshold(img, sxbinary):
        # Mask for yellow lane paint: (S-channel OR x-gradient) AND yellow hue.
        # Convert to HLS color space and separate the S channel
        # Note: img is the undistorted image
        hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        s_channel = hls[:,:,2]
        h_channel = hls[:,:,0]
        # Threshold color channel
        s_thresh_min = 100
        s_thresh_max = 255
        # On a 360-degree wheel yellow measured around 35-50; OpenCV hue is
        # halved, hence roughly 10-25 here.
        h_thresh_min = 10
        h_thresh_max = 25
        s_binary = np.zeros_like(s_channel)
        s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
        h_binary = np.zeros_like(h_channel)
        h_binary[(h_channel >= h_thresh_min) & (h_channel <= h_thresh_max)] = 1
        # Combine the two binary thresholds
        yellow_binary = np.zeros_like(s_binary)
        yellow_binary[(((s_binary == 1) | (sxbinary == 1) ) & (h_binary ==1))] = 1
        return yellow_binary
    def xgrad_binary(img, thresh_min=30, thresh_max=100):
        # Mask of strong horizontal intensity gradients (Sobel in x).
        # Grayscale image
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # Sobel x
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x
        abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
        scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
        # Threshold x gradient
        sxbinary = np.zeros_like(scaled_sobel)
        sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
        return sxbinary
    def white_threshold(img, sxbinary, lower_white_thresh = 170):
        # Mask for white lane paint: all three RGB channels bright AND an x-gradient hit.
        r_channel = img[:,:,0]
        g_channel = img[:,:,1]
        b_channel = img[:,:,2]
        # Threshold color channel
        r_thresh_min = lower_white_thresh
        r_thresh_max = 255
        r_binary = np.zeros_like(r_channel)
        r_binary[(r_channel >= r_thresh_min) & (r_channel <= r_thresh_max)] = 1
        g_thresh_min = lower_white_thresh
        g_thresh_max = 255
        g_binary = np.zeros_like(g_channel)
        g_binary[(g_channel >= g_thresh_min) & (g_channel <= g_thresh_max)] = 1
        b_thresh_min = lower_white_thresh
        b_thresh_max = 255
        b_binary = np.zeros_like(b_channel)
        b_binary[(b_channel >= b_thresh_min) & (b_channel <= b_thresh_max)] = 1
        white_binary = np.zeros_like(r_channel)
        white_binary[((r_binary ==1) & (g_binary ==1) & (b_binary ==1) & (sxbinary==1))] = 1
        return white_binary
    def thresh_img(img):
        # Combined yellow OR white lane mask, stacked to a 3-channel 0/255 image.
        sxbinary = xgrad_binary(img, thresh_min=25, thresh_max=130)
        yellow_binary = yellow_threshold(img, sxbinary) #(((s) | (sx)) & (h))
        white_binary = white_threshold(img, sxbinary, lower_white_thresh = 150)
        # Combine the two binary thresholds
        combined_binary = np.zeros_like(sxbinary)
        combined_binary[((yellow_binary == 1) | (white_binary == 1))] = 1
        out_img = np.dstack((combined_binary, combined_binary, combined_binary))*255
        return out_img
    def perspective_transform(img):
        # Warp the road to a bird's-eye view; returns (warped, src, dst).
        # Define calibration box in source (original) and destination (desired or warped) coordinates
        img_size = (img.shape[1], img.shape[0])
        # img_size is (width, height): x axis first, then y axis -- the
        # reverse of NumPy's (rows, columns) indexing.
        # Four source coordinates, scaled from a 960x540 reference frame
        # Order of points: top left, top right, bottom right, bottom left
        src = np.array(
            [[435*img.shape[1]/960, 350*img.shape[0]/540],
             [535*img.shape[1]/960, 350*img.shape[0]/540],
             [885*img.shape[1]/960, img.shape[0]],
             [220*img.shape[1]/960, img.shape[0]]], dtype='f')
        # Destination rectangle the source trapezium is mapped onto
        dst = np.array(
            [[290*img.shape[1]/960, 0],
             [740*img.shape[1]/960, 0],
             [740*img.shape[1]/960, img.shape[0]],
             [290*img.shape[1]/960, img.shape[0]]], dtype='f')
        #Compute the perspective transform, M, given source and destination points:
        M = cv2.getPerspectiveTransform(src, dst)
        #Warp an image using the perspective transform, M; using linear interpolation
        # The input image for this function can be a colored image too
        warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
        return warped, src, dst
    def draw_polygon(img1, img2, src, dst):
        # Draw the src polygon on img1 and the dst polygon on img2 (in place).
        src = src.astype(int) #Very important step (Pixels cannot be in decimals)
        dst = dst.astype(int)
        cv2.polylines(img1, [src], True, (255,0,0), 3)
        cv2.polylines(img2, [dst], True, (255,0,0), 3)
    def histogram_bottom_peaks (warped_img):
        # This will detect the bottom point of our lane lines
        # Take a histogram of the bottom half of the image
        bottom_half = warped_img[(warped_img.shape[0]//2):,:,0] # Collecting all pixels in the bottom half
        histogram = np.sum(bottom_half, axis=0) # Summing them along y axis (or along columns)
        # Find the peak of the left and right halves of the histogram
        # These will be the starting point for the left and right lines
        # BUGFIX: np.int was deprecated in NumPy 1.20 and later removed; use int.
        midpoint = int(histogram.shape[0]//2) # histogram is 1-D, so shape[0] is its length
        #print(np.shape(histogram)) #OUTPUT:(1280,)
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint
        return leftx_base, rightx_base
    def find_lane_pixels(warped_img):
        # Sliding-window search: starting from the histogram peaks, step a
        # window up the image, collect lane pixels and re-centre on their
        # mean x position.  Returns the image with the windows drawn.
        leftx_base, rightx_base = histogram_bottom_peaks(warped_img)
        # Create an output image to draw on and visualize the result
        out_img = np.copy(warped_img)
        # HYPERPARAMETERS
        # Choose the number of sliding windows
        nwindows = 9
        # Set the width of the windows +/- margin. So width = 2*margin
        margin = 100
        # Set minimum number of pixels found to recenter window
        minpix = 50
        # Set height of windows - based on nwindows above and image shape
        # (BUGFIX: int, not the removed np.int alias)
        window_height = int(warped_img.shape[0]//nwindows)
        # Identify the x and y positions of all nonzero pixels in the image
        nonzero = warped_img.nonzero() # pixel coordinates as two separate arrays
        nonzeroy = np.array(nonzero[0]) # y coordinates as a 1-D array, in pixel order
        nonzerox = np.array(nonzero[1])
        # Current positions to be updated later for each window in nwindows
        leftx_current = leftx_base # initial value; re-centred at the end of each window iteration
        rightx_current = rightx_base
        # Create empty lists to receive left and right lane pixel indices
        left_lane_inds = [] # lane-pixel indices are collected here;
        # indexing 'nonzerox'/'nonzeroy' with them yields the coordinates
        right_lane_inds = []
        # Step through the windows one by one
        for window in range(nwindows):
            # Identify window boundaries in x and y (and right and left)
            win_y_low = warped_img.shape[0] - (window+1)*window_height
            win_y_high = warped_img.shape[0] - window*window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            # Draw the windows on the visualization image
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),
                          (win_xleft_high,win_y_high),(0,255,0), 2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),
                          (win_xright_high,win_y_high),(0,255,0), 2)
            # Identify the nonzero pixels in x and y within the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                              (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                               (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If you found > minpix pixels, recenter next window on the mean position of the pixels in your current window
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
        # Concatenate the arrays of indices (previously was a list of lists of pixels)
        try:
            left_lane_inds = np.concatenate(left_lane_inds)
            right_lane_inds = np.concatenate(right_lane_inds)
        except ValueError:
            # Avoids an error if the above is not implemented fully
            pass
        # Extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        #return leftx, lefty, rightx, righty, out_img
        return out_img
    undist_img = cal_undistort(frame)
    thresh_img = thresh_img(undist_img) # Note: This is not a binary image. It has been stacked already within the function
    # (the local name thresh_img now refers to the image, not the function)
    warped_img, src, dst = perspective_transform(thresh_img)
    draw_polygon(frame, warped_img, src, dst) #the first image is the original image that you import into the system
    lane_identified = find_lane_pixels(warped_img)
    #return thresh_img, warped_img # return these to inspect the 3 intermediate images
    return warped_img, lane_identified # return these when running the video cells
    #return lane_identified
```
# 5) Detect lane pixels and fit to find the lane boundary.
```
# Try the pipeline on one of the captured test frames (uncomment to switch).
#image = mpimg.imread("my_test_images/starter.JPG")
#image = mpimg.imread("my_test_images/straight_road.JPG") #top left corner slightly to the right
image = mpimg.imread("my_test_images/change_road_color.JPG") #too less data points in right lane
#image = mpimg.imread("my_test_images/leaving_tree_to_road_color_change.JPG")
#image = mpimg.imread("my_test_images/tree_and_color_change.JPG")
#image = mpimg.imread("my_test_images/trees_left_lane_missing.JPG")
#image = mpimg.imread("my_test_images/trees_left_lane_missing2.JPG")
#image = mpimg.imread("my_test_images/1.JPG")
#image = mpimg.imread("my_test_images/2.JPG") #too less data points in right lane
#image = mpimg.imread("my_test_images/3.JPG") #too less points in right lane
#image = mpimg.imread("my_test_images/4.JPG")
#image = mpimg.imread("my_test_images/finding_hue.JPG")
#image = mpimg.imread("my_test_images/finding_hue2.JPG") #yellow has become very faint in this one now
# NOTE(review): process_image currently returns (warped_img, lane_identified),
# so these unpacked names are misleading -- confirm which pair is intended.
thresh_img, warped_img=process_image(image)
def draw_subplot(img1,name1,img2,name2, img3,name3):
    # Three-image variant of draw_subplot, used to inspect pipeline stages.
    f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img1) #Needs an RGB image for 3D images. For 2D images, it auto-colors them so use cmap='gray' to get grayscale if needed
    ax1.set_title(name1, fontsize=50)
    ax2.imshow(img2)
    ax2.set_title(name2, fontsize=50)
    ax3.imshow(img3)
    ax3.set_title(name3, fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
#draw_subplot(image,"OG",output,"lala lala image")
draw_subplot(image, "OG image",thresh_img,"Thresh_img",warped_img,"Bird eye's view")
```
Remember to at least stack binary images to form color images.
## Reset
If your sanity checks reveal that the lane lines you've detected are problematic for some reason, you can simply assume it was a bad or difficult frame of video, retain the previous positions from the frame prior and step to the next frame to search again. If you lose the lines for several frames in a row, you should probably start searching from scratch using a histogram and sliding window, or another method, to re-establish your measurement.
## Smoothing
Even when everything is working, your line detections will jump around from frame to frame a bit and it can be preferable to smooth over the last n frames of video to obtain a cleaner result. Each time you get a new high-confidence measurement, you can append it to the list of recent measurements and then take an average over n past measurements to obtain the lane position you want to draw onto the image.
# Project video
```
# Render the lane-finding pipeline over the project and challenge videos.
project_output = 'output_files/video_clips/project_video.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
clip1 = VideoFileClip("project_video.mp4")
#clip1 = VideoFileClip("project_video.mp4").subclip(0,1)
# NOTE(review): fl_image expects a function returning one frame, but
# process_image returns a 2-tuple -- this call will fail as written.
project_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!
%time project_clip.write_videofile(project_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(project_output))
# challenge video
challenge_output = 'output_files/video_clips/challenge_video_old.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
clip2 = VideoFileClip("challenge_video.mp4")
#clip2 = VideoFileClip("challenge_video.mp4").subclip(0,1)
challenge_clip = clip2.fl_image(process_image) #NOTE: this function expects color images!
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/2_transfer_learning_roadmap/6_freeze_base_network/1.1)%20Understand%20the%20effect%20of%20freezing%20base%20model%20in%20transfer%20learning%20-%201%20-%20mxnet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Goals
### Understand the role of freezing models in transfer learning
### Why freeze/unfreeze base models in transfer learning
### Use comparison feature to appropriately set this parameter on custom dataset
### You will be using lego bricks dataset to train the classifiers
# What is freezing base network
- To recap you have two parts in your network
- One that already existed, the pretrained one, the base network
- The new sub-network or a single layer you added
- The hyper-parameter we can see here: Freeze base network
- Freezing base network makes the base network untrainable
- The base network now acts as a feature extractor and only the next half is trained
- If you do not freeze the base network the entire network is trained
# Table of Contents
## [Install](#0)
## [Freeze Base network in densenet121 and train a classifier](#1)
## [Unfreeze base network in densenet121 and train another classifier](#2)
## [Compare both experiments](#3)
<a id='0'></a>
# Install Monk
## Using pip (Recommended)
- colab (gpu)
- All backends: `pip install -U monk-colab`
- kaggle (gpu)
- All backends: `pip install -U monk-kaggle`
- cuda 10.2
- All backends: `pip install -U monk-cuda102`
- Gluon backend: `pip install -U monk-gluon-cuda102`
- Pytorch backend: `pip install -U monk-pytorch-cuda102`
- Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
- All backends: `pip install -U monk-cuda101`
- Gluon backend: `pip install -U monk-gluon-cuda101`
- Pytorch backend: `pip install -U monk-pytorch-cuda101`
- Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
- All backends: `pip install -U monk-cuda100`
- Gluon backend: `pip install -U monk-gluon-cuda100`
- Pytorch backend: `pip install -U monk-pytorch-cuda100`
- Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
- All backends: `pip install -U monk-cuda92`
- Gluon backend: `pip install -U monk-gluon-cuda92`
- Pytorch backend: `pip install -U monk-pytorch-cuda92`
- Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
- All backends: `pip install -U monk-cuda90`
- Gluon backend: `pip install -U monk-gluon-cuda90`
- Pytorch backend: `pip install -U monk-pytorch-cuda90`
- Keras backend: `pip install -U monk-keras-cuda90`
- cpu
- All backends: `pip install -U monk-cpu`
- Gluon backend: `pip install -U monk-gluon-cpu`
- Pytorch backend: `pip install -U monk-pytorch-cpu`
- Keras backend: `pip install -U monk-keras-cpu`
## Install Monk Manually (Not recommended)
### Step 1: Clone the library
- git clone https://github.com/Tessellate-Imaging/monk_v1.git
### Step 2: Install requirements
- Linux
- Cuda 9.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`
- Cuda 9.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`
- Cuda 10.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`
- Cuda 10.1
- `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`
- Cuda 10.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`
- Windows
- Cuda 9.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`
- Cuda 9.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`
- Cuda 10.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`
- Cuda 10.1 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`
- Cuda 10.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`
- Mac
- CPU (Non gpu system)
- `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`
- Misc
- Colab (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
- Kaggle (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`
### Step 3: Add to system path (Required for every terminal or kernel run)
- `import sys`
- `sys.path.append("monk_v1/");`
## Dataset - LEGO Classification
- https://www.kaggle.com/joosthazelzet/lego-brick-images/
```
# Download the LEGO dataset from Google Drive (the cookie dance bypasses the large-file confirmation page) and unpack it.
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1RB_f2Kv3vkBXcQnCSVqCvaZFBHizQacl' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1RB_f2Kv3vkBXcQnCSVqCvaZFBHizQacl" -O LEGO.zip && rm -rf /tmp/cookies.txt
! unzip -qq LEGO.zip
# Remove macOS metadata that would otherwise be picked up as a class folder.
# NOTE(review): `os` must already be imported in this kernel.
if os.path.isfile("LEGO/train/.DS_Store"):
    os.system("rm LEGO/train/.DS_Store");
```
# Imports
```
#Using mxnet-gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
```
<a id='1'></a>
# Freeze Base network in densenet121 and train a classifier
## Creating and managing experiments
- Provide project name
- Provide experiment name
- For a specific data create a single project
- Inside each project multiple experiments can be created
- Every experiment can have different hyper-parameters attached to it
```
gtf = prototype(verbose=1);
gtf.Prototype("Project", "Freeze_Base_Network");
```
### This creates files and directories as per the following structure
workspace
|
|--------Project
|
|
|-----Freeze_Base_Network
|
|-----experiment-state.json
|
|-----output
|
|------logs (All training logs and graphs saved here)
|
|------models (all trained models saved here)
## Set dataset and select the model
## Quick mode training
- Using Default Function
- dataset_path
- model_name
- freeze_base_network
- num_epochs
## Sample Dataset folder structure
parent_directory
|
|
|------cats
|
|------img1.jpg
|------img2.jpg
|------.... (and so on)
|------dogs
|
|------img1.jpg
|------img2.jpg
|------.... (and so on)
## Modifiable params
- dataset_path: path to data
- model_name: which pretrained model to use
- freeze_base_network: Retrain already trained network or not
- num_epochs: Number of epochs to train for
```
gtf.Default(dataset_path="LEGO/train",
model_name="densenet121",
freeze_base_network=True, # Set this param as true
num_epochs=5);
#Read the summary generated once you run this cell.
```
## From the summary above
- Model Params
Model name: densenet121
Use Gpu: True
Use pretrained: True
Freeze base network: True
## Another thing to notice from summary
Model Details
Loading pretrained model
Model Loaded on device
Model name: densenet121
Num of potentially trainable layers: 242
Num of actual trainable layers: 1
### There are a total of 242 layers
### Since we have frozen the base network, only 1 layer is trainable: the final layer
## Train the classifier
```
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
```
## Validating the trained classifier
## Load the experiment in validation mode
- Set flag eval_infer as True
```
gtf = prototype(verbose=1);
gtf.Prototype("Project", "Freeze_Base_Network", eval_infer=True);
```
## Load the validation dataset
```
gtf.Dataset_Params(dataset_path="LEGO/valid");
gtf.Dataset();
```
## Run validation
```
accuracy, class_based_accuracy = gtf.Evaluate();
```
### Accuracy achieved - 86.063
(You may get a different result)
<a id='2'></a>
# Unfreeze Base network in densenet121 and train a classifier
## Creating and managing experiments
- Provide project name
- Provide experiment name
- For a specific data create a single project
- Inside each project multiple experiments can be created
- Every experiment can have different hyper-parameters attached to it
```
gtf = prototype(verbose=1);
gtf.Prototype("Project", "Unfreeze_Base_Network");
```
### This creates files and directories as per the following structure
workspace
|
|--------Project
|
|
|-----Freeze_Base_Network (Previously created)
|
|-----experiment-state.json
|
|-----output
|
|------logs (All training logs and graphs saved here)
|
|------models (all trained models saved here)
|
|
|-----Unfreeze_Base_Network (Created Now)
|
|-----experiment-state.json
|
|-----output
|
|------logs (All training logs and graphs saved here)
|
|------models (all trained models saved here)
## Set dataset and select the model
## Quick mode training
- Using Default Function
- dataset_path
- model_name
- freeze_base_network
- num_epochs
## Sample Dataset folder structure
parent_directory
|
|
|------cats
|
|------img1.jpg
|------img2.jpg
|------.... (and so on)
|------dogs
|
|------img1.jpg
|------img2.jpg
|------.... (and so on)
## Modifiable params
- dataset_path: path to data
- model_name: which pretrained model to use
- freeze_base_network: Retrain already trained network or not
- num_epochs: Number of epochs to train for
```
gtf.Default(dataset_path="LEGO/train",
model_name="densenet121",
freeze_base_network=False, # Set this param as false
num_epochs=5);
#Read the summary generated once you run this cell.
```
## From the summary above
- Model Params
Model name: densenet121
Use Gpu: True
Use pretrained: True
Freeze base network: False
## Another thing to notice from summary
Model Details
Loading pretrained model
Model Loaded on device
Model name: densenet121
Num of potentially trainable layers: 242
Num of actual trainable layers: 242
### There are a total of 242 layers
### Since we have unfrozen the base network, all 242 layers are trainable, including the final layer
## Train the classifier
```
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
```
## Validating the trained classifier
## Load the experiment in validation mode
- Set flag eval_infer as True
```
gtf = prototype(verbose=1);
gtf.Prototype("Project", "Unfreeze_Base_Network", eval_infer=True);
```
## Load the validation dataset
```
gtf.Dataset_Params(dataset_path="LEGO/valid");
gtf.Dataset();
```
## Run validation
```
accuracy, class_based_accuracy = gtf.Evaluate();
```
### Accuracy achieved - 99.31
(You may get a different result)
<a id='3'></a>
# Compare both the experiment
```
# Invoke the comparison class
from monk.compare_prototype import compare
```
### Creating and managing comparison experiments
- Provide project name
```
# Create a project
gtf = compare(verbose=1);
gtf.Comparison("Compare-effect-of-freezing");
```
### This creates files and directories as per the following structure
workspace
|
|--------comparison
|
|
|-----Compare-effect-of-freezing
|
|------stats_best_val_acc.png
|------stats_max_gpu_usage.png
|------stats_training_time.png
|------train_accuracy.png
|------train_loss.png
|------val_accuracy.png
|------val_loss.png
|
|-----comparison.csv (Contains necessary details of all experiments)
### Add the experiments
- First argument - Project name
- Second argument - Experiment name
```
gtf.Add_Experiment("Project", "Freeze_Base_Network");
gtf.Add_Experiment("Project", "Unfreeze_Base_Network");
```
### Run Analysis
```
gtf.Generate_Statistics();
```
## Visualize and study comparison metrics
### Training Accuracy Curves
```
from IPython.display import Image
Image(filename="workspace/comparison/Compare-effect-of-freezing/train_accuracy.png")
```
### Training Loss Curves
```
from IPython.display import Image
Image(filename="workspace/comparison/Compare-effect-of-freezing/train_loss.png")
```
### Validation Accuracy Curves
```
from IPython.display import Image
Image(filename="workspace/comparison/Compare-effect-of-freezing/val_accuracy.png")
```
### Validation loss curves
```
from IPython.display import Image
Image(filename="workspace/comparison/Compare-effect-of-freezing/val_loss.png")
```
## Accuracies achieved on validation dataset
### With freezing base network - 86.063
### Without freezing base network - 99.31
#### For this classifier, keeping the base network trainable seems to be a good option. However, for other datasets it may result in overfitting the training data
(You may get a different result)
# Goals Completed
### Understand the role of freezing models in transfer learning
### Why freeze/unfreeze base models in transfer learning
### Use comparison feature to appropriately set this parameter on custom dataset
| github_jupyter |
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
%matplotlib inline
```
### Import the image to be processed
```
image = mpimg.imread('test_images/solidYellowCurve.jpg')
print('This image is: ',type(image), 'with dimensions: ', image.shape)
plt.imshow(image)
```
### Some Helper functions :
* this block contains multiple functions which have been created to simplify the final pipeline for this project.
```
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: display the result with ``plt.imshow(gray, cmap='gray')``,
    otherwise matplotlib applies a false-color map to the single channel.
    """
    # Images loaded with cv2.imread() are BGR; use cv2.COLOR_BGR2GRAY there.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection on *img* with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth *img* with a square Gaussian kernel of side *kernel_size*."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    `vertices` should be a numpy array of integer points, shaped the
    way cv2.fillPoly expects (an array of polygons).
    """
    # Start from an all-black mask the same size/dtype as the input.
    mask = np.zeros_like(img)
    # Fill value must match the channel count: a tuple like (255, 255, 255)
    # for color images, a scalar 255 for single-channel (e.g. edge) images.
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # Paint the polygon interior with the fill color on the mask...
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # ...then AND it with the image so everything outside the polygon goes black.
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def average_intercept_slope(lines):
    """Split Hough segments into left/right lanes and average each side.

    Each segment is reduced to a (slope, intercept) pair; segments with a
    negative slope are assigned to the left lane, the rest to the right.
    Per side, the pairs are averaged weighted by segment length, so long
    segments dominate the fit.

    Returns a ``(left_lane, right_lane)`` tuple of averaged
    (slope, intercept) arrays; a side with no segments yields ``None``.
    """
    left_params, left_weights = [], []
    right_params, right_weights = [], []
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            if x2 == x1:
                # Vertical segment: slope would be infinite, skip it.
                continue
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - slope * x1
            length = np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            if slope < 0:
                left_params.append((slope, intercept))
                left_weights.append(length)
            else:
                right_params.append((slope, intercept))
                right_weights.append(length)
    left_lane = None
    if len(left_weights) > 0:
        left_lane = np.dot(left_weights, left_params) / np.sum(left_weights)
    right_lane = None
    if len(right_weights) > 0:
        right_lane = np.dot(right_weights, right_params) / np.sum(right_weights)
    return left_lane, right_lane
def line_points(y1, y2, line):
    """Convert a (slope, intercept) line into a pair of integer pixel endpoints.

    Returns ``((x1, y1), (x2, y2))`` where each x solves ``y = m*x + b``
    for the requested y, or ``None`` when *line* is ``None``.
    """
    if line is None:
        return None
    slope, intercept = line
    p1 = (int((y1 - intercept) / slope), int(y1))
    p2 = (int((y2 - intercept) / slope), int(y2))
    return (p1, p2)
def draw_lines(img, lines, color=[0, 0, 255], thickness=10):
    """Draw averaged, extrapolated lane lines on *img* (mutates it in place).

    The raw Hough segments are separated into left/right lanes by slope and
    averaged via ``average_intercept_slope``, then each averaged lane is
    extrapolated from the bottom of the image up to ~60% of its height,
    producing one solid line per lane. To make the lines semi-transparent,
    combine this function's output with ``weighted_img``.

    Bug fix: the averaged lanes were previously computed but never drawn —
    the loop drew the raw segments instead, so the extrapolation described
    in the original docstring never happened.
    """
    left_lane, right_lane = average_intercept_slope(lines)
    y1 = img.shape[0]   # bottom of the image
    y2 = y1 * 0.6       # slightly below the vertical middle of the image
    for lane in (line_points(y1, y2, left_lane), line_points(y1, y2, right_lane)):
        if lane is not None:
            cv2.line(img, lane[0], lane[1], color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Runs the probabilistic Hough transform and returns a new black image
    (same height/width as `img`, 3 channels) with the detected lines
    drawn on it via draw_lines().
    """
    # HoughLinesP returns segments as [[x1, y1, x2, y2]] arrays (or None
    # when nothing is detected — presumably the inputs here always yield
    # segments; verify before reusing on other footage).
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    # Draw on a blank color canvas so the result can later be blended with
    # the original frame by weighted_img().
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    Blend the line image onto the original frame.

    `img` is the output of the hough_lines(): a blank (all black) image
    with lines drawn on it.
    `initial_img` should be the image before any processing.

    The result image is computed as follows:
        initial_img * α + img * β + γ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
```
### Steps of the pipeline:
* The code blocks below are run individually to show what change each section of the code makes to the image.
```
gray = grayscale(image)
plt.imshow(gray, cmap='gray')
plt.savefig('examples/gray.jpg')
```
* gaussian blur image.
```
kernal_size = 5
blur_gray = gaussian_blur(gray, kernal_size)
plt.imshow(blur_gray)
plt.savefig('examples/blur_gray.jpg')
```
* use of canny edge detection to detect the lane line.
```
low_threshold = 200
high_threshold = 250
edges = canny(gray, low_threshold, high_threshold)
plt.imshow(edges, cmap = 'Greys_r')
plt.savefig('examples/canny_edges.jpg')
```
* selecting a region of interest to exclude non-useful data from the image.
```
# Region of interest start
# Next we'll create a masked edges image using cv2.fillPoly()
# This time we are defining a four sided polygon to mask
mask = np.zeros_like(edges)
ignore_mask_color = 255
imshape = image.shape
# vertices = np.array([[(image.shape[0]-100,imshape[0]),(425, 300), (500, 300), (900,imshape[0])]], dtype=np.int32)
point_A = (imshape[1]*0.1, imshape[0]) # (50,imshape[0])
point_B = (imshape[1]*0.45, imshape[0]*0.6) # (425, 300)
point_C = (imshape[1]*0.55, imshape[0]*0.6) # (500, 300)
point_D = (imshape[1]*0.95, imshape[0]) # (900,imshape[0])
vertices = np.array([[point_A,point_B, point_C, point_D]], dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_edges = cv2.bitwise_and(edges, mask)
plt.imshow(masked_edges)
plt.savefig('examples/ROI.jpg')
# End of region of intrest
```
* Drawing lines using houghs line method and then combining those details with the color image to display lane line of colored image.
```
# From this part Hough transform paramenters starts
rho = 1
theta = np.pi/180
threshold = 50
min_line_length = 100
max_line_gap = 160
line_image = np.copy(image)*0 # For creating a blank to draw lines on
# masked edges is the output image of region of intrest
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
#color_edges = np.dstack((masked_edges, masked_edges, masked_edges))
combo = weighted_img(lines, image, 0.8, 1, 0)
plt.imshow(combo)
plt.savefig('examples/lane_lines.jpg')
```
### Pipeline:
* The following code combines the steps above into a single pipeline to be used in video processing.
```
def process_image(image):
    """Lane-finding pipeline: return *image* with lane lines drawn on it.

    Steps: grayscale -> Gaussian blur -> Canny edges -> trapezoidal
    region-of-interest mask -> Hough line fit -> blend onto the frame.
    Intended for use with moviepy's fl_image (expects RGB color frames).
    """
    # 1. Grayscale + Gaussian blur to suppress noise before edge detection.
    gray = grayscale(image)
    kernal_size = 5
    blur_gray = gaussian_blur(gray, kernal_size)

    # 2. Canny edge detection on the *blurred* image.
    #    (Bug fix: blur_gray was computed but the detector was previously
    #    run on the unblurred grayscale, discarding the blur entirely.)
    low_threshold = 200
    high_threshold = 250
    edges = canny(blur_gray, low_threshold, high_threshold)

    # 3. Region of interest: keep only a trapezoid ahead of the car.
    #    Reuses the region_of_interest() helper instead of duplicating the
    #    mask/fillPoly logic inline.
    imshape = image.shape
    point_A = (imshape[1] * 0.1, imshape[0])         # bottom-left
    point_B = (imshape[1] * 0.45, imshape[0] * 0.6)  # top-left
    point_C = (imshape[1] * 0.55, imshape[0] * 0.6)  # top-right
    point_D = (imshape[1] * 0.95, imshape[0])        # bottom-right
    vertices = np.array([[point_A, point_B, point_C, point_D]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)

    # 4. Hough transform parameters: fit line segments and draw the lanes
    #    on a blank color canvas.
    rho = 1                  # distance resolution of the accumulator (pixels)
    theta = np.pi / 180      # angular resolution of the accumulator (radians)
    threshold = 50           # minimum votes for a line
    min_line_length = 100
    max_line_gap = 160
    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)

    # 5. Overlay the lane lines on the original color frame.
    #    (The per-frame plt.imshow calls were removed: they only slowed down
    #    video processing and displayed nothing useful per frame.)
    result = weighted_img(lines, image, 0.8, 1, 0)
    return result
```
### Video to be processed :
* Initially white image output is created where the processed frames from the video can be saved.
```
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,10)
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
| github_jupyter |
```
from nornir import InitNornir
nr = InitNornir(config_file="config.yaml")
```
# Executing tasks
Now that you know how to initialize nornir and work with the inventory let's see how we can leverage it to run tasks on groups of hosts.
Nornir ships a bunch of tasks you can use directly without having to code them yourself. You can check them out [here](../../plugins/tasks/index.rst).
Let's start by executing the `ls -la /tmp` command on all the devices in `cmh` of type `host`:
```
from nornir.plugins.tasks import commands
from nornir.plugins.functions.text import print_result
cmh_hosts = nr.filter(site="cmh", role="host")
result = cmh_hosts.run(task=commands.remote_command,
command="ls -la /tmp")
print_result(result, vars=["stdout"])
```
So what have we done here? First we have imported the `commands` and `text` modules. Then we have narrowed down nornir to the hosts we want to operate on. Once we have selected the devices we wanted to operate on we have run two tasks:
1. The task `commands.remote_command` which runs the specified `command` in the remote device.
2. The function `print_result` which just prints on screen the result of an executed task or group of tasks.
Let's try with another example:
```
from nornir.plugins.tasks import networking
cmh_spines = nr.filter(site="bma", role="spine")
result = cmh_spines.run(task=networking.napalm_get,
getters=["facts"])
print_result(result)
```
Pretty much the same pattern, just different task on different devices.
## What is a task
Let's take a look at what a task is. In its simplest form a task is a function that takes at least a [Task](../../ref/api/task.rst#nornir.core.task.Task) object as argument. For instance:
```
def hi(task):
    """Per-host task: print the host's name and its 'site' inventory value."""
    greeting = f"hi! My name is {task.host.name} and I live in {task.host['site']}"
    print(greeting)
nr.run(task=hi, num_workers=1)
```
The task object has access to `nornir`, `host` and `dry_run` attributes.
You can call other tasks from within a task:
```
def available_resources(task):
    """Per-host task: report free disk space and free memory.

    Runs two named sub-tasks on the host so each appears as a separate,
    labelled entry in print_result's output.
    """
    task.run(task=commands.remote_command,
             name="Available disk",
             command="df -h")
    task.run(task=commands.remote_command,
             name="Available memory",
             command="free -m")
result = cmh_hosts.run(task=available_resources)
print_result(result, vars=["stdout"])
```
You probably noticed in your previous example that you can name your tasks.
Your task can also accept any extra arguments you may need:
```
def count(task, to):
    """Per-host task: print the host name followed by the integers 0..to-1."""
    numbers = list(range(0, to))
    print(f"{task.host.name}: {numbers}")
cmh_hosts.run(task=count,
num_workers=1,
to=10)
cmh_hosts.run(task=count,
num_workers=1,
to=20)
```
## Tasks vs Functions
You probably noticed we introduced the concept of a `function` when we talked about `print_result`. The difference between tasks and functions is that tasks are meant to be run per host while functions are helper functions meant to be run globally.
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
## Some things I've learned about the data:
- there were fires in every state except Delaware in 2018.
- Fire names seem to be repeated, but it's hard for me to distinguish how to parse them
Could be cool to look at:
- States with the most fires
- Classes of fires and numbers
- Human vs non-human fires
```
data = pd.read_csv('./2018_FireDetailsDataset.csv')
data.head(5)
```
## Making a dictionary of dataframes by fire size class (A-G)
- Class A: 0.25 acres or less;
- Class B: > 0.25 acres, < 10 acres;
- Class C: >= 10 acres, < 100 acres;
- Class D: >= 100 acres, < 300 acres;
- Class E: >= 300 acres, < 1,000 acres;
- Class F: >= 1,000 acres, < 5,000 acres;
- Class G: >= 5,000 acres.
```
sizeclass = list(sorted(data['FIRE_SIZE_CLASS'].unique()))
sizeclass
size_dic = {}
for size in sizeclass:
size_dic[size] = data.loc[data['FIRE_SIZE_CLASS'] == size]
for (key, value) in size_dic.items():
print('#{}: {}, {}: {} acres burned'.format(key, len(value), key, round(value['FIRE_SIZE'].sum())))
_,_,_ = plt.hist(x=size_dic['A']['FIRE_SIZE'], bins=50)
_,_,_ = plt.hist(x=size_dic['G']['FIRE_SIZE'], bins=50)
size_dic['G'].head()
size_dic['G'].sort_values(by=['FIRE_SIZE'], ascending=False).head()
len(size_dic['G']['FIRE_NAME'].unique())
dg = size_dic['G'][size_dic['G']['CONT_DATE'].isna()].sort_values(by=['FIRE_SIZE'], ascending=False)
size_dic['G'].sort_values(by=['FIRE_SIZE'], ascending = False).head(15)
#size_dic['G'].to_csv('./classg_2018_editedcontdate.csv', index=False)
```
## Cleaning up data?
I didn't find any really great ways to clean the dataset
```
data[data['CONT_DATE'].isna()]
len(data)
len(data['FIRE_NAME'].unique())
data.loc[data['FIRE_NAME'] == 'NEW YEAR']
len(data['MTBS_FIRE_NAME'])
data.loc[data['FIRE_NAME'] == 'CARR']
duplicate = data[data.duplicated(['FIRE_NAME', 'STATE', 'DISCOVERY_DATE', 'NWCG_REPORTING_UNIT_ID', 'CONT_DATE'])]
duplicate
group = data.groupby(['FIRE_NAME', 'STATE', 'DISCOVERY_DATE', 'NWCG_REPORTING_UNIT_ID', 'CONT_DATE']).size().reset_index(name='Freq')
group['FIRE_NAME']
group.loc[group['FIRE_NAME'] == 'NEW YEAR']
duplicate_all = data[data.duplicated(['FIRE_NAME'], )]
duplicate
nan = pd.isna(data['FIRE_NAME'])
nan_mtbs = pd.isna(data['MTBS_FIRE_NAME'])
nan_mtbs
len(nan_mtbs[nan_mtbs])
# 13806 NaN values for FIRE_NAME in the dataset
len(nan[nan])
#nullname
#len(nullname)
```
## Making a dictionary of dataframes by state (2018)
```
states = list(sorted(data['STATE'].unique()))
#states
state_dic = {}
for state in states:
state_dic[state] = data.loc[data['STATE'] == state]
len(data.loc[data['STATE'] == 'AK'])
state_dic['AK']['FIRE_SIZE'].describe()
```
### Summing acres burned on a state basis
```
state_dic['AK']['FIRE_SIZE'].sum()
state_sum = []
for key, value in state_dic.items():
tup = (key, round(value['FIRE_SIZE'].sum()))
state_sum.append(tup)
statesum_df = pd.DataFrame(state_sum, columns = ['state', 'total_acres_burned'])
# adding data in for delaware since it didn't have any
statesum_df.loc[len(statesum_df.index)] = ['DE', 0]
statesum_df.sort_values(by='state', ignore_index=True, inplace=True)
#statesum_df
#statesum_df.to_csv( './burnedacres_bystate.csv', index=False)
_,_,_ = plt.hist(x=statesum_df['total_acres_burned'] , bins=50)
#state_dic['HI']
```
#### Humans vs natural on a per state basis
```
statecause_sum = []
for key, value in state_dic.items():
tup = (key, round(value['FIRE_SIZE'].sum()))
state_sum.append(tup)
cause_list = data['NWCG_CAUSE_CLASSIFICATION'].unique()
cause_list
statecause_list = []
for key, value in state_dic.items():
cause_sumlist = []
for cause in cause_list:
statecause = value.loc[value['NWCG_CAUSE_CLASSIFICATION'] == cause]
statecause_sum = round(statecause['FIRE_SIZE'].sum())
cause_sumlist.append((cause, statecause_sum))
statecause_list.append(cause_sumlist)
#statecause_list
concat_list = []
for df in statecause_list:
dd = pd.DataFrame(df).T
dd.columns = dd.iloc[0]
dd.drop(dd.index[0], inplace=True)
concat_list.append(dd)
sumcause_df = pd.concat(concat_list)
sumcause_df.insert(0, 'State', list(state_dic.keys()))
sumcause_df.reset_index(drop=True, inplace=True)
#sumcause_df
```
# Loading in Big Data Set
to make sure I didn't lose anything in the transfer
```
big_data = pd.read_csv('./1992to2018_FireDetails.csv', parse_dates=['DISCOVERY_DATE', 'CONT_DATE'], low_memory=False)
big_data.shape
```
### Breakdown of causes of fires by 'Cause Classification' and the more specific 'General Cause'
I think this can be looked at as cause classification as the broad category and general cause as the subsidiary
```
big_data['NWCG_CAUSE_CLASSIFICATION'].unique()
cause = (big_data['NWCG_CAUSE_CLASSIFICATION'].value_counts())
cause.sort_index()
big_data['NWCG_GENERAL_CAUSE'].unique()
gcause = big_data['NWCG_GENERAL_CAUSE'].value_counts().sort_index()
gcause
```
### Year dictionary
```
years = list(sorted(big_data['FIRE_YEAR'].unique()))
#years
year_dic = {}
for year in years:
year_dic[year] = big_data.loc[big_data['FIRE_YEAR'] == year]
year_sum = []
for key, value in year_dic.items():
tup = (key, value.shape[0],
round(value['FIRE_SIZE'].sum()))
year_sum.append(tup)
yearsum_df = pd.DataFrame(year_sum, columns =
['year', 'total_fires',
'total_acres_burned'])#,
#'cause_classification',
#'general_cause'])
yearsum_df.head()
#yearsum_df.to_csv('./firesummary_byyear.csv', index=False)
```
-----
-----
### Making csv's for causes
```
causeclass = value['NWCG_CAUSE_CLASSIFICATION'].value_counts().sort_index()
gencause = value['NWCG_GENERAL_CAUSE'].value_counts().sort_index()
#gcause = big_data['NWCG_GENERAL_CAUSE'].value_counts().sort_index().to_frame().T
#gcause
classcause_sum = []
for key, value in year_dic.items():
dfclass = value['NWCG_CAUSE_CLASSIFICATION'].value_counts().sort_index().to_frame().T
classcause_sum.append(dfclass)
#classcause_sum
df_classcause = pd.concat(classcause_sum)
df_classcause.insert(0, 'year', list(year_dic.keys()))
df_classcause.reset_index(drop=True, inplace=True)
gencause_sum = []
for key, value in year_dic.items():
dfgen = value['NWCG_GENERAL_CAUSE'].value_counts().sort_index().to_frame().T
gencause_sum.append(dfgen)
#gencause_sum
df_gencause = pd.concat(gencause_sum)
#df_gencause
#df_gencause.insert(0, 'year', list(year_dic.keys()))
df_gencause.reset_index(drop=True, inplace=True)
df_gencause['Firearms and explosives use'] = df_gencause['Firearms and explosives use'].fillna(0).astype(int)
df_classcause.head()
df_gencause.head()
df_causecombo = pd.concat([df_classcause, df_gencause], axis=1)
df_causecombo.head()
#df_causecombo.to_csv('./causeoffire_byyear.csv', index=False)
# create figure and axis objects with subplots()
fig,ax = plt.subplots()
# make a plot
#ax.plot(yearsum_df.year, yearsum_df.total_acres_burned, color="red", marker="o")
# set x-axis label
#ax.set_xlabel("year",fontsize=14)
# set y-axis label
#ax.set_ylabel("total_acres_burned",color="red",fontsize=14)
# twin object for two different y-axis on the sample plot
ax2=ax.twinx()
# make a plot with different y-axis using second axis object
ax2.plot(df_causecombo.year, df_causecombo["Human"],color="blue",marker="o")
ax2.plot(df_causecombo.year, df_causecombo["Natural"],color="green",marker="o")
ax2.set_ylabel("Human/Natural", fontsize=14)
plt.show()
plt.plot(yearsum_df['year'], yearsum_df['total_acres_burned'], marker = 'o', color='red')
plt.xlabel('Year')
plt.ylabel('Total Acres Burned')
plt.ylim(0,10545663)
plt.show()
#plt.plot(df_causecombo['year'], df_causecombo['Human'])
#plt.plot(df_causecombo['year'], df_causecombo['Natural'])
```
----
----
### Size changes over years
Checking Class data to see if large wildfires increased over the years recorded
```
sizeclass_sum = []
for key, value in year_dic.items():
dfsize = value['FIRE_SIZE_CLASS'].value_counts().sort_index().to_frame().T
sizeclass_sum.append(dfsize)
#sizeclass_sum[0]
df_sizeclass = pd.concat(sizeclass_sum)
#df_sizeclass
df_sizeclass.insert(0, 'year', list(year_dic.keys()))
df_sizeclass.reset_index(drop=True, inplace=True)
df_sizeclass.head()
df_sizeclass.columns
df_sizeclass2 = df_sizeclass.set_axis(['year', '#A', '#B', '#C', '#D', '#E', '#F', '#G'], axis=1)
df_sizeclass2.head()
```
---
```
sizeclass
megalist = []
for key, value in year_dic.items():
class_sumlist = []
for size in sizeclass:
yrclass = value.loc[value['FIRE_SIZE_CLASS'] == size]
class_sum = round(yrclass['FIRE_SIZE'].sum())
class_sumlist.append((size, class_sum))
megalist.append(class_sumlist)
megalist
concat_list = []
for df in megalist:
dd = pd.DataFrame(df).T
dd.columns = dd.iloc[0]
dd.drop(dd.index[0], inplace=True)
concat_list.append(dd)
sumdf = pd.concat(concat_list)
#sumdf.insert(0, 'year', list(year_dic.keys()))
sumdf.reset_index(drop=True, inplace=True)
sumdf.head()
sumdf2 = sumdf.set_axis(['A_acres', 'B_acres', 'C_acres', 'D_acres', 'E_acres', 'F_acres', 'G_acres'], axis=1)
sumdf2.head()
df_sizeclasscombo = pd.concat([df_sizeclass2, sumdf2], axis=1)
df_sizeclasscombo.head()
#df_sizeclasscombo.to_csv('./firesizeclass_byyear.csv', index=False)
sumdf2['%G'] = (sumdf2['G_acres']/sumdf.sum(axis=1))
sumdf2['%B'] = (sumdf2['B_acres']/sumdf.sum(axis=1))
sumdf2['%F'] = (sumdf2['F_acres']/sumdf.sum(axis=1))
sumdf2.head()
plt.plot(df_sizeclass2['year'], sumdf2['%B'], marker='^', color='orange', label='class B')
plt.plot(df_sizeclass2['year'], sumdf2['%F'], marker='x', color='green', label='class F')
plt.plot(df_sizeclass2['year'], sumdf2['%G'], marker='o', label='class G')
plt.xlabel('year')
plt.ylabel('% of total fires')
plt.legend()
plt.ylim(0,1)
plt.show()
```
## 2018 data check
```
data2018 = big_data.loc[big_data['FIRE_YEAR'] == 2018]
data2018['DISCOVERY_DATE'].head()
#data2018.sort_values(by=('DISCOVERY_DATE'))
data2018.columns
col_diff = list(set(list(data2018.columns)) - set(list(data.columns)))
col_diff
datadrop = data2018.drop(col_diff, axis=1)
datadrop
datadrop.loc[datadrop['FIRE_NAME'] == 'SPRING CREEK']
```
| github_jupyter |
### Importing Libraries
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
### Import Dataset
```
dataset = pd.read_csv("Restaurant_Reviews.tsv", delimiter="\t",quoting=3)
```
### Cleaning the Texts
```
import re #simplify reviews
import nltk #for NLP,it allows us to download ensemble-> stop words
nltk.download("stopwords")
from nltk.corpus import stopwords #for importing stopwords
#stopwords are used to remove article (ex:-an,a,the,etc)
#which are not imp and does not give any hint for reviews we want
from nltk.stem.porter import PorterStemmer #to apply stemming over useful words
"""stemming consists of taking only the root of a word
that indicates enough about what this word means
example:-i loved it OR i love it.
here loved will be stemmed to love so just to simplify
the process. It helps to reduce matrix size of sparse matrix"""
corpus = []  # cleaned, stemmed reviews — one string per original review
# Performance fix: build the stemmer and the stop-word set ONCE, outside the
# loop. The original recreated the PorterStemmer and re-read the stop-word
# corpus on every one of the 1000 iterations, and tested membership against a
# list (O(n)) instead of a set (O(1)).
ps = PorterStemmer()
all_stopwords = set(stopwords.words('english'))
# Keep 'not': negation carries sentiment and must survive stop-word removal.
all_stopwords.discard('not')
for i in range(0, 1000):
    # Replace everything that is not a letter with a space (drops punctuation,
    # digits, etc.), then normalize case and split into words.
    review = re.sub("[^a-zA-Z]", " ", dataset["Review"][i])
    review = review.lower()
    review = review.split()
    # Stem each non-stop-word to its root ("loved" -> "love") to shrink the
    # eventual bag-of-words sparse matrix.
    review = [ps.stem(word) for word in review if not word in all_stopwords]
    review = ' '.join(review)
    corpus.append(review)
print(corpus)
```
## Creating the Bag of model
```
from sklearn.feature_extraction.text import CountVectorizer
# max_features=1500 KEEPS the 1500 most frequent words and drops the rest
# (bounds the width of the sparse matrix)
cv = CountVectorizer(max_features = 1500)
x = cv.fit_transform(corpus).toarray()  # document-term count matrix
y = dataset.iloc[:,-1].values  # sentiment labels: last column of the TSV
len(x[0])  # number of features actually retained
```
## Splitting the Dataset into the training set and test set
```
# 80/20 train/test split; fixed random_state for reproducibility
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state = 0)
```
## Training the Naive Bayes Model on the Training set
```
from sklearn.naive_bayes import GaussianNB
# Gaussian Naive Bayes classifier on the bag-of-words counts
classifier = GaussianNB()
classifier.fit(x_train,y_train)
y_pred = classifier.predict(x_test)
# Side-by-side view: predicted label (left) vs ground truth (right)
print(np.concatenate((y_pred.reshape(len(y_pred),1),y_test.reshape(len(y_test),1)),1))
```
## Making the confusion matrix
```
from sklearn.metrics import confusion_matrix, accuracy_score
# Confusion matrix: rows = actual labels, columns = predicted labels
cn = confusion_matrix(y_test,y_pred)
print(cn)
accuracy_score(y_test,y_pred)
```
| github_jupyter |
# 2040 le cap des 100% de voitures électriques
*Etude data - Projet 8 - @Nalron (août 2020)*\
*Traitement des données sur Jupyter Notebook (Distribution Anaconda)*\
*Etude réalisée en langage Python*
Visualisation des Tableaux de bord: [Tableau Public](https://public.tableau.com/profile/nalron#!/vizhome/ElectricCarsFrance2040/Vuedensemble)
---
# Rappel des missions
### [Mission 1 : Positionnement de la voiture électrique en France](https://github.com/nalron/project_electric_cars_france2040/blob/french_version/p8_notebook01.ipynb)
Évolution du parc automobile électrique à 2 ans.<br>
Identification et classification des inégalités locales des voitures électriques.<br>
Autonomie et consommation moyenne d'une voiture électrique.
### [Mission 2 : Besoin des déploiements en IRVE](https://github.com/nalron/project_electric_cars_france2040/blob/french_version/p8_notebook02.ipynb)
Évolution du nombre de points de recharge disponibles ouverts au public.<br>
Analyse de la répartition par borne de recharge, type de prise et catégorie d’aménageur.<br>
Utilisation des ratios pour le dimensionnement d'un maillage de taille optimale.<br>
Prévision du nombre de PDC à horizon 2025.<br>
### [Mission 3 : Appel de charge au réseau électrique](https://github.com/nalron/project_electric_cars_france2040/blob/french_version/p8_notebook03.ipynb)
Analyse de la consommation d'électricité en France et des filières de production.<br>
Profiler un pic d’utilisation des bornes de recharge.<br>
Courbe de charge réseau électrique pour répondre aux nouveaux modes de consommation.
---
```
#Import des principales librairies Python
import pandas as pd
import plotly.figure_factory as ff
import requests
import seaborn as sns
%pylab inline
```
## Mission 2 : Besoin des déploiements en IRVE<a id="borne">
__`Traitement des données sur les points de charge par typologie`__
Ce jeu de données présente le nombre total de points de charge en France continentale.
Les points de charge sont matérialisés par un socle de prise sur lequel un véhicule électrique peut potentiellement se brancher. Une borne de recharge peut comporter un ou plusieurs points de charge. Les données présentées segmentent les points de charge en trois typologies :
- Les points de charge « accessible au public » correspondent aux points de charge accessibles dans les commerces (supermarché, concession automobile…), parking, sites publics ou stations en voirie.
- Les points de charge « particulier » sont des points de charges privés localisés dans le résidentiel collectif (immeubles, copropriétés…) ou individuel (pavillons).
- Les points de charge « société » sont des points de charge privés localisés dans les sociétés et réservés à l’activité de la société ou à la recharge des véhicules électriques des employés.
Le jeu de données a été élaboré par Enedis à partir de ses données propres combinées avec certaines données externes, issues des sociétés Girève et AAA Data. Les données sur les points de charge « particulier » et « société » sont une reconstitution de l’existant construite par Enedis sur la base d’hypothèses. Ces hypothèses s’appuient sur l’évolution du marché du véhicule électrique.
```
#Load the "nombre-de-points-de-charge-par-typologie.csv" dataset
irve_type = pd.read_csv('p8_data/nombre-de-points-de-charge-par-typologie.csv', sep=';')
display(irve_type.shape)
display(irve_type.head())
#Inspect the distinct values of the 'Nombre' variable
irve_type['Nombre'].unique()
#plt.figure(figsize=(12,3))
#Boxplot of charging-point counts grouped by year, to spot outliers
irve_type.boxplot(column= 'Nombre', by='Année')
plt.show()
```
Il ne semble pas y avoir de valeur aberrante dans les valeurs de la variable 'Nombre'. Pour rappel, ici nous avons les points de charge électriques quantifiés par année et trimestre.
```
#Reshape to a more natural layout: one row per (year, quarter),
#one column per charging-point typology holding the 'Nombre' counts
irve_type = irve_type.pivot_table(index=['Année', 'Trimestre'],
                                  columns='Typologie',
                                  values='Nombre').reset_index()
irve_type.columns.name = None  # drop the residual 'Typologie' axis label
irve_type
#Quarter-over-quarter evolution (%) for each charging-point typology.
#pct_change() computes (row[i+1] - row[i]) / row[i] and stores it at row i+1,
#exactly like the original iterrows loop, but in one vectorised pass per
#column instead of three redundant `if i+1 < len(...)` blocks per row.
for col, pct_col in [('Accessible au public', '%Public'),
                     ('Particulier', '%Particulier'),
                     ('Société', '%Société')]:
    irve_type[pct_col] = (irve_type[col].pct_change() * 100).round(2)
#The first quarter has no predecessor, so its % cells are NaN; fill with 0
#(the original loop did the same via a whole-frame fillna on the last pass).
irve_type.fillna(0, inplace=True)
#Map quarters onto quarter-end dates (day-month) to build a time series
irve_type.replace({'T1' : '31-03',
                   'T2' : '30-06',
                   'T3' : '30-09',
                   'T4' : '31-12'},
                  inplace=True)
#The built string is "YYYY-DD-MM", hence the "%Y-%d-%m" parse format below
irve_type['Time'] = irve_type['Année'].astype(str)+ str("-")+irve_type['Trimestre']
irve_type['Time'] = pd.to_datetime(irve_type['Time'], format="%Y-%d-%m")
#Display the enriched dataframe
irve_type
#Display the column dtypes
irve_type.dtypes
#Save the enriched table (used by the Tableau dashboards)
irve_type.to_csv('p8_datatable/irve_type.csv')
#Check for missing values
irve_type.isna().any()
#Check for duplicated rows
irve_type.duplicated().any()
#List of years covered by this dataset
list(irve_type['Année'].unique())
```
__`Traitement des données sur les bornes de recharge pour vehicules electriques (IRVE)`__
Ce fichier est une version consolidée des sources suivantes: Stations Tesla, Bornes de la Métropole de Rennes, Bornes dans les Concessions Renault, Bornes Autolib', Plus de Bornes, opérateur en Provence, Compagnie Nationale du Rhône, Magasins E.Leclerc
Données ajoutées en décembre 2014: Vincipark/Sodetrel, Grand Lyon, Morbihan Energies
Données ajoutées en octobre 2015: Magasins AUCHAN, Concessions NISSAN, Réseau ALTERBASE, SyDEV, Freshmile, EFFIA
Données ajoutées en mai 2016: SDE18, SDE24, SDE28, SDE32, MOVeasy, Seine Aval, SIEML, SDESM, Vienne
```
#Load the consolidated charging-station dataset (IRVE)
irve = pd.read_csv('p8_data/fichier-consolide-des-bornes-de-recharge-pour-vehicules-electriques-irve.csv',
                   sep=';')
display(irve.shape)
display(irve.head())
```
Le premier point de contrôle passe par la recherche d'éventuels doublons. Notons que le contexte métier nécessite de la rigueur dans l'interprétation de certaines variables, l'amalgame entre station, borne et point de charge est régulièrement rencontré. Donc, "id_station" n'est pas le sous-ensemble le plus approprié à l'identification de doublons, une station de recharge peut avoir plusieurs points de charge, et l'identifiant ne tient pas compte du point de charge. Notons que "id_pdc" permet d'obtenir des identifiants uniques pouvant cette fois-ci être pris comme sous-ensemble.
```
#Duplicate search on 'id_pdc', the only per-charging-point unique identifier
irve.duplicated(subset='id_pdc').sum()
```
Notons que le fichier mis à disposition sur le site data.gouv.fr annonce plusieurs consolidations selon les années 2014 à 2016 et 2018. Attention, quelques opérateurs comme Tesla, Nissan, Auchan, etc… ne sont plus observés dans la version de juin 2020 et même depuis plusieurs mois. Non pas parce que ces stations de recharge ont été retirées, mais par logique d'uniformisation selon une charte d'utilisation "Fichiers à destination des aménageurs et opérateurs publics et privés d'infrastructures de recharge pour véhicules électriques" consultable sur [data.gouv.fr](https://www.data.gouv.fr/fr/datasets/fichiers-pour-les-infrastructures-de-recharge-de-vehicules-electriques/)
<em>Le décret 2017-26 du 12 janvier 2017 fixe les exigences requises pour la configuration des points de recharge à publier sur un nouveau fichier désormais en CSV. L'aménageur, ou l'opérateur désigné le cas échéant, prend les mesures appropriées pour que ces données soient en permanence tenues à jour et rendues publiques sur data.gouv.fr</em>
<u>Dans le cadre de l'étude, les opérateurs (ou principaux opérateurs) identifiés comme manquants seront réintégrés dans l'échantillon.</u>
```
#How many charging station pools as of June 2020?
irve.id_station.nunique()
#How many charging stations as of June 2020?
irve.id_pdc.nunique()
```
**Combien de points de charge (en anglais Charging Point ou EVSE) à Juin 2020?**
Selon la définition de l'AFIREV, le point de charge représente le nombre d'emplacement individuel permettant le stationnement du véhicule pendant le temps de charge, donc le nombre de prises de la borne. Le jeu de données `irve` ne permet pas de le quantifier directement, malgré la présence d'une variable 'nbre_pdc' qui ne représente que la borne et non le nombre de prises. Notons qu'il est nécessaire d'enrichir les données par une estimation des prises de chacune des bornes, ce calcul pourra être réalisé à l'aide de la variable 'type_prise'. <u>Cet enrichissement sera fait plus tard après intégration des opérateurs manquants.</u>
### Exploitation des opérateurs et aménageurs manquants
```
#Load the "Mobive" operator dataset
#https://www.data.gouv.fr/fr/datasets/infrastructures-de-recharge-pour-vehicules-electriques-mobive-1/
mobive = pd.read_csv('p8_data/irve-mobive-20200331.csv', sep=';', decimal=",")
display(mobive.shape)
display(mobive.head())
#Check that the columns match before concatenation
display(irve.columns)
display(mobive.columns)
#Load the E.Leclerc retail-chain dataset
#https://www.data.gouv.fr/fr/datasets/localisation-des-bornes-de-recharge-
#pour-vehicules-electriques-dans-les-magasins-e-leclerc/
leclerc = pd.read_csv('p8_data/leclerc.csv', sep=';', decimal=",")
display(leclerc.shape)
display(leclerc.head())
#Check that the columns match before concatenation
display(irve.columns)
display(leclerc.columns)
#Align diverging column names on the irve schema before concatenation
leclerc.rename(columns={
    'nom_station': 'n_amenageur',
    'nom_porteur': 'n_enseigne',
    'ID_station': 'id_station',
    'adresse_station': 'ad_station',
    'longitude_WSG84': 'Xlongitude',
    'latitude_WSG84': 'Ylatitude',
    'type_connecteur': 'type_prise',
    'type_charge': 'puiss_max'
}, inplace=True)
#Overwrite 'puiss_max' with the 22 kVA rating used for these stations
leclerc['puiss_max'] = 22
#Load the AUCHAN fast-charging network dataset
#https://www.data.gouv.fr/fr/datasets/reseau-bornes-de-recharge-rapide-auchan/
auchan = pd.read_csv('p8_data/auchan.csv', sep=';')
display(auchan.shape)
display(auchan.head())
#Merge the address-related columns into a single station address
auchan['ad_station'] = auchan['ADRESSE'] + str(' ') + auchan['CP'].astype(str) + str(' ') + auchan['Unnamed: 5']
#Rename columns on the irve schema before concatenation
auchan.rename(columns={
    'LIEU': 'n_amenageur',
    'Latitude': 'Ylatitude',
    'Longitude': 'Xlongitude'
}, inplace=True)
auchan.drop(columns=['N°', 'ADRESSE', 'CP', 'LIEN CHARGEMAP', 'Dept',
                     'Unnamed: 5', 'Unnamed: 9', 'Unnamed: 10'], inplace=True)
#Add a 'puiss_max' column: 50 kVA fast charging is available
#in more than 90% of AUCHAN shopping centres
auchan['puiss_max'] = 50
#Load the EFFIA car-park dataset
#https://www.data.gouv.fr/fr/datasets/bornes-de-recharge-pour-vehicules-electriques-parking-effia/
effia = pd.read_csv('p8_data/effia.csv', sep=';')
display(effia.shape)
display(effia.head())
#Rename columns on the irve schema before concatenation
effia.rename(columns={
    'nom_station': 'n_amenageur',
    'adresse_station': 'ad_station',
    'latitude_WSG84': 'Ylatitude',
    'longitude_WSG84': 'Xlongitude',
    'type_connecteur': 'type_prise',
    'type_charge': 'puiss_max',
    'nom_porteur': 'n_enseigne'
}, inplace=True)
#Drop the first row (NOTE(review): presumably a stray header row — verify in the CSV)
effia.drop(index=0, inplace=True)
effia.drop(columns=['ID_station', 'observations', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13','Unnamed: 14'],
           inplace=True)
#Overwrite 'puiss_max': 3.7 kVA normal charging for these car parks
effia['puiss_max'] = 3.7
#Load the VINCI car-park dataset
vinci = pd.read_csv('p8_data/vincipark.csv', sep=';')
display(vinci.shape)
display(vinci.head())
#Rename columns on the irve schema before concatenation
vinci.rename(columns={
    'nom_station': 'n_station',
    'adresse_station': 'ad_station',
    'latitude': 'Ylatitude',
    'longitude': 'Xlongitude',
    'nom_porteur': 'n_enseigne',
    'type_connecteur': 'type_prise',
}, inplace=True)
vinci.drop(columns=['ID_station', 'type_charge'], inplace=True)
#Load the Tesla "destination charging" dataset
#https://www.data.gouv.fr/fr/datasets/recharge-a-destination-tesla/
tesla = pd.read_csv('p8_data/irve-tesla-destination-charging-20181130.csv', sep=';')
display(tesla.shape)
display(tesla.head())
#Normalise the connector label
tesla['type_prise'] = "Tesla Type 2"
#Replace the 'A Cheda' label with 'Tesla'
tesla['n_amenageur'].replace('A Cheda', 'Tesla', inplace=True)
#Rename before concatenation (the source file names the latitude 'Xlatitude')
tesla.rename(columns={'Xlatitude': 'Ylatitude'}, inplace=True)
tesla.drop(columns=['ID_station', 'ID_pdc'], inplace=True)
#Load the Tesla Supercharger dataset
#https://www.data.gouv.fr/fr/datasets/stations-supercharger-tesla/
tesla_supercharger = pd.read_csv('p8_data/irve-tesla-supercharger-20181130.csv', sep=';')
display(tesla_supercharger.shape)
display(tesla_supercharger.head())
#Rename one column on the irve schema before concatenation
tesla_supercharger.rename(columns={'accessibilite' : 'accessibilité'}, inplace=True)
#Normalise the connector label
tesla_supercharger['type_prise'] = "Tesla Supercharger"
#Load the NISSAN dealership fast-charging dataset
#https://www.data.gouv.fr/fr/datasets/reseau-bornes-de-recharge-rapide-concessions-nissan/
nissan = pd.read_csv('p8_data/nissan.csv', sep=';')
display(nissan.shape)
display(nissan.head())
#Drop one NaN observation
nissan.drop(index=58, inplace= True)
#Build the station address from its components
nissan['ad_station'] = nissan['ADRESSE'] + str(' ') + nissan['CP'].astype(str) + str(' ') + nissan['VILLE']
#Rename columns on the irve schema before concatenation
nissan.rename(columns={
    'LIEU': 'n_enseigne',
    'Type': 'type_prise',
    'Latitude': 'Ylatitude',
    'Longitude': 'Xlongitude'
}, inplace=True)
nissan.drop(columns=['ADRESSE', 'CP', 'Dept', 'VILLE', 'Code concession', 'Unnamed: 8', 'Téléphone',
                     'Directeur Concession Nissan', 'Unnamed: 13', 'Unnamed: 14', 'LIEN CHARGEMAP'], inplace=True)
#Load the RENAULT dealership charging-station dataset
#https://www.data.gouv.fr/fr/datasets/reseau-bornes-de-recharge-rapide-concessions-nissan/
renault = pd.read_csv('p8_data/renault.csv', sep=';', decimal=",")
display(renault.shape)
display(renault.head())
#Rename columns on the irve schema before concatenation.
#BUGFIX: the original mapped longitude_WSG84 -> Ylatitude and
#latitude_WSG84 -> Xlongitude (swapped), which would place every Renault
#station with inverted coordinates; the other sources (leclerc, effia)
#map latitude_WSG84 -> Ylatitude, so the same convention is applied here.
renault.rename(columns={
    'nom_station': 'n_station',
    'longitude_WSG84': 'Xlongitude',
    'latitude_WSG84': 'Ylatitude',
    'nom_porteur': 'n_enseigne',
    'type_connecteur': 'type_prise',
    'type_charge':'puiss_max',
    'observation': 'observations'
}, inplace=True)
renault.drop(columns=['ID_station', 'adresse_station', 'nbre_pdc'], inplace=True)
#Overwrite 'puiss_max' with the 22 kVA rating used for these stations
renault['puiss_max'] = 22
#Concatenate all sources into a single enriched dataset
irvePlus = pd.concat([irve, mobive, leclerc, auchan, effia, vinci, tesla, tesla_supercharger, nissan, renault],
                     sort=False).reset_index(drop=True)
#First 5 observations
irvePlus.head()
#Number of observations
#Here one observation represents one charging station
len(irvePlus)
#Missing-value census
irvePlus.isna().sum()
```
Les précédentes manipulations font que des valeurs et modalités doivent être manquantes, visibles ci-dessus. Notons que dans le contexte de l'étude, il n'est pas nécessaire d'avoir 100% des données suivant les observations, voyons comment traiter ces NaN.
#### Traitement NaN des variables n_amenageur, n_operateur et n_enseigne
```
#NaN audit: which enseigne values appear on rows missing the aménageur?
irvePlus[irvePlus['n_amenageur'].isna()]['n_enseigne'].unique()
#Fill 'n_amenageur' from 'n_enseigne'. Vectorised .loc assignment replaces
#the original O(n) iterrows loop with the same effect: every row whose
#enseigne matches gets the aménageur (exactly like the old elif chain,
#which also overwrote non-NaN values on matching rows).
enseigne_to_amenageur = {
    'SIPLEC': 'LECLERC',
    'EFFIA': 'EFFIA',
    'Sodetrel': 'IZIVIA',
    'Concession NISSAN': 'NISSAN',
    'NISSAN WEST EUROPE TRAINING': 'NISSAN',
    'Siège NISSAN France': 'NISSAN',
    'Renault': 'RENAULT',
}
for enseigne, amenageur in enseigne_to_amenageur.items():
    irvePlus.loc[irvePlus['n_enseigne'] == enseigne, 'n_amenageur'] = amenageur
#NaN audit: which aménageur values appear on rows missing the opérateur?
irvePlus[irvePlus['n_operateur'].isna()]['n_amenageur'].unique()
#Fill 'n_operateur' from 'n_amenageur' — vectorised .loc replaces the
#iterrows elif chain with the same effect on every matching row.
#(Note the trailing space in 'AUCHAN ' — present in the source data.)
amenageur_to_operateur = {
    'LECLERC': 'LECLERC',
    'AUCHAN ': 'AUCHAN',
    'EFFIA': 'EFFIA',
    'IZIVIA': 'IZIVIA',
    'NISSAN': 'NISSAN',
    'RENAULT': 'RENAULT',
}
for amenageur, operateur in amenageur_to_operateur.items():
    irvePlus.loc[irvePlus['n_amenageur'] == amenageur, 'n_operateur'] = operateur
#NaN audit: which opérateur values appear on rows missing the enseigne?
irvePlus[irvePlus['n_enseigne'].isna()]['n_operateur'].unique()
#Fill 'n_enseigne' from 'n_operateur' — vectorised .loc replaces the
#iterrows elif chain with the same effect on every matching row.
operateur_to_enseigne = {
    'CITEOS/FRESHMILE': 'Scame',
    'New motion': 'New motion',
    'MOUVELECVAR': 'MOUVELECVAR',
    'SAINT-LOUIS': 'SAINT-LOUIS',
    'SPIE': 'SDEY',
    'AUCHAN': 'AUCHAN',
}
for operateur, enseigne in operateur_to_enseigne.items():
    irvePlus.loc[irvePlus['n_operateur'] == operateur, 'n_enseigne'] = enseigne
```
#### Traitement NaN des variables Xlongitude et Ylatitude
```
#Rows with missing 'Xlongitude' / 'Ylatitude'
irvePlus[irvePlus['Xlongitude'].isna()]
#Manual fill of the 4 missing coordinate values.
#BUGFIX: the original used chained indexing (irvePlus['Xlongitude'][2188] = ...),
#which raises SettingWithCopyWarning and may silently write to a temporary
#copy; .loc[row, col] assigns on the frame itself.
irvePlus.loc[2188, 'Xlongitude'] = 4.0811882
irvePlus.loc[2189, 'Xlongitude'] = 4.0811882
irvePlus.loc[2188, 'Ylatitude'] = 46.0822754
irvePlus.loc[2189, 'Ylatitude'] = 46.0822754
```
#### Traitement NaN de la variable 'type_prise'
```
#Which operators own the rows where the connector type is missing?
irvePlus[irvePlus['type_prise'].isna()]['n_operateur'].unique()
```
Globalement le groupe Auchan a équipé ses parkings de bornes Type 2 + CHAdeMO, notons que l'échantillon sera donc complété selon cette hypothèse, hypothèse retenue et préférable devant les NaN.
```
#Fill the connector type for AUCHAN sites. Vectorised .loc replaces the
#original iterrows loop with the same effect: every AUCHAN row gets the
#"Type 2 + CHAdeMO" label (the hypothesis retained for this operator).
irvePlus.loc[irvePlus['n_operateur'] == 'AUCHAN', 'type_prise'] = 'Type 2 + CHAdeMO'
```
#### Traitement NaN de la variable 'puiss_max'
```
#NaN audit for the max power: the connector type lets us infer the rating
irvePlus[irvePlus['puiss_max'].isna()]['type_prise'].unique()
#Assign the max power from the connector type. Vectorised .loc replaces the
#iterrows elif chain with the same effect on every matching row (the original
#also overwrote existing values on matching rows).
puiss_by_prise = {
    'TE-T3': 22,
    'TE-T2': 22,
    'DC Chademo - 44 kWh': 44,
    'DC Chademo - 44 kWh + \nAC Type 3 - 43 kWh': 44,
}
for prise, puiss in puiss_by_prise.items():
    irvePlus.loc[irvePlus['type_prise'] == prise, 'puiss_max'] = puiss
#NaN status after this treatment
irvePlus.isna().sum()
```
L'enrichissement de l'échantillon de départ `irve` rend l'exploitation de la variable 'id_pdc' obsolète. En effet, la concaténation avec les autres sources de données permet un recensement plus complet du réseau, mais sans pouvoir obtenir une charte d'utilisation commune et complète. En l'occurrence les id des points de charge ne sont plus complets, notons donc qu'il est nécessaire d'intégrer un identifiant unique à cet usage.
```
#Assign a unique sequential id (1..n) to each charging point
irvePlus['id_borne']= np.arange(1, len(irvePlus)+1)
```
Il n'est pas nécessaire de traiter toutes les valeurs NaN, dans le contexte de l'étude les précédents traitements semblent être suffisants. Voyons immédiatement comment enrichir et optimiser ce qui peut l'être, comme par exemple les puissances et les types de prise.
#### Traitement à des fins d'uniformisation des modalités / valeurs de la variable 'puiss_max'
```
#Inspect the raw labels and values of 'puiss_max' before normalisation
irvePlus.puiss_max.unique()
```
Difficilement exploitable, on peut comprendre que chaque "acteur" à l'origine des fichiers ait pu nommer les puissances selon ses propres codes ou habitudes, mais il est nécessaire de pouvoir clarifier le tout. Notons que l'étude menée est porteuse d'un message plus perceptible quel que soit l'interlocuteur, voyons comment mettre en place un classement des puissances.
```
#Normalise 'puiss_max' labels that carry a unit suffix or a range.
#A single dict-based Series.replace supersedes the original loop, which
#called replace on the whole column once per cell (O(n) redundant passes);
#the resulting values are identical.
irvePlus['puiss_max'].replace({'36kva': '36',
                               '22kva': '22',
                               '48kva': '48',
                               '43-50': '50'}, inplace=True)
#Locate the '0', '0.0' and '60.000' placeholder values
irvePlus[irvePlus.puiss_max.isin(['0', '0.0', '60.000'])]
#Substitute each placeholder with the 22 kVA default in one pass
irvePlus['puiss_max'].replace({'0': 22, '0.0': 22, '60.000': 22}, inplace=True)
#Cast to float so the power column can be handled numerically
irvePlus['puiss_max'] = irvePlus.puiss_max.astype(float)
#Classify the power ratings into readable recharge categories
class_puiss = []
for value in irvePlus.puiss_max:
    if value <= 3.7 :
        class_puiss.append('Recharge normale 3,7 kVA')
    elif value > 3.7 and value <=20 :
        class_puiss.append('Recharge accélérée de 3,7 à 20 kVA')
    elif value == 22 :
        class_puiss.append('Recharge accélérée 22 kVA')
    elif value >= 43 and value <= 50 :
        class_puiss.append('Recharge rapide 43 à 50 kVA')
    else :
        # NOTE(review): values in (20, 22) and (22, 43) — e.g. 21 or 36 kVA —
        # also fall through to this high-power bucket; confirm this is intended.
        class_puiss.append('Recharge haute puissance 100 à 350 kVA')
#Append the classification as a new 'class_puiss' column
irvePlus['class_puiss'] = class_puiss
```
#### Traitement à des fins d'uniformisation des modalités de la variable 'type_prise'
```
#Inspect the raw connector-type labels before normalisation
irvePlus.type_prise.unique()
```
Le constat reste le même que pour les puissances, les modalités listées ci-dessus sont difficilement exploitables en l'état. Notons qu'il est nécessaire de pouvoir classifier correctement et de manière lisible les connecteurs des bornes.
```
#Lists grouping the various connector-type spellings found in the sample
#(suffix _x1/_x2/_x3 = number of distinct connector families in the label)
list_ef_x1 = ['EF', 'E/F', 'E', 'AC socket']
list_tesla_supercharger_x1 = ['Tesla Supercharger']
list_chademo_x1 = ['CHADEMO', 'CAHDEMO', 'CHAdeMO', 'Chademo', 'chademo', 'DC Chademo - 44 kWh', 'CHAdeMO-EU']
list_t2_x1 = ['T2', 'T2 câble attaché', 'Borne PULSE QC-50 de chez LAFON, Recharge Rapide sur prise T2',
              'semi-rapide', 'AC plug', 'Tesla Type 2', '22', '23']
list_t3_x1 = ['T3', 'Type 3c', 'DC Chademo - 44 kWh + \nAC Type 3 - 43 kWh']
list_combo_x1 = ['COMBO', 'Combo 2', 'Combo2', 'COMBO 2', 'combo2', 'combo', 'Borne LAFON - recharge rapide 43AC-50DC']
list_combo_ccs350_x1 = ['CCS350-CCS350-CCS350-CCS350', 'CCS350-CCS350-CCS350-CCS350-CCS350-CCS350']
list_t2_ef_x2 = ['EF - T2', 'T2 - E/F', 'E/F-T2', 'T2 - EF', 'T2/EF', 'T2-EF', 'T2-AC Triphasé', 'T2/TE', 'E/F - T2',
                 'E/F + T2', 'EF/T2', 'T2-E/F', 'TE-T2', 'T2S-E/F', 'EF-T2', 'EF - T2', 'Type 2 - E/F', 'T2 – E/F',
                 'Borne SESAME de chez Sobem / Recharge de Type C , recharge accélérée, 2 prises sur chaque PDC : E/F et T2',
                 'Borne SESAME de chez Sobem / Recharge de Type C , recharge acc?l?r?e, 2 prises sur chaque PDC : E/F et T2',
                 'E/F-T5', 'E/F-T7', 'E/F + T4', 'T2*E']
list_t3_ef_x2 = ['EF - T3', 'T3 - EF', 'E/F + T3', 'EF/T3', 'TE-T3', 'T3 et EF', 'Type 3 - E/F', 'T3-EF',
                 'EF-T3', 'E/F-T3', 'T3-E/F']
list_t2_chademo_x2 = ['T2-CHAdeMO', 'Type 2 + CHAdeMO']
list_chademo_combo_x2 = ['CHADEMO - COMBO', 'CHAdeMO-Combo', 'Combo-Chademo', 'Combo2-CHAdeMO', 'CHAdeMo-Combo']
list_combo_ccs350_chademo_t2_x3 = ['CCS350-CCS350-CCS50-CHAdeMO - T2',
                                   'CCS350-CCS350-CCS350-CCS350-CCS50-CHAdeMO - T2']
list_t2_t3_ef__x3 = ['EF - T2 - T3', 'EF - T2 - t3', 'T2-T3-EF', 'T3-EF-T2', 'T2-T2-EF']
list_chademo_combo_ef_x3 = ['A/C - Combo - CHAdeMO']
list_t2_combo_chademo_x3 = ['T2-Combo2-CHAdeMO', 'T2 Combo Chademo', 'Combo-ChaDeMo-T2', 'CHADEMO - COMBO -T2',
                            'CHAdeMO-Combo-T2 câble attaché']
#Add one boolean column per connector family, all initialised to False
#(same columns, in the same order as before)
for connector_col in ('EF', 'Type 2', 'Type 3', 'Combo',
                      'Combo CCS350', 'Chademo', 'Tesla Supercharger'):
    irvePlus[connector_col] = False
#Flag the connector columns for each charging point. The lookup table below
#is ordered exactly like the original if/elif chain, and the inner break
#preserves its first-match-wins semantics.
_prise_to_cols = [
    (list_ef_x1, ('EF',)),
    (list_t2_x1, ('Type 2',)),
    (list_t3_x1, ('Type 3',)),
    (list_combo_x1, ('Combo',)),
    (list_combo_ccs350_x1, ('Combo CCS350',)),
    (list_chademo_x1, ('Chademo',)),
    (list_tesla_supercharger_x1, ('Tesla Supercharger',)),
    (list_t2_ef_x2, ('Type 2', 'EF')),
    (list_t3_ef_x2, ('Type 3', 'EF')),
    (list_t2_chademo_x2, ('Type 2', 'Chademo')),
    (list_chademo_combo_x2, ('Chademo', 'Combo')),
    (list_combo_ccs350_chademo_t2_x3, ('Type 2', 'Chademo', 'Combo CCS350')),
    (list_t2_t3_ef__x3, ('Type 2', 'Type 3', 'EF')),
    (list_chademo_combo_ef_x3, ('Chademo', 'Combo', 'EF')),
    (list_t2_combo_chademo_x3, ('Type 2', 'Chademo', 'Combo')),
]
for i, row in irvePlus.iterrows():
    for type_list, flag_cols in _prise_to_cols:
        if row['type_prise'] in type_list:
            for flag_col in flag_cols:
                irvePlus.loc[i, flag_col] = True
            break
```
#### Traitement des valeurs manquantes identifiées dans le comptage des points de charge
```
#Which aménageurs are affected by a missing charging-point count?
irvePlus[irvePlus.nbre_pdc.isna()]['n_amenageur'].unique()
```
Notons que la diversité ci-dessus n'apporte aucune solution pour pouvoir identifier les 'nbre_pdc' manquants. L'option choisie ici, sera de comptabiliser les connecteurs (booléens) sous condition que la valeur de 'nbre_pdc' soit inconnue, dans le cas contraire la valeur d'origine sera conservée.
```
#Replace missing counts with a 0.0 placeholder
irvePlus.nbre_pdc.fillna(0.0, inplace=True)
#Replace the 0.0 placeholders with the number of connector flags set to True.
#Hoisted out of the loop: the original recomputed the row-wise sum of the
#boolean columns for every placeholder row (relying on pylab shadowing the
#built-in sum with numpy's); one vectorised pass gives identical values.
connector_cols = ['EF', 'Type 2', 'Type 3', 'Chademo', 'Combo',
                  'Combo CCS350', 'Tesla Supercharger']
true_counts = irvePlus[connector_cols].sum(axis=1)
placeholder = irvePlus['nbre_pdc'] == 0.0
irvePlus.loc[placeholder, 'nbre_pdc'] = true_counts[placeholder]
#Count the connectors by type (True = connector present on the station)
display(irvePlus['EF'].value_counts())
display(irvePlus['Type 2'].value_counts())
display(irvePlus['Type 3'].value_counts())
display(irvePlus['Chademo'].value_counts())
display(irvePlus['Combo'].value_counts())
display(irvePlus['Combo CCS350'].value_counts())
display(irvePlus['Tesla Supercharger'].value_counts())
```
#### Enrichissement de l'échantillon en intégrant une catégorisation des aménageurs
Cette étape permettra de pouvoir obtenir une vision plus explicite de qui sont les aménageurs IRVE sur notre territoire. Il semble pertinent de pouvoir mieux comprendre comment s'organise l'implantation des bornes.
```
#Sample of the variety of network developers ("aménageurs") behind the
#charging-station roll-out in France
irvePlus.n_amenageur.unique()[:30]
# Reference lists of developer ("aménageur") names, one list per target
# category; they drive the derivation of the 'categ_amenageur' variable.
# Membership is tested by exact string match against the raw data, so the
# odd spellings and mojibake variants (e.g. 'CC de la Côtičre', 'S‚olis',
# trailing spaces) must mirror the source data exactly — do not "fix" them.
# Local authorities, metropoles and public energy syndicates.
list_c_t = ['Aix-Marseille-Provence', 'BREST METROPOLE', 'CAPG', 'CAPL', 'CARF', 'CC VITRY CHAMPAGNE ET DER',
    'CC de la Côtičre', 'CCPA', 'CCPHVA', 'CCVBA', 'CELLIEU', 'CGLE', 'CHARLIEU','CHAUSSON MATERIAUX',
    'CHAZELLES SUR LYON', 'CNR', 'COMMELLE VERNAY',"Communauté Urbaine d'Arras", 'CANTAL', 'Aéroports de Paris SA',
    "Communauté d'Agglomération Douaisis Agglo","Communauté d'Agglomération Maubeuge Val de Sambre", 'SODETREL ',
    "Communauté d'Agglomération Valenciennes Métropole", "Communauté d'Agglomération du Boulonnais", 'SMOYS',
    "Communauté d'Agglomération du Pays de Saint Omer", 'Communauté de Communes Flandre-Lys', 'SMEG 30',
    'Communauté de Communes de la Haute Vallée de Chevreuse', "Communauté de Communes du Coeur d'Ostrevent",
    'Communauté de Communes du Haut-Pays Montreuillois', "Communauté de Communes du Pays d'Opale",
    'Communauté de Communes du Pays de Lumbres', "Commune d'Eguisheim",'FDEL 46', 'FDEL 46', 'FEURS',
    'FONTANÈS', 'FRAISSES', 'GENILAC', 'GOLF CLUB DE LYON', 'GPSO-MEUDON', 'Grenoble-Alpes Métropole',
    'Hauts-de-France', 'Herault Energies 34', 'ISTRES', "L'ETRAT", "L'HORME", 'LA FOUILLOUSE', 'LA GRAND CROIX',
    'LA PACAUDIÈRE', 'LA RICAMARIE', 'LA TALAUDIÈRE', 'LA VALLA EN GIER', 'LE COTEAU', 'LORETTE','Le Pont du Gard',
    'MABLY', 'MARLHES', 'MONTAGNY', 'MONTBRISON', 'MOUVELECVAR', 'MRN', 'Modulo (Mobilité Locale Durable)',
    'Montpellier Mediterranee Metropole', 'Métropole Européenne de Lille', 'NEULISE', 'ORLEANS METROPOLE',
    'PANISSIERES', 'PARIGNY', 'PERREUX','REGNY', 'RENAISON', 'RIORGES', 'ROANNE', 'ROCHE LA MOLIÈRE',
    'SABLE SUR SARTHE', "SAINT ANDRÉ D'APCHON", 'SAINT ANDRÉ LE PUY', 'SAINT BONNET LE CHÂTEAU',
    'SAINT CHRISTO EN JAREZ', 'SAINT CYR', 'SAINT ETIENNE ROCHETAILLÉE', 'SAINT ETIENNE SAINT VICTOR SUR LOIRE',
    'SAINT GALMIER', 'SAINT GENEST LERPT', 'SAINT HÉAND', 'SAINT JUST SAINT RAMBERT', 'SAINT LÉGER SUR ROANNE',
    'SAINT MARCELLIN EN FOREZ', 'SAINT MARTIN LA PLAINE', 'SAINT MAURICE EN GOURGOIS', 'SAINT PAUL EN JAREZ',
    'SAINT ROMAIN EN JAREZ', 'SAINT ROMAIN LES ATHEUX', 'SAINT SAUVEUR EN RUE', 'SAINT SYMPHORIEN DE LAY', 'SAINT-LOUIS', 'SAINTE CROIX EN JAREZ',
    'SALVIZINET', 'SAVIGNEUX', 'SDE 18', 'SDE 23', 'SDE 56', 'SDE 65', 'SDE07', 'SDE09', 'SDE29', 'SDE65', 'SDE76',
    'SDEA10', 'SDED', 'SDEE48 48', 'SDESM', 'SDET 81', 'SDEY', "SDEY Syndicat Departemental d'Energies de l'Yonne",
    'SE60', 'SEDI', 'SIDELC', 'SIED70', 'SIEDA 12', 'SIEEEN', 'SIEGE 27', 'SIEIL37', 'SIEML 49', 'SIPPEREC',
    'SMA PNR Gatinais', 'SMED 13', 'SORBIERS', 'SOREGIES', 'SURY LE COMTAL', 'SYADEN 11', 'SYANE', 'SYDED',
    'SYDEEL66 66', 'SYDESL', 'SYDEV 85', 'SYME05', 'Se 61', 'TE 53', "TERRITOIRE D'ENERGIE 90", 'Séolis', 'S‚olis',
    "Syndicat Départemental d'Énergie de Loire-Atlantique (SYDELA)", 'FDEE 19', 'SDEPA 64', 'SDEG 16',
    "Syndicat Départemental d'Énergies d'Eure et Loir (SDE28)", 'SDEE 47', 'SDEER 17', 'SYDEC 40',
    "Syndicat Intercommunal de Distribution d'Electricité de Loir-et-Cher (SIDELC41)", 'SDE 24', 'SDEEG 33',
    "Syndicat de l'Énergie de l'Orne (TE61)", 'Toulouse Metropole', 'UNIEUX', 'USEDA', 'USSON EN FOREZ',
    'VEAUCHE', 'VILLARS', 'VILLE DE CAVAILLON', 'VILLE DE GAP', 'VILLE DE ROSHEIM', 'VILLEREST', "Ville d'Hazebrouck",
    'Ville de Garches', 'Ville de Montrouge', 'Ville de Revel', 'Ville de Saverne', 'Ville de Viriat',
    # NOTE(review): the entries from here to the end of list_c_t (hotels,
    # campsites, casinos...) look misplaced in the local-authority list and
    # will be categorised as 'Collectivités territoriales' — confirm intent.
    'Arcs 1950 Le Village - Parking', 'B&B Hôtel Lyon Eurexpo Chassieu', "Bastide Selva - Maison d'Hôtes",
    'Baumanière les Baux de Provence', 'Belle Isle sur Risle','Benvengudo Hôtel Restaurant',
    'Best Western Amarys Rambouillet', 'Best Western Golf Hôtel Lacanau','Best Western Grand Hôtel de Bordeaux',
    'Best Western Hotel Alexandra', 'Best Western le Lavarin', 'Best Western Plus - Hôtel de la Paix',
    'Best Western Plus - Hôtel de la Régate', 'Best Western Plus Cannes Riviera & spa',
    'Best Western Plus Excelsior Chamonix', 'Best Western Plus Santa Maria', 'Brasserie des Eclusiers',
    'Buffalo Grill de Foix', 'Caffe Mazzo', 'Camping BelleRive', 'Camping du Domaine de Massereau',
    "Camping Ecolodge de l'Etoile d'Argens", 'Camping La Fontaine du Hallate en Morbihan', "Camping La Roche d'Ully",
    'Camping Le Brasilia', 'Camping Palmira Beach', 'Camping Sunêlia Berrua', 'Camping Sunêlia Le Fief *****',
    "Casino d'Évian - Evian Resort", "Casino d'Andernos - Le Miami", 'Casino De Plombières-Les-Bains',
    'Casino de Pornichet', 'Casino Joa Antibes La Siesta', 'Casino JOA Le Boulou', 'Casino Le Domaine de Forges',
    'Casino Partouche de Boulogne-sur-Mer', 'Casino Partouche de Palavas Les FLots','Castel Camping Le Brévedent',
    'Castel Maintenon']
# Car manufacturers.
list_auto = ['IONITY', 'Tesla', 'A Cheda', 'NISSAN', 'RENAULT']
# Parking operators.
list_parking = ['EFFIA', 'Alyse Parc Auto', 'Parking Bodin', 'Parking François 1er Interparking', 'TM _Parking']
# Shopping centres and retail.
list_centres_commerciaux = ['Centre commercial Grand Var', 'GEMO', 'Sičge Intermarché', 'Supermarchés COLRUYT', 'LECLERC', 'AUCHAN ', 'LECLERC',
    'Centre Commercial Carrefour Villiers en Bière', 'Centre commercial Les Eléis', 'Centre Commercial Parly 2',
    'Centre Commercial Waves Actisud', 'E-Leclerc Paray-le-Monial', 'Hyper U Sierentz', "Intermarché l'Isle sur le Doubs",
    'Intermarché Mont près Chambord', 'Intermarché Ramonville', 'intermarché verneuil',
    'Parc Commercial Les Portes de Soissons', 'Usines Center', 'CASA']
# Private charging operators.
list_op_prive = ['SODETREL', 'IZIVIA', 'ELECTRIC 55 CHARGING', 'PLUS DE BORNES', 'BE TROM', 'BOEN', 'DOCUWORLD']
# Miscellaneous companies (mostly wine and champagne producers).
list_entreprise_diverse = ["Cattin - Grands Vins & Crémants d'Alsace", 'Caves Carrière', 'Champagne Bergere', 'Champagne Drappier',
    'Champagne J de Telmont', 'Champagne Paul Dethune', 'Champagne Pertois-Moriset',
    'Domaine Viticole Château de Chamirey', 'Dopff au Moulin', 'Jet Systems Hélicoptères Services']
# Hotels, restaurants and tourism venues.
list_tourisme = ["A L'Ecole Buissonière", 'Aa Saint-Omer Golf Club', 'Abbaye de Bussiere sur Ouche ', 'Abbaye de Talloires',
    'Aigle des Neiges Hotel', 'Altapura', 'Aparthotel Adagio Genève Saint Genis Pouilly', 'Atmosphères Hôtel',
    'Au Grès des Ouches', 'Au Pont Tournant', 'Auberge Bienvenue', 'Auberge Bressane de Buellas',
    'Auberge de Cassagne & Spa ', 'Auberge de la Petite Reine', 'Auberge du Lac', 'Auberge du Mehrbächel',
    'Auberge du Vieux Puits', 'Auberge Edelweiss', 'Auberge Ostapé', 'Auberge Sundgovienne', 'Aux Terrasses',
    'Avancher Hôtel & Lodge, Restaurant & Bar', 'Château Beauregard', "Château d'Audrieu", "Château d'Igé****",
    "Château d'Isenbourg Hôtel Restaurant", 'Château Dauzac', 'Château de Beaulieu', 'Château de Belmesnil',
    'Château de Challanges', 'Château de Chapeau Cornu', 'Château de Chenonceau', 'Château de Clérac',
    'Château de Germigney R&C Port-Lesney', 'Château de Gilly', "Château de l'Hoste", "Château de l'Ile",
    'Château de la Presle', 'Château de la Treyne - Relais & Château', 'Château de Locguénolé',
    'Château de Massillan', 'Château de Nazelles', 'Château de Noirieux', 'Château de Quesmy',
    'Château de Riell - Relais & Châteaux', 'Château de Sacy', 'Château de Sissi', 'Château de St Paul',
    'Château de Valmer', 'Château de Vault-de-Lugny', 'Château des Ducs de Joyeuse', 'Château du Galoupet',
    'Château Fombrauge', 'Château Guiraud', 'Château Hôtel le Boisniard', 'Château Hourtin-Ducasse',
    'Château La Coste', 'Château La Fleunie Hôtel/Restaurant', 'Château La Tour Carnet', 'Château Laborde Saint-Martin',
    'Château Pape Clément', 'Château Sainte Sabine', 'Château Soutard', 'Château Talluy', 'Château Vignelaure',
    'Châteaux de la Messardiere',"Chalet L'Orignal", 'Chalet M la Plagne', 'Chalet Marano Hôtel Restaurant & Spa',
    "Chalet-Hôtel Le Chamois d'Or", "Chambre d'hôtes Le Crot Foulot", 'Charmhotel Au Bois le Sire',
    'Chateau de Courban & Spa Nuxe', 'Château des Demoiselles', 'Chateau MontPlaisir', 'Chateau Prieuré Marquet',
    'Circuit Paul Ricard', 'Circuits Automobiles LFG', 'Clos des Sens', 'Clos Marcamps', 'Club Les Ormes', 'CosyCamp',
    'Courtyard Paris Roissy CDG', 'Crowne Plaza Montpellier Corum', 'Domaine Château du Faucon',
    "Domaine d'Auriac - Relais & Châteaux", "Domaine d'Essendiéras", 'Domaine de Barive', 'Domaine de Barres',
    'Domaine de Bournel', 'Domaine de Cabasse', 'Domaine de Crécy', 'Domaine de Divonne', "Domaine de l'Hostreiere",
    'Domaine de la Corniche', "Domaine de la Forêt d'Orient - Hôtel Golf & Spa", 'Domaine de la Poignardiere',
    'Domaine de la Tortinière', 'Domaine de la Tour', 'Domaine de Manville', 'Domaine de Mialaret',
    'Domaine de Rochevilaine', 'Domaine de Saint-Géry', 'Domaine de Vaugouard', 'Domaine de Verchant',
    'Domaine des Andéols', 'Domaine des Etangs', 'Domaine des Séquoias', 'Domaine du Bailli',
    'Domaine du Château de Meursault', 'Domaine du Clos Fleuri', 'Domaine du Moulin', 'Domaine du Prieuré',
    'Domaine du Revermont', 'Domaine Lafage', 'Domaine Selosse - Hôtel Les Avisés', 'Emerald Stay Apartments Morzine',
    'Espace Montagne Grenoble', 'Eurotel', 'Evian Resort Golf Club', 'Ferme de la Rançonnière', 'Flocons de Sel',
    'Gîte des Prés de Garnes', 'Gîte La Mystérieuse Ponts sur Seulles', 'Gîtes Bon Air Chalets Piscine Spa',
    'Golden Tulip Le Grand Bé Saint Malo', 'Golden Tulip Sophia Antipolis', 'Golf Cap Malo', 'Golf Club Omaha Beach',
    'Golf de Barbaroux - Open Golf Club', 'Golf de la Prée la Rochelle', 'Golf de la Sainte Baume - Open Golf Club',
    'Golf de Marseille la Salette - Open Golf Club', 'Golf de Servanes - Open Golf Club',
    'Golf du Touquet - Open Golf Club', 'Golf Hôtel Restaurant du Kempferhof', 'Golf International de Grenoble',
    'Golf Les Gets', 'Grand Hôtel des Alpes', 'Grand Hôtel des Thermes', 'Grand Hotel La Cloche',
    'Grand Parc du Puy du Fou', 'Hôtel-Restaurant & SPA Les Gentianettes', 'Hôtel-Restaurant Kleiber',
    'Hôtel-Restaurant Le Grand Turc', 'Hôtel-Restaurant Le Mas du Terme', 'Hôtel & Spa Best Western Plus - Chassieu',
    "Hôtel & Spa L'Equipe", 'Hôtel & Spa Les Violettes', 'Hôtel 202', 'Hôtel A Madonetta', 'Hôtel Akena',
    'Hôtel AKENA de Saint-Witz', 'Hôtel Akena Dol de Bretagne', 'Hôtel Ampère', 'Hôtel Atena',
    'Hôtel Au Coeur du Village', 'Hôtel B&B Colmar Expo', 'Hôtel Barrière - le Grand Hôtel Dinard',
    'Hôtel Barrière Le Normandy Deauville', 'Hôtel Barrière Le Westminster', 'Hôtel Best Western Plus Metz Technopôle',
    'Hôtel Cézanne', 'Hôtel Cala Di Greco', 'Hôtel Cap-Estel', 'Hôtel Capao', 'Hôtel Castel Burgond',
    'Hôtel Castel Mouisson', 'Hôtel Cayrons', 'Hôtel Château de la Begude - Golf Opio Valbonne',
    'Hôtel Château de la marlière', 'Hôtel Chais Monnet', 'Hôtel Champs Fleuris', 'Hôtel Chapelle et Parc',
    'Hôtel Chez Camillou - Restaurant Cyril ATTRAZIC', 'Hôtel Cour des Loges', "Hôtel d'Angleterre",
    'Hôtel Daumesnil-Vincennes', 'Hôtel de France', 'Hôtel de Greuze', 'Hôtel de la Cité', 'Hôtel des Dunes',
    'Hôtel des Princes', 'Hôtel Diana Restaurant & Spa', 'Hôtel du Bois Blanc', 'Hôtel du Cap-Eden-Roc',
    'Hôtel du Palais', 'Hôtel Escapade', 'Hôtel Fleur de Sel', 'Hôtel Golf Château de Chailly', 'Hôtel Ha(a)ïtza',
    'Hôtel Husseren-les-Châteaux', 'Hôtel ibis Besançon Centre Ville', 'Hôtel Juana',
    'Hôtel Kyriad Prestige Clermont-Ferrand', 'Hôtel Kyriad Prestige Lyon Saint-Priest Eurexpo',
    'Hôtel Kyriad Prestige Strasbourg Nord', 'Hôtel Kyriad Prestige Vannes', "Hôtel l'Angleterre",
    "Hôtel L'Estelle en Camargue ", 'Hôtel La Chaumière', 'Hôtel La Ferme', "Hôtel La Ferme D'Augustin",
    'Hôtel La Sivolière', 'Hôtel La Villa', 'Hôtel La Villa Douce', 'Hôtel la Villa K', 'Hôtel Le Bellevue',
    'Hôtel Le Bristol Paris', 'Hôtel Le Burdigala', 'Hôtel le Cèdre', 'Hôtel Le Capricorne', 'Hôtel Le Cep',
    'Hôtel le Clos', 'Hôtel le M de Megève', 'Hôtel Le Mas des Herbes Blanches', 'Hôtel Le Morgane',
    'Hôtel le Pic Blanc', 'Hôtel Le Relais des Champs', 'Hôtel Le Rivage', 'Hôtel Le Royal Barrière Deauville',
    'Hôtel Le Vallon de Valrugues & Spa', 'Hôtel Les Airelles', 'Hôtel Les Bartavelles & SPA', 'Hôtel Les Bories & Spa',
    'Hôtel Les Bouis', 'Hôtel Les Colonnes', 'Hôtel Les Esclargies', 'Hôtel Les Glycines et Spa', 'Hôtel Les Gravades',
    'Hôtel Les Maritonnes Parc & Vignoble', 'Hôtel Les Trésoms', 'Hôtel Lodges Ste Victoire & Restaurant St-Estève',
    'Hôtel Logis Châteaudun', 'Hôtel Lyon Métropole', 'Hôtel Marriott Roissy Charles de Gaulle Airport',
    'Hôtel Mercure Côte Ouest Thalasso & Spa', 'Hôtel Mercure Caen Centre', 'Hôtel Mercure Epinal Centre',
    'Hôtel Mercure Omaha Beach', 'Hôtel Mercure Reims Centre Cathedrale', 'Hôtel Miramar', 'Hôtel Mont-Blanc',
    'Hôtel Negrecoste', 'Hôtel Parc Beaumont ', 'Hôtel Parc Victoria', 'Hôtel Parkest', 'Hôtel Radisson Blu 1835',
    'Hôtel Radisson Blu Biarritz', "Hôtel Restaurant A l'Etoile", 'Hôtel Restaurant Alliance Couvent des Minimes',
    'Hôtel Restaurant Au Boeuf Rouge', 'Hôtel Restaurant de la Tabletterie', 'Hôtel Restaurant des Bains',
    'Hôtel Restaurant Edward 1er', 'Hôtel Restaurant Kyriad Montauban', 'Hôtel Restaurant La Ferme de Cupelin',
    'Hôtel Restaurant Le Beauregard', 'Hôtel Restaurant Le Cerf', 'Hôtel Restaurant Le Noirlac',
    'Hôtel Restaurant Le Tropicana', 'Hôtel Restaurant Les Oliviers', 'Hôtel Royal - Evian Resort',
    'Hôtel Sezz Saint-Tropez - Restaurant Colette', 'Hôtel Stella', 'Hôtel U Capu Biancu',
    'Hôtel, Restaurant Le Belvedere', 'Holiday Inn Blois centre ', 'Holiday Inn Express Paris - Velizy',
    'Holiday Inn Lyon - Vaise', 'Honfleur Normandy Outlet', 'Hostellerie de la Pointe Saint Mathieu',
    'Hostellerie de Levernois', 'Hostellerie La Briqueterie', 'Hostellerie La Farandole', 'Hostellerie Le Cèdre',
    'Hotel & Spa Le Dahu', 'Hotel Alpen Roc', 'Hotel Bel Air - Brasserie La Terrasse', 'Hotel Castelbrac',
    'Hotel du Clocher Villa Savoy ***', 'Hotel Ibis Manosque Cadarache', 'Hotel ibis Saint Brieuc Yffiniac',
    'Hotel Imperial Garoupe', 'Hotel Koh-I Nor', "Hotel L'Alta Peyra", 'Hotel Le Club de Cavalière & Spa',
    'Hotel Le Kaïla', 'Hotel le Manoir Saint Michel', 'Hotel Le Mans Country Club', 'Hotel le Montrachet',
    'Hotel Le Pigonnet', 'Hotel Le Tillau', 'Hotel Les Bains de Cabourg - Thalazur', 'Hotel Maison Bras',
    'Hotel Marina Corsica Porto Vecchio', 'Hotel Mercure Bordeaux Château Chartrons', 'Hotel Normandie',
    'Hotel Restaurant de la poste', 'Hotel Restaurant Ferme Blanche', 'Hotel Restaurant Le Viscos',
    'Hotel Restaurant Spa Le Rabelais', 'Hotel Royal Riviera', 'hotel Taj-I Mah*****',
    'Hotel The Originals Domaine de La Groirie', 'Hotel The Originals Nantes Ouest Agora',
    'Hotel-Restaurant Au Chêne Vert', 'Hyatt Paris Madeleine', 'Ibis Cergy Pontoise Le Port', 'Ibis La Roche sur Yon',
    'Ibis Roanne', 'Ibis Styles - Mulsanne', 'Ibis Styles Mâcon Centre', 'Ibis Styles Paris Mairie de Clichy',
    'Ibis Styles Tours Sud', 'Inter Hotel Acadie tremblay en france', 'Inter-Hôtel Alteora site du Futuroscope',
    'Inter-Hôtel de la Chaussairie', 'Inter-Hôtel Le Cap', 'Inter-Hôtel Roanne Hélios', 'Inter-Hotel Albi le Cantepau',
    'Inter-Hôtel du Lac', 'Inter-Hotel Ecoparc Montpellier Est', 'Inter-Hotel Saint Martial',
    'Isulella Hôtel & Restaurant', 'Jiva Hill Resort', "Jum'Hôtel - Restaurant Atelier Grill",
    'Kon Tiki - Riviera Villages ', 'Kube Hôtel Saint-Tropez', 'Kyriad Clermont-Ferrand Centre',
    "L'Apogée Courchevel", "L'Assiette Champenoise", "L'Atelier", "L'atelier d'Edmond",
    "L'Enclos Béarnais Maison d'hôtes", "L'Impérial Palace", "L'Oustalet Gigondas", "l'Oustau de Baumanière",
    'La Bastide de Gordes', 'La Bastide de Tourtour Hôtel & Spa ', 'La Côte Saint Jacques & Spa',
    'La Cheneaudière & Spa - Relais & Châteaux', 'La Coquillade Provence Village', 'La Ferme du Chozal',
    'La Gentilhommiere', 'La Grande Maison de Bernard Magrez ', 'La Grande Terrasse Hôtel & Spa Mgallery',
    'La Guitoune', 'La Jasoupe', 'La Maison de Rhodes', 'La Malouiniere des Longchamps', 'La Pinède Plage',
    'La Pyramide Patrick Henriroux', 'La Réserve', 'La Réserve des Prés Verts Massages & Spa', 'La Réserve Ramatuelle',
    'La Signoria - Relais & Châteaux', 'La Tannerie de Montreuil', 'La Vaucouleurs Golf Club', 'Lagardère Paris Racing',
    'Le Barn', 'Le Beau Rivage', 'Le Binjamin', 'Le Bois Joli', 'Le Brittany & Spa', 'Le Château de la Tour',
    'Le Chambard Relais & Châteaux', 'Le Clos de la Ribaudiere', 'Le Clos de Serre', 'Le Clos des Délices',
    'Le Clos Saint Vincent', 'Le Clos Saint-Martin Hôtel & Spa', "Le Couvent des Minimes Hotel &SPA L'Occitane",
    'Le Domaine de Montjoie', 'Le Domaine des Prés Verts Massages & Spa', "Le Fouquet's", 'Le Gîte de Garbay ',
    'Le Grand Aigle Hôtel & Spa', "Le Grand Casino d'Annemasse ", 'Le Grand Hôtel Cannes',
    "Le Grand Hôtel de l'Espérance", 'Le grand Monarque', 'Le Hameau Albert 1er', 'Le Hommet',
    'Le Majestic Barrière Cannes', 'Le Manoir de Kerbot', 'Le Manoir des Impressionnistes',
    'Le Mas Candille, Relais & Châteaux', 'Le Moulin de Vernègues', 'Le Palace de Menthon', 'Le Petit Nice Passedat',
    'Le Phebus & Spa', 'Le Pigeonnier du Perron', 'Le Prieuré', 'Le Prieuré des Sources',
    'Le Refuge des Près Verts Massages & Spa', 'Le Relais Bernard Loiseau', 'Le Relais du Boisniard', 'Le Richelieu',
    'Le Saint-Barnabé Hôtel et Spa ', 'Le Saint-James', 'Les Châtaigniers de Florac', 'Les Cures Marines',
    'Les Etangs de Corot', 'Les Fermes de Marie', 'Les Hôtels de Beauval', 'Les Haras Hôtel ', 'Les Hauts de Loire',
    'Les Maisons de Bricourt', 'Les Manoirs Tourgeville', 'Les Orangeries', "Les Prés d'Eugénie - Michel Guérard",
    'Les Prairies de la Mer', 'Les Sources de Caudalie', 'Les Terrasses du Port', "Les Vignobles de l'Escarelle",
    'Logis Aigue Marine Hôtel', "Logis Au Comté D'Ornon", 'Logis Auberge de la Diège', 'Logis Auberge de la Tour',
    'Logis Château de la Motte-Liessies', 'Logis Château de Labro', 'Logis Domaine du Relais de Vincey',
    'Logis Grand Hôtels des Bains', 'Logis Hôtel & Spa Marina Adelphia', 'Logis Hôtel Acotel', "Logis Hôtel AR Milin'",
    'Logis Hôtel Arcombelle', 'Logis Hôtel Bellevue', 'Logis Hôtel Center Brest', 'Logis Hôtel de la Clape',
    'Logis Hôtel des Châteaux', 'Logis Hôtel des Elmes - Restaurant la Littorine', 'Logis Hôtel du Cheval Blanc',
    'Logis Hôtel Le Prince Noir', 'Logis Hôtel Le Régent', 'Logis Hôtel le Régina', 'Logis Hôtel le Vernay',
    'Logis Hôtel les 2 Rives', 'Logis Hôtel Les Pierres Dorées', 'Logis Hôtel Murtel',
    'Logis Hôtel Restaurant Au cheval blanc', 'Logis Hôtel Restaurant La Brèche de Roland',
    'Logis Hôtel Restaurant Spa Les Peupliers', 'Logis Hôtel Taillard', 'Logis Hostellerie du Périgord Vert',
    'Logis Hostellerie Saint Vincent ', 'Logis Hotel le Céans', 'Logis Hotel Restaurant des Acacias',
    "Logis L'Abreuvoir Hôtel Restaurant", "Logis L'Hôtel D'Arc", "Logis L'Orée du Bois", 'Logis La Résidence',
    'Logis La Source du Mont', 'Logis Lacotel', 'Logis Le Moulin de la Coudre',
    'Logis Le Moulin des Gardelles Hôtel-Restaurant', 'Logis Le Relais des Dix Crus', 'Logis Les Hauts de Montreuil',
    'Logis Mas de la Feniere', 'Logis Relais du Gué de Selle', 'Lorraine Hôtel',
    'M Gallery - La Cour des Consuls Hotel & Spa', 'Maison Addama', 'Maison Cazes', "Maison d'Hotes La Cimentelle",
    'Maison des Algues', 'Maison Lameloise', 'Maison Pic', 'Mama Shelter', 'Mama Shelter Lyon',
    'Mama Shelter Marseille', 'Manoir de Gressy', 'Manoir de la Poterie & SPA', 'Manoir de Pancemont',
    'Manoir de Surville', 'Manoir Plessis Bellevue', 'Mas de Chastelas', 'Mas de la Crémaillère',
    'Mas de la Grenouillère', 'Mas la Jaina', 'Mercure Bourges Hôtel de Bourbon', 'Mercure Cherbourg Centre Port',
    'Mercure Grand Hotel des Thermes', 'Mercure Lille Centre Vieux Lille', 'Mercure Lyon Genas Eurexpo',
    'Mineral Lodge', 'Misincu', 'MOB Hotel Lyon', 'Monte Carlo Beach Hôtel', 'Musée Würth France Erstein',
    'Najeti Hôtel Château Tilques', "Najeti Hôtel de l'Univers", 'Najeti Hôtel La Magnaneraie',
    'New Cottage & Spa de nage', 'Nouvel Hôtel', 'Novotel Chartres', 'Novotel La Rochelle Centre',
    'Novotel Marseille Centre Prado Vélodrome', 'Novotel Noisy Marne la Vallée', 'Novotel Spa Rennes Centre Gare',
    'Novotel Thalassa Dinard', 'Orée de Chartres', "Pêche de Vigne Spa et Maison d'Hôtes", 'Parc zoologique Cerza',
    'Paris International Golf Club', 'Petit Hôtel Confidentiel', 'Pierre et Vacances Premium Le Crotoy',
    "Pierre et Vacances Premium Les Terrasses d'Eos", "Pierre et Vacances Premium Presqu'Ile de la Touques",
    'Pizza Del Arte', 'Plaza Madeleine', 'Punta Lara', "Qualys Hôtel d'Alsace", "Qualys Hôtel du Golf de l'Ailette",
    'Qualys-Hotel Grand Hôtel Saint Pierre', 'Résidence de France', 'Résidence Le Balamina', 'Radisson Blu Hôtel Nice',
    'Relais & Châteaux - La Ferme Saint Siméon', 'Relais & Châteaux Georges Blanc Parc & Spa', 'Relais Christine',
    'Relais du Silence - Château de Perreux', 'Relais du Silence - Le Mas de Guilles',
    'Relais du Silence Domaine du Normandoux', 'Relais du Silence Ker Moor Préférence',
    'Relais du Silence La Mainaz Hôtel Restaurant', 'Relais du Silence Les Vignes de la Chapelle',
    'Relais du Silence Manoir de la Roche Torin', 'Relais Thalasso Chateau des Tourelles',
    'Relais Thalasso Hotel Atalante', 'Renaissance Arc de Triomphe', 'Resort Barrière Lille',
    'Resort Barrière Ribeauvillé', 'Resort Résidence Pierre', 'Restaurant Del Arte', 'Restaurant DEL ARTE Ploërmel',
    'Restaurant La Chaudanne', 'Restaurant La Ferme Saint Michel', "Restaurant La Grande Cascade - L'Auberge du Bonheur",
    'Restaurant Les Amis du Lac', 'Ristorante Del Arte', 'Saint Charles Hôtel & Spa', 'Saint James Paris ',
    'SAS Louis Moreau', 'Shangri-La Hotel Paris', 'SNIP Yachting', 'Splendid Hôtel & Spa', 'Stiletto Cabaret',
    'Stras Kart', 'Sunélia Aluna Vacances', 'Sunêlia Camping du Ranc Davaine', 'Sunêlia Domaine de la Dragonnière',
    'Sunêlia Domaine Les Ranchisses', 'Sunêlia La Ribeyre', 'Sunêlia Les 3 Vallées',
    'Sunêlia Perla di Mare camping restaurant', 'Télécabine du Mont-Chéry', 'Terre Blanche Hotel Spa Golf Resort',
    'Territoires Charente - ZAC Montagnes Ouest', "Toison d'Or", 'Valthoparc', 'Vichy Célestins Spa Hôtel',
    'Villa Duflot', 'Villa Florentine - Restaurant Les Terrasses de Lyon', 'Villa Garbo Cannes', 'Villa La Coste',
    'Villa Maïa', 'Villa Magnolia Parc', 'Villa Mas St Jean', 'Villa Morelia', 'Villa Regalido', 'Villa René Lalique',
    'Village Les Armaillis', 'Vincent Cuisinier de Campagne', 'Yelloh Village Camping Le Sérignan-Plage',
    'Yelloh Village Les Grands Pins', 'Yelloh Village Les Tournels']
# Add a 'categ_amenageur' variable mapping each developer name to a broader
# category. The original looped over the Series while calling per-value
# Series.replace(..., inplace=True), which both mutates the Series being
# iterated and rescans the whole column for every element (O(n*m)); a single
# ordered map() is equivalent and safe. Order matters: the first matching
# category wins, exactly as in the original if/elif chain.
_categ_rules = [
    (frozenset(list_c_t), 'Collectivités territoriales'),
    (frozenset(list_auto), 'Constructeurs Automobiles'),
    (frozenset(list_parking), 'Sociétés de Parking'),
    (frozenset(list_centres_commerciaux), 'Centres commerciaux'),
    (frozenset(list_op_prive), 'Opérateurs privés'),
    (frozenset(list_entreprise_diverse), 'Entreprises diverses'),
    (frozenset(list_tourisme), 'Hôtels, Restaurants…'),
]

def _categorize_amenageur(name):
    """Return the category label for a developer name, or the name unchanged."""
    for members, label in _categ_rules:
        if name in members:
            return label
    return name  # unknown names (and NaN) pass through, as in the original

irvePlus['categ_amenageur'] = irvePlus['n_amenageur'].map(_categorize_amenageur)
```
#### Enrichissement de l'échantillon en intégrant code département, département et région
L'API Google Géocoding a été utilisée de manière à pouvoir extraire les données de géolocalisation attendues. Après quelques essais, plusieurs coordonnées 'Latitude' et 'Longitude' ont pu être identifiées comme non conformes (inversion de coordonnées, problème de format, etc…), un traitement au cas par cas de ces anomalies a été fait pour pouvoir utiliser l'API.
```
# Fix a few atypical coordinate strings before the float conversion.
irvePlus['Ylatitude'].replace("43*96228900", 43.96228900, inplace=True)
irvePlus['Xlongitude'].replace('6?07\'44.1"E', 6.07441, inplace=True)
irvePlus['Xlongitude'].replace('6›09\'34.8"E', 6.09348, inplace=True)
# Cast latitude and longitude to float.
irvePlus['Ylatitude'] = irvePlus['Ylatitude'].astype(float)
irvePlus['Xlongitude'] = irvePlus['Xlongitude'].astype(float)
# Manual corrections of anomalous observations found after a few geocoding
# attempts: index -> (latitude, longitude), applied in one loop instead of
# one pair of .loc assignments per row.
manual_coord_fixes = {
    1442: (43.279831, 6.577639),
    1477: (43.279831, 6.577639),
    1505: (43.279831, 6.577639),
    2059: (45.889087, 4.893406),
    2078: (47.031041, 5.108918),
    8527: (43.608195, 5.003735),
    8543: (43.608195, 5.003735),
    10071: (46.3026926, 4.8321937),
    10072: (46.3027089, 4.8234389),
    10073: (46.3026926, 4.8321937),
    10074: (46.276451, 4.038723),
    10075: (46.276451, 4.038723),
    10076: (46.3027089, 4.8234389),
    13671: (45.271378, 0.043441),
    13672: (45.271378, 0.043441),
    13683: (45.886326, 0.582253),
    13684: (45.886326, 0.582253),
}
for row_idx, (lat, lon) in manual_coord_fixes.items():
    irvePlus.loc[row_idx, 'Ylatitude'] = lat
    irvePlus.loc[row_idx, 'Xlongitude'] = lon
```
#### Attention !
__Le code suivant nécessite une clé d'API Google Geocoding, non mise à disposition.
La variable "list_cp" a été sauvegardée pour éviter de relancer le script (environ
1 heure d'exécution) à chaque exécution du Notebook.__
```
%%time
#Code permettant de préciser les codes postaux des bornes de recharge de l'échantillon
from urllib.request import urlopen
import sys
import json
from sys import stdout
from time import sleep
list_cp = []
for i, row in irvePlus.iterrows():
key = "*********************************"
url = "https://maps.googleapis.com/maps/api/geocode/json?"
url += "latlng=%s,%s&sensor=false&key=%s" % (row['Ylatitude'], row['Xlongitude'], key)
v = urlopen(url).read()
j = json.loads(v)
components = j['results'][0]['address_components']
for c in components:
if "postal_code" in c['types']:
cp = c['long_name']
list_cp.append(cp)
else:
pass
sys.stdout.write('\r' "Progress. "+ str(i+1) + "/" +str(len(irvePlus)) + " >>>>>>> ")
sys.stdout.flush()
```
Progress. 16112/16112 CPU times: user 4min 42s, sys: 24.6 s, total: 5min 7s
Wall time: 1h 15min 19s
A partir de la liste 'list_cp' on peut modifier les données de manière à obtenir les codes des départements, et donc enrichir l'échantillon d'une localisation selon les départements du pays.
```
# Reload the saved postal-code list (the geocoding run takes ~1 hour).
import pickle
#pickle.dump(list_cp, open('p8_datatable/list_cp.pickle', 'wb'))
with open('p8_datatable/list_cp.pickle', 'rb') as f:
    list_cp = pickle.load(f)
# Build the département-code list: the first two characters of each postal
# code. str(c) works whether list_cp holds strings or numbers; the original
# list_cp.astype(str) only works if the pickle holds a numpy array/Series —
# a plain Python list has no astype.
cd = [str(c)[:2] for c in list_cp]
# Add both new variables to the sample.
irvePlus['code_postal'] = list_cp
irvePlus['code_dpt'] = cd
# Quick look at a few observations.
irvePlus[6000:6005]
# Distinct département codes.
irvePlus.code_dpt.unique()
# Strip the leading zero ('01' -> '1', ...) so the later join works without
# mismatches. One vectorized replace instead of mutating the Series while
# iterating over it, as the original loop did.
code_modif = ['01', '02', '03', '04', '05', '06', '07', '08', '09' ]
irvePlus['code_dpt'] = irvePlus['code_dpt'].replace({c: c[1:] for c in code_modif})
# Corsica: split the generic '20' into 2A and 2B.
irvePlus.code_dpt.replace('20', '2A', inplace=True)
code_dpt_2b = [14106, 14107, 14662, 14663, 15070, 15071, 15377, 15378, 15379, 15561, 15562, 15799, 15800]
# Only touch indices actually present, matching the original loop's
# membership test (plain .loc on absent labels would raise/add rows).
present_2b = irvePlus.index.intersection(code_dpt_2b)
irvePlus.loc[present_2b, 'code_dpt'] = '2B'
# Enrichment with département and région names via 'departements-francais.csv'
# Source : https://www.regions-et-departements.fr/departements-francais
dpt_fr = pd.read_csv('p8_data/departements-francais.csv', sep=';')
dpt_fr.rename(columns={'NUMÉRO': 'code_dpt', 'NOM': 'dpt', 'REGION': 'region',
                       'SUPERFICIE (km²)': 'superficie_km2', 'POPULATION': 'nbre_habitant'}, inplace=True)
dpt_fr.head()
# Left join between the sample and the département/région reference table.
irvePlus = pd.merge(irvePlus, dpt_fr[['code_dpt', 'dpt', 'region', 'superficie_km2', 'nbre_habitant']],
                    how='left', on = "code_dpt")
# Last 5 rows.
irvePlus.tail()
# Estimated number of charging station pools.
irvePlus.id_station.nunique()
# Estimated number of charging stations.
irvePlus.id_borne.nunique()
len(irvePlus.n_station.unique())
# Estimated number of charging points.
irvePlus.nbre_pdc.sum()
```
Notons que, selon les études, la répartition établie ci-dessus diverge, parfois par abus de langage entre borne de recharge et point de charge. Ici, il n'est pas réalisable d'avoir une granularité plus fine qui pourrait prendre en compte l'état de service de la borne.
```
#Sauvegarde
irvePlus.to_csv('p8_datatable/irvePlus.csv')
```
### Prévision du nombre de Points de charge à 5 ans
A partir de l'échantillon 'irve_type' basé sur des chiffres trimestriels, l'échantillon sera re-calibré par mois afin d'avoir une granularité plus fine des données.
```
# Recall of the 'irve_type' sample seen at the start of Mission 2.
irve_type
# Build the forecasting sample: monthly resampling of the public
# charging-point counts. The quarterly figures land on quarter-end months;
# every other month sums to 0 for now.
irve_type_month = (
    irve_type[['Time', 'Accessible au public']]
    .set_index('Time')
    .resample('M')
    .sum()
    .reset_index()
)
# Append the two missing observations.
irve_type_month.loc[58] = ['2015-01-31 00:00:00', 0]
irve_type_month.loc[59] = ['2015-02-28 00:00:00', 0]
# Normalise dtypes and ordering.
irve_type_month['Time'] = pd.to_datetime(irve_type_month['Time'])
irve_type_month = irve_type_month.sort_values(by='Time').reset_index(drop=True)
#Ventilation des valeurs trimestrielles /Mois
seed(1)
for i, row in irve_type_month.iterrows():
if row['Time'] < pd.Timestamp('2015-03-31') :
irve_type_month.loc[i, 'Accessible au public'] = randint(5000, 8478)
elif (row['Time'] > pd.Timestamp('2015-03-31')) & (row['Time'] < pd.Timestamp('2015-06-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(8478, 10086)
elif (row['Time'] > pd.Timestamp('2015-06-30')) & (row['Time'] < pd.Timestamp('2015-09-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(10086, 10928)
elif (row['Time'] > pd.Timestamp('2015-09-30')) & (row['Time'] < pd.Timestamp('2015-12-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(10928, 11113)
elif (row['Time'] > pd.Timestamp('2015-12-31')) & (row['Time'] < pd.Timestamp('2016-03-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(11113, 12830)
elif (row['Time'] > pd.Timestamp('2016-03-31')) & (row['Time'] < pd.Timestamp('2016-06-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(12830, 13861)
elif (row['Time'] > pd.Timestamp('2016-06-30')) & (row['Time'] < pd.Timestamp('2016-09-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(12859, 13861)
elif (row['Time'] > pd.Timestamp('2016-09-30')) & (row['Time'] < pd.Timestamp('2016-12-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(13861, 16220)
elif (row['Time'] > pd.Timestamp('2016-12-31')) & (row['Time'] < pd.Timestamp('2017-03-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(16220, 17423)
elif (row['Time'] > pd.Timestamp('2017-03-31')) & (row['Time'] < pd.Timestamp('2017-06-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(17423, 19750)
elif (row['Time'] > pd.Timestamp('2017-06-30')) & (row['Time'] < pd.Timestamp('2017-09-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(19750, 20688)
elif (row['Time'] > pd.Timestamp('2017-09-30')) & (row['Time'] < pd.Timestamp('2017-12-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(19309, 20688)
elif (row['Time'] > pd.Timestamp('2017-12-31')) & (row['Time'] < pd.Timestamp('2018-03-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(19309, 26370)
elif (row['Time'] > pd.Timestamp('2018-03-31')) & (row['Time'] < pd.Timestamp('2018-06-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(22283, 26370)
elif (row['Time'] > pd.Timestamp('2018-06-30')) & (row['Time'] < pd.Timestamp('2018-09-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(22283, 24362)
elif (row['Time'] > pd.Timestamp('2018-09-30')) & (row['Time'] < pd.Timestamp('2018-12-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(24362, 26297)
elif (row['Time'] > pd.Timestamp('2018-12-31')) & (row['Time'] < pd.Timestamp('2019-03-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(26297, 27446)
elif (row['Time'] > pd.Timestamp('2019-03-31')) & (row['Time'] < pd.Timestamp('2019-06-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(27446, 28910)
elif (row['Time'] > pd.Timestamp('2019-06-30')) & (row['Time'] < pd.Timestamp('2019-09-30')):
irve_type_month.loc[i, 'Accessible au public'] = randint(28910, 31461)
elif (row['Time'] > pd.Timestamp('2019-09-30')) & (row['Time'] < pd.Timestamp('2019-12-31')):
irve_type_month.loc[i, 'Accessible au public'] = randint(30110, 31461)
else :
pass
# Display the sample
irve_type_month
# Save to CSV
irve_type_month.to_csv('p8_datatable/irve_type_month.csv')
# Apply the Prophet (Facebook) forecasting algorithm
from fbprophet import Prophet
# Prophet requires exactly two columns named 'ds' (datestamp) and 'y' (value)
pdc_forecast_prophet = irve_type_month.copy()
pdc_forecast_prophet = pdc_forecast_prophet[['Time', 'Accessible au public']]
pdc_forecast_prophet.rename(columns={'Time': 'ds', 'Accessible au public': 'y'}, inplace=True)
pdc_forecast_prophet.tail()
# Save to CSV
pdc_forecast_prophet.to_csv('p8_datatable/pdc_forecast_prophet.csv')
# Instantiate and fit the model (monthly data, so only a yearly seasonal component)
model = Prophet(yearly_seasonality=True, weekly_seasonality=False, daily_seasonality=False)
model.fit(pdc_forecast_prophet)
# Forecast the number of charging points 5 years ahead (60 monthly periods)
future = model.make_future_dataframe(periods=60, freq='M')
forecast = model.predict(future)
fig = model.plot(forecast)
fig.savefig('p8_img/forecast_prophet_pdc.png')
# Display the last 5 months of the forecast
# NOTE(review): this re-runs predict() on the same frame — `forecast` above could be reused
forecast_pdc = model.predict(future)
forecast_pdc[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
# Save to CSV
forecast_pdc.to_csv('p8_datatable/forecast_pdc.csv')
```
D'ici fin 2024 le maillage de Points de charge pourrait être étendu à environ 56 000 connecteurs, selon la prédiction de l'algorithme Prophet.
```
# Join observations and predictions on 'ds' for statistical evaluation
metric_forecast_pdc = forecast_pdc.set_index('ds')[['yhat']].join(pdc_forecast_prophet.set_index('ds').y).reset_index()
# Drop forecast-only rows (no observed y to compare against)
metric_forecast_pdc.dropna(inplace=True)
metric_forecast_pdc
# Statistical measures evaluating the model on the overlapping period
print("R2 = " + str(r2_score(metric_forecast_pdc['y'], metric_forecast_pdc['yhat'])))
print("MSE = " + str(mean_squared_error(metric_forecast_pdc['y'], metric_forecast_pdc['yhat'])))
print("RMSE = " + str(math.sqrt(mean_squared_error(metric_forecast_pdc['y'], metric_forecast_pdc['yhat']))))
print("MAE = " + str(mean_absolute_error(metric_forecast_pdc['y'], metric_forecast_pdc['yhat'])))
```
Les coefficients statistiques sont plus optimistes que ceux des précédentes prévisions. Le coefficient de détermination reste proche de 1, seulement les autres métriques d'écart sont assez élevées. En d'autres termes la robustesse du modèle n'est pas très satisfaisante.
<u>A des fins de comparaison, la méthode de Holt-winters est également exploitée.</u>
```
# Prepare the data: monthly charging-point counts indexed by timestamp
irve_forecast_hw = irve_type_month.copy()
irve_forecast_hw['Time'] = pd.to_datetime(irve_forecast_hw['Time'])
irve_forecast_hw.set_index('Time', inplace=True)
# statsmodels' ExponentialSmoothing implements the Holt-Winters model
from statsmodels.tsa.api import ExponentialSmoothing
y = np.array(irve_forecast_hw['Accessible au public'])
# Additive trend + additive yearly (12-month) seasonality
hw = ExponentialSmoothing(y, seasonal_periods=12, trend='add', seasonal='add').fit()
hw_pred = hw.forecast(60)  # 5-year (60-month) forecast
# Visualize the 5-year Holt-Winters forecast
plt.figure(figsize=(16, 8))  # FIX: was `figsize(16, 8)`, a NameError at runtime
plt.plot(irve_forecast_hw['Accessible au public'], label='PDC')
plt.plot(pd.date_range(irve_forecast_hw.index[len(y)-1], periods=60, freq='M'),
         hw_pred, label='Prévision Holt-Winters')
plt.title("Points de charge ouverts au public en France d'ici 2024")
plt.legend()
# FIX: save the *current* figure before show() clears it — the original called
# fig.savefig(), which re-saved the stale Prophet figure from a previous cell.
plt.savefig('p8_img/holtwinters_pdc.png')
plt.show()
# Display the predicted values
hw_pred
```
**Après ces deux modélisations, on peut conclure à un développement du réseau des points de charge (PDC ou Charging Point) entre 55 000 et 60 000 connecteurs d'ici fin 2024.**
[Retour vers la page notebook précédente (Positionnement de la voiture électrique de 2010 à 2019 et prévision à 2 ans)](https://github.com/nalron/project_electric_cars_france2040/blob/french_version/p8_notebook01.ipynb)
[Voir la suite du projet : Appel de charge au réseau électrique (Profilage d'un pic de consommation en 2040, etc…)](https://github.com/nalron/project_electric_cars_france2040/blob/french_version/p8_notebook03.ipynb)
| github_jupyter |
# Alignment
The goal of this notebook is to align files using DTW, weakly-ordered Segmental DTW, or strictly-ordered Segmental DTW.
```
%matplotlib inline
%load_ext Cython
import numpy as np
import matplotlib.pyplot as plt
import librosa as lb
import os.path
from pathlib import Path
import pickle
import multiprocessing
import time
import gc
```
### Align with DTW
The following cell contains a cython implementation of basic DTW.
```
%%cython
import numpy as np
cimport numpy as np
cimport cython
import sys
import time
DTYPE_INT32 = np.int32
ctypedef np.int32_t DTYPE_INT32_t
DTYPE_FLOAT = np.float64
ctypedef np.float64_t DTYPE_FLOAT_t
cdef DTYPE_FLOAT_t MAX_FLOAT = float('inf')
# careful, without bounds checking can mess up memory - also can't use negative indices I think (like x[-1])
@cython.boundscheck(False) # turn off bounds-checking for entire function
def DTW_Cost_To_AccumCostAndSteps(Cin, parameter):
    '''
    Inputs
    C: The cost Matrix
    '''
    # Computes the DTW accumulated-cost matrix for a pairwise cost matrix Cin.
    # `parameter` keys: 'dn' (row step sizes), 'dm' (column step sizes),
    # 'dw' (weight per step type), 'SubSequence' (if truthy, the path may start
    # at any column of the first row rather than only at (0, 0)).
    # Returns [accumCost, steps] where steps[r, c] is the index of the step
    # type that produced the minimum at (r, c); returns [-1, -1, -1] on bad input.
    '''
    Section for checking and catching errors in the inputs
    '''
    cdef np.ndarray[DTYPE_FLOAT_t, ndim=2] C
    try:
        C = np.array(Cin, dtype=DTYPE_FLOAT)
    except TypeError:
        print(bcolors.FAIL + "FAILURE: The type of the cost matrix is wrong - please pass in a 2-d numpy array" + bcolors.ENDC)
        return [-1, -1, -1]
    except ValueError:
        print(bcolors.FAIL + "FAILURE: The type of the elements in the cost matrix is wrong - please have each element be a float (perhaps you passed in a matrix of ints?)" + bcolors.ENDC)
        return [-1, -1, -1]
    cdef np.ndarray[np.uint32_t, ndim=1] dn
    cdef np.ndarray[np.uint32_t, ndim=1] dm
    cdef np.ndarray[DTYPE_FLOAT_t, ndim=1] dw
    # make sure dn, dm, and dw are setup
    # dn loading and exception handling
    if ('dn' in parameter.keys()):
        try:
            dn = np.array(parameter['dn'], dtype=np.uint32)
        except TypeError:
            print(bcolors.FAIL + "FAILURE: The type of dn (row steps) is wrong - please pass in a 1-d numpy array that holds uint32s" + bcolors.ENDC)
            return [-1, -1, -1]
        except ValueError:
            print(bcolors.FAIL + "The type of the elements in dn (row steps) is wrong - please have each element be a uint32 (perhaps you passed a long?). You can specify this when making a numpy array like: np.array([1,2,3],dtype=np.uint32)" + bcolors.ENDC)
            return [-1, -1, -1]
    else:
        dn = np.array([1, 1, 0], dtype=np.uint32)
    # dm loading and exception handling
    if 'dm' in parameter.keys():
        try:
            dm = np.array(parameter['dm'], dtype=np.uint32)
        except TypeError:
            print(bcolors.FAIL + "FAILURE: The type of dm (col steps) is wrong - please pass in a 1-d numpy array that holds uint32s" + bcolors.ENDC)
            return [-1, -1, -1]
        except ValueError:
            print(bcolors.FAIL + "FAILURE: The type of the elements in dm (col steps) is wrong - please have each element be a uint32 (perhaps you passed a long?). You can specify this when making a numpy array like: np.array([1,2,3],dtype=np.uint32)" + bcolors.ENDC)
            return [-1, -1, -1]
    else:
        print(bcolors.FAIL + "dm (col steps) was not passed in (gave default value [1,0,1]) " + bcolors.ENDC)
        dm = np.array([1, 0, 1], dtype=np.uint32)
    # dw loading and exception handling
    if 'dw' in parameter.keys():
        try:
            dw = np.array(parameter['dw'], dtype=DTYPE_FLOAT)
        except TypeError:
            print(bcolors.FAIL + "FAILURE: The type of dw (step weights) is wrong - please pass in a 1-d numpy array that holds floats" + bcolors.ENDC)
            return [-1, -1, -1]
        except ValueError:
            print(bcolors.FAIL + "FAILURE:The type of the elements in dw (step weights) is wrong - please have each element be a float (perhaps you passed ints or a long?). You can specify this when making a numpy array like: np.array([1,2,3],dtype=np.float64)" + bcolors.ENDC)
            return [-1, -1, -1]
    else:
        dw = np.array([1, 1, 1], dtype=DTYPE_FLOAT)
        print(bcolors.FAIL + "dw (step weights) was not passed in (gave default value [1,1,1]) " + bcolors.ENDC)
    '''
    Section where types are given to the variables we're going to use
    '''
    # create matrices to store our results (D and E)
    cdef DTYPE_INT32_t numRows = C.shape[0] # only works with np arrays, use np.shape(x) will work on lists? want to force to use np though?
    cdef DTYPE_INT32_t numCols = C.shape[1]
    cdef DTYPE_INT32_t numDifSteps = np.size(dw)
    cdef unsigned int maxRowStep = max(dn)
    cdef unsigned int maxColStep = max(dm)
    # accumCost is padded by (maxRowStep, maxColStep) on the low side so the step
    # lookups below can never index out of bounds; the padding stays at +inf.
    cdef np.ndarray[np.uint32_t, ndim=2] steps = np.zeros((numRows,numCols), dtype=np.uint32)
    cdef np.ndarray[DTYPE_FLOAT_t, ndim=2] accumCost = np.ones((maxRowStep + numRows, maxColStep + numCols), dtype=DTYPE_FLOAT) * MAX_FLOAT
    cdef DTYPE_FLOAT_t bestCost
    cdef DTYPE_INT32_t bestCostIndex
    cdef DTYPE_FLOAT_t costForStep
    cdef unsigned int row, col
    cdef unsigned int stepIndex
    '''
    The start of the actual algorithm, now that all our variables are set up
    '''
    # initializing the cost matrix - depends on whether its subsequence DTW
    # essentially allow us to hop on the bottom anywhere (so could start partway through one of the signals)
    if parameter['SubSequence']:
        for col in range(numCols):
            accumCost[maxRowStep, col + maxColStep] = C[0, col]
    else:
        accumCost[maxRowStep, maxColStep] = C[0,0]
    # filling the accumulated cost matrix
    for row in range(maxRowStep, numRows + maxRowStep, 1):
        for col in range(maxColStep, numCols + maxColStep, 1):
            bestCost = accumCost[<unsigned int>row, <unsigned int>col] # initialize with what's there - so if is an entry point, then can start low
            bestCostIndex = 0
            # go through each step, find the best one
            for stepIndex in range(numDifSteps):
                #costForStep = accumCost[<unsigned int>(row - dn[<unsigned int>(stepIndex)]), <unsigned int>(col - dm[<unsigned int>(stepIndex)])] + dw[<unsigned int>(stepIndex)] * C[<unsigned int>(row - maxRowStep), <unsigned int>(col - maxColStep)]
                costForStep = accumCost[<unsigned int>((row - dn[(stepIndex)])), <unsigned int>((col - dm[(stepIndex)]))] + dw[stepIndex] * C[<unsigned int>(row - maxRowStep), <unsigned int>(col - maxColStep)]
                if costForStep < bestCost:
                    bestCost = costForStep
                    bestCostIndex = stepIndex
            # save the best cost and best cost index
            accumCost[row, col] = bestCost
            steps[<unsigned int>(row - maxRowStep), <unsigned int>(col - maxColStep)] = bestCostIndex
    # return the accumulated cost along with the matrix of steps taken to achieve that cost
    return [accumCost[maxRowStep:, maxColStep:], steps]
@cython.boundscheck(False) # turn off bounds-checking for entire function
def DTW_GetPath(np.ndarray[DTYPE_FLOAT_t, ndim=2] accumCost, np.ndarray[np.uint32_t, ndim=2] stepsForCost, parameter):
    '''
    Parameter should have: 'dn', 'dm', 'dw', 'SubSequence'
    '''
    # Backtraces the optimal warping path through accumCost using the per-cell
    # step indices in stepsForCost. Optional 'startCol' forces the backtrace to
    # begin at a specific last-row column (used by the segmental variants).
    # Returns [path (2 x L, rows then cols, left-to-right), endCol, endCost].
    cdef np.ndarray[unsigned int, ndim=1] dn
    cdef np.ndarray[unsigned int, ndim=1] dm
    cdef np.uint8_t subseq
    cdef np.int32_t startCol # added
    # make sure dn, dm, and dw are setup
    if ('dn' in parameter.keys()):
        dn = parameter['dn']
    else:
        dn = np.array([1, 1, 0], dtype=DTYPE_INT32)
    if 'dm' in parameter.keys():
        dm = parameter['dm']
    else:
        dm = np.array([1, 0, 1], dtype=DTYPE_INT32)
    if 'SubSequence' in parameter.keys():
        subseq = parameter['SubSequence']
    else:
        subseq = 0
    # added START
    if 'startCol' in parameter.keys():
        startCol = parameter['startCol']
    else:
        startCol = -1
    # added END
    cdef np.uint32_t numRows
    cdef np.uint32_t numCols
    cdef np.uint32_t curRow
    cdef np.uint32_t curCol
    cdef np.uint32_t endCol
    cdef DTYPE_FLOAT_t endCost
    numRows = accumCost.shape[0]
    numCols = accumCost.shape[1]
    # either start at the far corner (non sub-sequence)
    # or start at the lowest cost entry in the last row (sub-sequence)
    # where all of the signal along the row has been used, but only a
    # sub-sequence of the signal along the columns has to be used
    curRow = numRows - 1
    if subseq:
        curCol = np.argmin(accumCost[numRows - 1, :])
    else:
        curCol = numCols - 1
    # added - if specified, overrides above
    if startCol >= 0:
        curCol = startCol
    endCol = curCol
    endCost = accumCost[curRow, curCol]
    cdef np.uint32_t curRowStep
    cdef np.uint32_t curColStep
    cdef np.uint32_t curStepIndex
    cdef np.ndarray[np.uint32_t, ndim=2] path = np.zeros((2, numRows + numCols), dtype=np.uint32) # make as large as could need, then chop at the end
    path[0, 0] = curRow
    path[1, 0] = curCol
    cdef np.uint32_t stepsInPath = 1 # starts at one, we add in one before looping
    cdef np.uint32_t stepIndex = 0
    cdef np.int8_t done = (subseq and curRow == 0) or (curRow == 0 and curCol == 0)
    while not done:
        if accumCost[curRow, curCol] == MAX_FLOAT:
            print('A path is not possible')
            break
        # you're done if you've made it to the bottom left (non sub-sequence)
        # or just the bottom (sub-sequence)
        # find the step size
        curStepIndex = stepsForCost[curRow, curCol]
        curRowStep = dn[curStepIndex]
        curColStep = dm[curStepIndex]
        # backtrack by 1 step
        curRow = curRow - curRowStep
        curCol = curCol - curColStep
        # add your new location onto the path
        path[0, stepsInPath] = curRow
        path[1, stepsInPath] = curCol
        stepsInPath = stepsInPath + 1
        # check to see if you're done
        done = (subseq and curRow == 0) or (curRow == 0 and curCol == 0)
    # reverse the path (a matrix with two rows) and return it
    return [np.fliplr(path[:, 0:stepsInPath]), endCol, endCost]
class bcolors:
    """ANSI terminal escape sequences used to colorize console error messages."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'      # red; used for failure messages in the DTW helpers
    ENDC = '\033[0m'       # resets formatting back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def alignDTW(featfile1, featfile2, steps, weights, downsample, outfile = None, profile = False):
    """Align two chroma feature files with plain (full-sequence) DTW.

    Args:
        featfile1, featfile2: paths to .npy files holding 12 x N feature matrices.
        steps: K x 2 array of (row, col) DTW transition sizes.
        weights: length-K array of per-transition weights.
        downsample: keep every `downsample`-th frame before alignment.
        outfile: if given, pickle the warping path (or None) to this path.
        profile: if True, also return the durations of the three stages.

    Returns:
        2 x L warping path, or None when the lengths differ by 2x or more
        (no valid path under these step sizes).
    """
    F1 = np.load(featfile1) # 12 x N
    F2 = np.load(featfile2) # 12 x M
    if max(F1.shape[1], F2.shape[1]) / min(F1.shape[1], F2.shape[1]) >= 2: # no valid path possible
        if outfile:
            # FIX: close the handle (was pickle.dump(None, open(outfile, 'wb')))
            with open(outfile, 'wb') as f:
                pickle.dump(None, f)
        return None
    times = []
    times.append(time.time())
    C = 1 - F1[:,0::downsample].T @ F2[:,0::downsample] # cos distance metric
    times.append(time.time())
    dn = steps[:,0].astype(np.uint32)
    dm = steps[:,1].astype(np.uint32)
    parameters = {'dn': dn, 'dm': dm, 'dw': weights, 'SubSequence': False}
    [D, s] = DTW_Cost_To_AccumCostAndSteps(C, parameters)
    times.append(time.time())
    [wp, endCol, endCost] = DTW_GetPath(D, s, parameters)
    times.append(time.time())
    if outfile:
        # FIX: close the handle (was pickle.dump(wp, open(outfile, 'wb')))
        with open(outfile, 'wb') as f:
            pickle.dump(wp, f)
    if profile:
        return wp, np.diff(times)
    else:
        return wp
def alignDTW_batch(querylist, featdir1, featdir2, outdir, n_cores, steps, weights, downsample):
    """Run alignDTW over every pair listed in `querylist`, in parallel.

    Each line of `querylist` holds two space-separated relative feature paths.
    Pairs whose output pickle already exists are skipped. Results are written
    to `outdir/<name1>__<name2>.pkl`; nothing is returned.
    """
    outdir.mkdir(parents=True, exist_ok=True)
    # prep inputs for parallelization
    inputs = []
    with open(querylist, 'r') as f:
        for line in f:
            parts = line.strip().split(' ')
            assert len(parts) == 2
            featfile1 = (featdir1 / parts[0]).with_suffix('.npy')
            featfile2 = (featdir2 / parts[1]).with_suffix('.npy')
            queryid = os.path.basename(parts[0]) + '__' + os.path.basename(parts[1])
            outfile = (outdir / queryid).with_suffix('.pkl')
            if os.path.exists(outfile):
                print(f"Skipping {outfile}")
            else:
                inputs.append((featfile1, featfile2, steps, weights, downsample, outfile))
    # process files in parallel
    # FIX: the pool was never closed/joined; the context manager terminates
    # worker processes even if starmap raises.
    with multiprocessing.Pool(processes = n_cores) as pool:
        pool.starmap(alignDTW, inputs)
    return
```
Align a single pair of audio files
```
# Two performances of the same Chopin piece to align against each other
featfile1 = 'features/clean/Chopin_Op068No3/Chopin_Op068No3_Tomsic-1995_pid9190-11.npy'
featfile2 = 'features/clean/Chopin_Op068No3/Chopin_Op068No3_Cortot-1951_pid9066b-19.npy'
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))  # allowed (row, col) transitions: (1,1), (1,2), (2,1)
weights = np.array([2,3,3])                      # weight for each transition above
downsample = 1                                   # use every frame
wp = alignDTW(featfile1, featfile2, steps, weights, downsample)
```
Align all pairs of audio files
```
# Batch-align every pair listed in the query file, writing one pickle per pair
query_list = 'cfg_files/query.test.list'
featdir1 = Path('features/clean')
featdir2 = Path('features/clean') # in case you want to align clean vs noisy
outdir = Path('experiments_test/dtw_clean')
n_cores = 1
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))  # (1,1), (1,2), (2,1) transitions
weights = np.array([2,3,3])
downsample = 1
inputs = alignDTW_batch(query_list, featdir1, featdir2, outdir, n_cores, steps, weights, downsample)
```
### Align with WSDTW
Align with weakly-ordered Segmental DTW.
```
def alignWSDTW(featfile1, featfile2, steps, weights, downsample, numSegments, outfile = None, profile = False):
    """Align two feature files with weakly-ordered Segmental DTW.

    The first recording is split into `numSegments` row-chunks; each chunk is
    matched with subsequence DTW, then a segment-level DP selects where each
    chunk ends, and per-chunk backtraces are merged into one path.

    Returns the merged 2 x L warping path (or None if the file lengths differ
    by 2x or more); with profile=True also returns per-stage durations.
    """
    # compute cost matrix
    F1 = np.load(featfile1) # 12 x N
    F2 = np.load(featfile2) # 12 x M
    if max(F1.shape[1], F2.shape[1]) / min(F1.shape[1], F2.shape[1]) >= 2: # no valid path possible
        if outfile:
            # FIX: close the handle (was pickle.dump(None, open(outfile, 'wb')))
            with open(outfile, 'wb') as f:
                pickle.dump(None, f)
        return None
    times = []
    times.append(time.time())
    C = 1 - F1[:,0::downsample].T @ F2[:,0::downsample] # cos distance metric
    times.append(time.time())
    # run subseqDTW on chunks
    seglen = int(np.ceil(C.shape[0] / numSegments))
    dn1 = steps[:,0].astype(np.uint32)
    dm1 = steps[:,1].astype(np.uint32)
    dw1 = weights
    params1 = {'dn': dn1, 'dm': dm1, 'dw': dw1, 'SubSequence': True}
    Dparts = []
    Bparts = []
    for i in range(numSegments):
        Cpart = C[i*seglen : min((i+1)*seglen, C.shape[0]), :]
        [D, B] = DTW_Cost_To_AccumCostAndSteps(Cpart, params1)
        Dparts.append(D)
        Bparts.append(B)
    times.append(time.time())
    # run segment-level DP: row i+1 holds chunk i's subsequence-DTW ending costs
    Cseg = np.zeros((numSegments+1, C.shape[1]))
    for i in range(len(Dparts)):
        Cseg[i+1,:] = Dparts[i][-1,:]
    dn2 = np.array([0, 1], dtype=np.uint32)
    dm2 = np.array([1, seglen//np.max(steps[:,0])], dtype=np.uint32)
    dw2 = np.array([0, 1])
    params2 = {'dn': dn2, 'dm': dm2, 'dw': dw2, 'SubSequence': False}
    [Dseg, Bseg] = DTW_Cost_To_AccumCostAndSteps(Cseg, params2)
    times.append(time.time())
    [wpseg, _, _] = DTW_GetPath(Dseg, Bseg, params2)
    # backtrace
    segmentEndIdxs = getSegmentEndingLocs(wpseg)
    times.append(time.time())
    wps = []
    for i, endidx in enumerate(segmentEndIdxs):
        params3 = {'dn': dn1, 'dm': dm1, 'dw': dw1, 'SubSequence': True, 'startCol': endidx}
        [wpchunk, _, _] = DTW_GetPath(Dparts[i], Bparts[i], params3)
        wpchunk[0,:] = wpchunk[0,:] + i*seglen # account for relative offset
        wps.append(wpchunk.copy())
    wp_merged = np.hstack(wps)
    times.append(time.time())
    if outfile:
        # FIX: close the handle (was pickle.dump(wp_merged, open(outfile, 'wb')))
        with open(outfile, 'wb') as f:
            pickle.dump(wp_merged, f)
    if profile:
        return wp_merged, np.diff(times)
    else:
        return wp_merged
def getSegmentEndingLocs(wp):
    """Return the column value at every point of the path where the row index
    changes from the previous point, in path order.

    Args:
        wp: 2 x L path array; row 0 holds row indices, row 1 holds columns.
    """
    rows = wp[0, :]
    cols = wp[1, :]
    return [cols[k] for k in range(1, wp.shape[1]) if rows[k] != rows[k - 1]]
def alignSegmentalDTW_batch(querylist, featdir1, featdir2, outdir, n_cores, steps, weights, downsample, numSegments, fn):
    """Run a segmental-DTW aligner `fn` (alignWSDTW or alignSSDTW) over every
    pair listed in `querylist`, in parallel.

    Pairs whose output pickle already exists in `outdir` are skipped.
    """
    outdir.mkdir(parents=True, exist_ok=True)
    # prep inputs for parallelization
    inputs = []
    with open(querylist, 'r') as f:
        for line in f:
            parts = line.strip().split(' ')
            assert len(parts) == 2
            featfile1 = (featdir1 / parts[0]).with_suffix('.npy')
            featfile2 = (featdir2 / parts[1]).with_suffix('.npy')
            queryid = os.path.basename(parts[0]) + '__' + os.path.basename(parts[1])
            outfile = (outdir / queryid).with_suffix('.pkl')
            if os.path.exists(outfile):
                print(f"Skipping {outfile}")
            else:
                inputs.append((featfile1, featfile2, steps, weights, downsample, numSegments, outfile))
    # process files in parallel
    # FIX: the pool was never closed/joined; the context manager cleans up workers.
    with multiprocessing.Pool(processes = n_cores) as pool:
        pool.starmap(fn, inputs)
    return
```
Align a single pair of audio files
```
# Two performances of the same Chopin mazurka to align with WSDTW
featfile1 = 'features/clean/Chopin_Op017No4/Chopin_Op017No4_Afanassiev-2001_pid9130-01.npy'
featfile2 = 'features/clean/Chopin_Op017No4/Chopin_Op017No4_Ben-Or-1989_pid9161-12.npy'
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))  # (1,1), (1,2), (2,1) transitions
weights = np.array([1,1,2])                      # subsequence-DTW weighting
downsample = 1
numSegments = 5                                  # split recording 1 into 5 chunks
wp = alignWSDTW(featfile1, featfile2, steps, weights, downsample, numSegments)
```
Align all pairs of audio files
```
# Batch-align all pairs for several segment counts, one output dir per setting
query_list = 'cfg_files/query.test.list'
featdir1 = Path('features/clean')
featdir2 = Path('features/clean') # in case you want to align clean vs noisy
n_cores = 1
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([1,1,2])
downsample = 1
segmentVals = [2, 4, 8, 16, 32]  # sweep over the number of segments
for numSegments in segmentVals:
    outdir = Path(f'experiments_test/wsdtw_{numSegments}_clean')
    alignSegmentalDTW_batch(query_list, featdir1, featdir2, outdir, n_cores, steps, weights, downsample, numSegments, alignWSDTW)
```
### Align with SSDTW
Align with strictly-ordered Segmental DTW
```
%%cython
import numpy as np
cimport numpy as np
cimport cython
import sys
import time
DTYPE_INT32 = np.int32
ctypedef np.int32_t DTYPE_INT32_t
DTYPE_FLOAT = np.float64
ctypedef np.float64_t DTYPE_FLOAT_t
cdef DTYPE_FLOAT_t MAX_FLOAT = float('inf')
# careful, without bounds checking can mess up memory - also can't use negative indices I think (like x[-1])
@cython.boundscheck(False) # turn off bounds-checking for entire function
def Segment_DP(np.ndarray[DTYPE_FLOAT_t, ndim=2] C, np.ndarray[np.int32_t, ndim=2] T):
    # Segment-level DP for strictly-ordered Segmental DTW.
    # C[i, j]: cost of segment i's best subsequence match ending at column j
    #          (filled from the last row of each chunk's accumulated-cost matrix).
    # T[i, j]: start column of that match, or -1 if no valid path (from calc_Tseg).
    # Returns [accumCost, steps]: accumCost has an extra all-zero row 0 as the
    # DP base case; steps[row, col] holds the jump start column, -1 meaning
    # "skip" (segment ends further left).
    cdef DTYPE_INT32_t numRows = C.shape[0]
    cdef DTYPE_INT32_t numCols = C.shape[1]
    cdef np.ndarray[np.int32_t, ndim=2] steps = np.zeros((numRows+1,numCols), dtype=np.int32)
    cdef np.ndarray[DTYPE_FLOAT_t, ndim=2] accumCost = np.ones((numRows+1, numCols), dtype=DTYPE_FLOAT) * MAX_FLOAT
    cdef unsigned int row, col
    cdef DTYPE_FLOAT_t skipCost
    cdef np.int32_t jumpStartCol
    cdef DTYPE_FLOAT_t jumpCost
    # initialize
    for row in range(numRows+1):
        for col in range(numCols):
            steps[row, col] = -1 # skip by default
    for col in range(numCols):
        accumCost[0, col] = 0 # all inf except first row
    # dynamic programming
    for row in range(1, numRows+1):
        for col in range(numCols):
            # skip transition: inherit the best cost from the column to the left
            if col == 0:
                skipCost = MAX_FLOAT
            else:
                skipCost = accumCost[row, col-1]
            accumCost[row, col] = skipCost
            # best step is skip by default, so don't need to assign
            # jump transition: end segment `row-1` here, starting where the
            # previous segment ended (strict ordering)
            jumpStartCol = T[row-1, col]
            if jumpStartCol >= 0: # valid subsequence path
                jumpCost = accumCost[row-1, jumpStartCol] + C[row-1, col]
                if jumpCost < skipCost:
                    accumCost[row, col] = jumpCost
                    steps[row, col] = jumpStartCol
    return [accumCost, steps]
@cython.boundscheck(False) # turn off bounds-checking for entire function
def Segment_Backtrace(np.ndarray[DTYPE_FLOAT_t, ndim=2] accumCost, np.ndarray[np.int32_t, ndim=2] steps):
    # Backtraces the Segment_DP solution from the top-right corner.
    # Returns, in ascending segment order, the column where each segment's
    # best subsequence match ends (one entry per segment, length numRows-1).
    cdef np.uint32_t numRows = accumCost.shape[0]
    cdef np.uint32_t numCols = accumCost.shape[1]
    cdef np.uint32_t curRow = numRows - 1
    cdef np.uint32_t curCol = numCols - 1
    cdef np.int32_t jump
    cdef np.ndarray[np.uint32_t, ndim=1] path = np.zeros(numRows-1, dtype=np.uint32)
    cdef np.uint32_t pathElems = 0
    while curRow > 0:
        if accumCost[curRow, curCol] == MAX_FLOAT:
            print('A path is not possible')
            break
        jump = steps[curRow, curCol]
        if jump < 0: # skip: segment ended further left, move one column over
            curCol = curCol - 1
        else: # jump: record this ending column and drop to the previous segment
            path[pathElems] = curCol
            pathElems = pathElems + 1
            curRow = curRow - 1
            curCol = jump
    # path was filled in reverse (last segment first); flip to ascending order
    return path[::-1]
@cython.boundscheck(False) # turn off bounds-checking for entire function
def calc_Tseg(np.ndarray[DTYPE_FLOAT_t, ndim=2] accumCost, np.ndarray[np.uint32_t, ndim=2] stepsForCost, parameter):
    '''
    Parameter should have: 'dn', 'dm'
    '''
    # For each possible ending column of a subsequence-DTW chunk, backtrace the
    # chunk's step matrix to find the column where that path starts.
    # Returns startLocs (length numCols); -1 marks an ending column with no
    # valid path (accumulated cost is infinite somewhere along the backtrace).
    cdef np.ndarray[unsigned int, ndim=1] dn
    cdef np.ndarray[unsigned int, ndim=1] dm
    cdef np.uint32_t numRows = accumCost.shape[0]
    cdef np.uint32_t numCols = accumCost.shape[1]
    cdef np.ndarray[np.int32_t, ndim=1] startLocs = np.zeros(numCols, dtype=np.int32)
    cdef np.uint32_t endCol
    cdef np.uint32_t curRow
    cdef np.uint32_t curCol
    cdef np.uint32_t curStepIndex
    # get step transitions
    if ('dn' in parameter.keys()):
        dn = parameter['dn']
    else:
        dn = np.array([1, 1, 0], dtype=DTYPE_INT32)
    if 'dm' in parameter.keys():
        dm = parameter['dm']
    else:
        dm = np.array([1, 0, 1], dtype=DTYPE_INT32)
    # backtrace from every location
    for endCol in range(numCols):
        curCol = endCol
        curRow = numRows - 1
        while curRow > 0:
            if accumCost[curRow, curCol] == MAX_FLOAT: # no valid path
                # NOTE(review): this marks startLocs[curCol], but curCol may have
                # moved away from endCol by now — startLocs[endCol] looks like the
                # intended target; confirm against the SSDTW paper/reference code.
                startLocs[curCol] = -1
                break
            curStepIndex = stepsForCost[curRow, curCol]
            curRow = curRow - dn[curStepIndex]
            curCol = curCol - dm[curStepIndex]
        if curRow == 0:
            startLocs[endCol] = curCol
    return startLocs
class bcolors:
    """ANSI terminal escape sequences (duplicated per %%cython cell, since each
    cell compiles into its own extension module)."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'      # red; used for failure messages
    ENDC = '\033[0m'       # resets formatting
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def alignSSDTW(featfile1, featfile2, steps, weights, downsample, numSegments, outfile = None, profile = False):
    """Align two feature files with strictly-ordered Segmental DTW.

    Like alignWSDTW, but the segment-level DP (Segment_DP/Segment_Backtrace)
    enforces that each chunk's match starts where the previous chunk's ended.
    The shorter recording is always used as the (chunked) query; the path is
    flipped back afterwards.

    Returns the merged 2 x L warping path (or None if the file lengths differ
    by 2x or more); with profile=True also returns per-stage durations.
    """
    # compute cost matrix
    F1 = np.load(featfile1) # 12 x N
    F2 = np.load(featfile2) # 12 x M
    swap = (F1.shape[1] > F2.shape[1])
    if swap:
        F1, F2 = F2, F1 # make the shorter sequence the query
    if max(F1.shape[1], F2.shape[1]) / min(F1.shape[1], F2.shape[1]) >= 2: # no valid path possible
        if outfile:
            # FIX: close the handle (was pickle.dump(None, open(outfile, 'wb')))
            with open(outfile, 'wb') as f:
                pickle.dump(None, f)
        return None
    times = []
    times.append(time.time())
    C = 1 - F1[:,0::downsample].T @ F2[:,0::downsample] # cos distance metric
    times.append(time.time())
    # run subseqDTW on chunks
    seglen = int(np.ceil(F1.shape[1] / numSegments))
    dn = steps[:,0].astype(np.uint32)
    dm = steps[:,1].astype(np.uint32)
    dw = weights
    params1 = {'dn': dn, 'dm': dm, 'dw': dw, 'SubSequence': True}
    Dparts = []
    Bparts = []
    for i in range(numSegments):
        Cpart = C[i*seglen : min((i+1)*seglen, F1.shape[1]), :]
        [D, B] = DTW_Cost_To_AccumCostAndSteps(Cpart, params1)
        Dparts.append(D)
        Bparts.append(B)
    times.append(time.time())
    # construct Cseg (per-segment ending costs) and Tseg (per-segment start cols)
    Cseg = np.zeros((numSegments, F2.shape[1]))
    Tseg = np.zeros((numSegments, F2.shape[1]), dtype=np.int32)
    for i, Dpart in enumerate(Dparts):
        Cseg[i,:] = Dpart[-1,:]
        Tseg[i,:] = calc_Tseg(Dpart, Bparts[i], params1)
    times.append(time.time())
    # segment-level DP
    [Dseg, Bseg] = Segment_DP(Cseg, Tseg)
    times.append(time.time())
    segmentEndIdxs = Segment_Backtrace(Dseg, Bseg)
    times.append(time.time())
    # backtrace on chunks
    wps = []
    for i, endidx in enumerate(segmentEndIdxs):
        params2 = {'dn': dn, 'dm': dm, 'dw': dw, 'SubSequence': True, 'startCol': endidx}
        [wpchunk, _, _] = DTW_GetPath(Dparts[i], Bparts[i], params2)
        wpchunk[0,:] = wpchunk[0,:] + i*seglen # account for relative offset
        wps.append(wpchunk.copy())
    wp_merged = np.hstack(wps)
    times.append(time.time())
    if swap:
        wp_merged = np.flipud(wp_merged) # undo swap
    if outfile:
        # FIX: close the handle (was pickle.dump(wp_merged, open(outfile, 'wb')))
        with open(outfile, 'wb') as f:
            pickle.dump(wp_merged, f)
    if profile:
        return wp_merged, np.diff(times)
    else:
        return wp_merged
```
Align a single pair of audio files
```
# Same Chopin pair as the WSDTW example, aligned with the strictly-ordered variant
featfile1 = 'features/clean/Chopin_Op017No4/Chopin_Op017No4_Afanassiev-2001_pid9130-01.npy'
featfile2 = 'features/clean/Chopin_Op017No4/Chopin_Op017No4_Ben-Or-1989_pid9161-12.npy'
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))  # (1,1), (1,2), (2,1) transitions
weights = np.array([1,1,2])
downsample = 1
numSegments = 5
wp = alignSSDTW(featfile1, featfile2, steps, weights, downsample, numSegments)
```
Align all pairs of audio files
```
# Batch-align all pairs with SSDTW for several segment counts
query_list = 'cfg_files/query.test.list'
featdir1 = Path('features/clean')
featdir2 = Path('features/clean') # in case you want to align clean vs noisy
n_cores = 1
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([1,1,2])
downsample = 1
segmentVals = [2, 4, 8, 16, 32]  # sweep over the number of segments
for numSegments in segmentVals:
    outdir = Path(f'experiments_test/ssdtw_{numSegments}_clean')
    alignSegmentalDTW_batch(query_list, featdir1, featdir2, outdir, n_cores, steps, weights, downsample, numSegments, alignSSDTW)
```
### Runtime Profiling
Measure runtime of different DTW variants on cost matrices of varying sizes.
```
def saveRandomFeatureMatrices(sizes, outdir):
    """Generate and save reproducible random 12 x sz feature matrices.

    One file `F_<sz>.npy` is written to `outdir` for each sz in `sizes`,
    with uniform [0, 1) entries. Used as fixtures for runtime profiling.
    """
    # FIX: os.mkdir fails when parent dirs are missing and when the dir exists;
    # makedirs(..., exist_ok=True) handles both.
    os.makedirs(outdir, exist_ok=True)
    np.random.seed(0)  # fixed seed so profiling inputs are reproducible
    for sz in sizes:
        F = np.random.rand(12, sz)
        outfile = outdir + f'/F_{sz}.npy'
        np.save(outfile, F)
    return
# Generate the profiling fixtures: one 12 x sz random matrix per size
sizes = [1000, 2000, 5000, 10000, 20000, 50000]
rand_feat_dir = 'features/random'
saveRandomFeatureMatrices(sizes, rand_feat_dir)
```
Profiling DTW
```
# DTW
outfile = 'dtw_prof.pkl'
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([2,3,3])
downsample = 1
sizes = [50000, 20000, 10000, 5000, 2000, 1000]
N = 10  # timed repetitions per size
durs = np.zeros((len(sizes), N, 3)) # DTW runtime is broken into 3 parts
for i in range(len(sizes)):
    sz = sizes[i]
    print(f'Running size = {sz} ', end='')
    featfile = rand_feat_dir + f'/F_{sz}.npy'
    for j in range(N):
        print('.', end='')
        # collect garbage between runs so leftover allocations don't skew timings
        gc.collect()
        _, times = alignDTW(featfile, featfile, steps, weights, downsample, profile=True)
        durs[i,j,:] = np.array(times)
    print('')
# NOTE(review): file handle left open — prefer `with open(outfile, 'wb') as f:`
pickle.dump([durs, sizes], open(outfile, 'wb'))
```
Profiling WSDTW
```
# WSDTW
outfile = 'wsdtw_prof.pkl'
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([1,1,2])
downsample = 1
segmentVals = [2, 4, 8, 16, 32]
sizes = [50000, 20000, 10000, 5000, 2000, 1000]
N = 10  # timed repetitions per (numSegments, size) configuration
durs = np.zeros((len(segmentVals), len(sizes), N, 5)) # WSDTW runtime is broken into 5 parts
for i, numSegments in enumerate(segmentVals):
    print(f'Running numSegments = {numSegments} ', end='')
    for j, sz in enumerate(sizes):
        print('|', end='')
        featfile = rand_feat_dir + f'/F_{sz}.npy'
        for k in range(N):
            print('.', end='')
            # collect garbage between runs so leftover allocations don't skew timings
            gc.collect()
            _, times = alignWSDTW(featfile, featfile, steps, weights, downsample, numSegments, profile=True)
            durs[i,j,k,:] = np.array(times)
    print('')
# NOTE(review): file handle left open — prefer `with open(outfile, 'wb') as f:`
pickle.dump([durs, segmentVals, sizes], open(outfile, 'wb'))
```
Profiling SSDTW
```
# SSDTW
outfile = 'ssdtw_prof.pkl'
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([1,1,2])
downsample = 1
segmentVals = [2, 4, 8, 16, 32]
sizes = [50000, 20000, 10000, 5000, 2000, 1000]
N = 10  # timed repetitions per (numSegments, size) configuration
durs = np.zeros((len(segmentVals), len(sizes), N, 6)) # SSDTW runtime is broken into 6 parts
for i, numSegments in enumerate(segmentVals):
    print(f'Running numSegments = {numSegments} ', end='')
    for j, sz in enumerate(sizes):
        print('|', end='')
        featfile = rand_feat_dir + f'/F_{sz}.npy'
        for k in range(N):
            print('.', end='')
            # collect garbage between runs so leftover allocations don't skew timings
            gc.collect()
            _, times = alignSSDTW(featfile, featfile, steps, weights, downsample, numSegments, profile=True)
            durs[i,j,k,:] = np.array(times)
    print('')
# NOTE(review): file handle left open — prefer `with open(outfile, 'wb') as f:`
pickle.dump([durs, segmentVals, sizes], open(outfile, 'wb'))
```
### Comparing Alignments on Random Data
See how closely Segmental DTW variants match DTW alignments on random cost matrices.
```
def saveRandomFeatureMatrices2(sz, N, outdir):
    """Generate and save N random, column-normalized 12 x sz feature matrices.

    Each column is scaled to unit L2 norm so the cosine-distance cost matrix
    computed downstream is well-defined. Files are named `F_<sz>_<i>.npy`.
    """
    # FIX: os.mkdir fails when parent dirs are missing and when the dir exists;
    # makedirs(..., exist_ok=True) handles both.
    os.makedirs(outdir, exist_ok=True)
    np.random.seed(0)  # fixed seed so comparison inputs are reproducible
    for i in range(N):
        F = np.random.rand(12, sz)
        norm_factor = np.sqrt(np.sum(F*F, axis=0))
        F = F / norm_factor
        outfile = outdir + f'/F_{sz}_{i}.npy'
        np.save(outfile, F)
    return
# Compare DTW vs segmental variants on random (structure-free) features
sz = 10000
N = 10
rand_feat_dir = 'features/random'
saveRandomFeatureMatrices2(sz, N, rand_feat_dir)
featfile1 = 'features/random/F_10000_0.npy'
featfile2 = 'features/random/F_10000_6.npy'
# DTW
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([2,3,3])
downsample = 1
wp = alignDTW(featfile1, featfile2, steps, weights, downsample)
# Segmental DTW variants
steps = np.array([1,1,1,2,2,1]).reshape((-1,2))
weights = np.array([1,1,2])
numSegments = 16
wp2 = alignWSDTW(featfile1, featfile2, steps, weights, downsample, numSegments)
wp3 = alignSSDTW(featfile1, featfile2, steps, weights, downsample, numSegments)
# Overlay the three warping paths: black=DTW, red=WSDTW, blue=SSDTW
plt.plot(wp[0,:], wp[1,:], 'k')
plt.plot(wp2[0,:], wp2[1,:], 'r')
plt.plot(wp3[0,:], wp3[1,:], 'b')
```
| github_jupyter |
# Linear Regression
## Setup
First, let's set up some environmental dependencies. These just make the numerics easier and adjust some of the plotting defaults to make things more legible.
```
# Python 3 compatibility
from __future__ import division, print_function
from six.moves import range
# system functions that are always useful to have
import time, sys, os
# basic numeric setup
import numpy as np
# inline plotting
%matplotlib inline
# plotting
import matplotlib
from matplotlib import pyplot as plt
# seed the random number generator (fixed seed for reproducible mock data)
np.random.seed(56101)
# re-defining plotting defaults for legibility
from matplotlib import rcParams
rcParams.update({'xtick.major.pad': '7.0'})
rcParams.update({'xtick.major.size': '7.5'})
rcParams.update({'xtick.major.width': '1.5'})
rcParams.update({'xtick.minor.pad': '7.0'})
rcParams.update({'xtick.minor.size': '3.5'})
rcParams.update({'xtick.minor.width': '1.0'})
rcParams.update({'ytick.major.pad': '7.0'})
rcParams.update({'ytick.major.size': '7.5'})
rcParams.update({'ytick.major.width': '1.5'})
rcParams.update({'ytick.minor.pad': '7.0'})
rcParams.update({'ytick.minor.size': '3.5'})
rcParams.update({'ytick.minor.width': '1.0'})
rcParams.update({'font.size': 30})
import dynesty
```
Linear regression is ubiquitous in research. In this example we'll fit a line
$$ y=mx+b $$
to data where the error bars have been underestimated and need to be inflated by a factor $f$. This example is taken from the [emcee documentation](http://dan.iel.fm/emcee/current/user/line/).
```
# truth: the parameters the sampler should recover
m_true = -0.9594
b_true = 4.294
f_true = 0.534  # fractional amount by which the quoted errors are underestimated
# generate mock data: line + fractional scatter, plus the (underestimated) yerr noise
N = 50
x = np.sort(10 * np.random.rand(N))
yerr = 0.1 + 0.5 * np.random.rand(N)
y_true = m_true * x + b_true
y = y_true + np.abs(f_true * y_true) * np.random.randn(N)
y += yerr * np.random.randn(N)
# plot results: data with (too-small) error bars and the underlying true line
plt.figure(figsize=(10, 5))
plt.errorbar(x, y, yerr=yerr, fmt='ko', ecolor='red')
plt.plot(x, y_true, color='blue', lw=3)
plt.xlabel(r'$X$')
plt.ylabel(r'$Y$')
plt.tight_layout()
```
We will assume the errors are Normal and impose uniform priors on $(m, b, \ln f)$.
```
# log-likelihood
def loglike(theta):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0 / (yerr**2 + model**2 * np.exp(2 * lnf))
return -0.5 * (np.sum((y-model)**2 * inv_sigma2 - np.log(inv_sigma2)))
# prior transform
def prior_transform(utheta):
um, ub, ulf = utheta
m = 5.5 * um - 5.
b = 10. * ub
lnf = 11. * ulf - 10.
return m, b, lnf
```
Let's sample from this distribution using multiple bounding ellipsoids and random "staggers" (an alternative to random walks).
```
# dynamic nested sampling with multi-ellipsoid bounds and the "rstagger"
# (random stagger) sampling scheme
dsampler = dynesty.DynamicNestedSampler(loglike, prior_transform, ndim=3,
bound='multi', sample='rstagger')
dsampler.run_nested()
dres = dsampler.results
```
Let's see how we did.
```
from dynesty import plotting as dyplot
# true parameter values for overplotting on the diagnostics
truths = [m_true, b_true, np.log(f_true)]
labels = [r'$m$', r'$b$', r'$\ln f$']
# trace plot: per-parameter sample traces and 1-D marginal posteriors
fig, axes = dyplot.traceplot(dsampler.results, truths=truths, labels=labels,
fig=plt.subplots(3, 2, figsize=(16, 12)))
fig.tight_layout()
# corner plot: all 1-D and 2-D marginal posteriors with titles
fig, axes = dyplot.cornerplot(dres, truths=truths, show_titles=True,
title_kwargs={'y': 1.04}, labels=labels,
fig=plt.subplots(3, 3, figsize=(15, 15)))
```
| github_jupyter |
<div class="alert alert-block alert-info">
<b><h1>ENGR 1330 Computational Thinking with Data Science </h1></b>
</div>
Copyright © 2021 Theodore G. Cleveland and Farhang Forghanparast
Last GitHub Commit Date:
# 11: Databases
- Fundamental Concepts
- Dataframes
- Read/Write to from files
---
## Objectives
1. To understand the **dataframe abstraction** as implemented in the Pandas library(module).
1. To be able to access and manipulate data within a dataframe
2. To be able to obtain basic statistical measures of data within a dataframe
2. Read/Write from/to files
1. MS Excel-type files (.xls,.xlsx,.csv) (LibreOffice files use the MS .xml standard)
2. Ordinary ASCII (.txt) files
---
## Computational Thinking Concepts
The CT concepts expressed within Databases include:
- `Decomposition` : Break a problem down into smaller pieces; Collections of data are decomposed into their smallest usable part
- `Abstraction` : A database is an abstraction of a collection of data
Suppose we need to store a car as a collection of its parts (implying disassembly each time we park it); such decomposition would produce a situation like the image.
<img src="https://miro.medium.com/max/590/1*Xfxl8HoQqqg_KtpEsEcskw.jpeg" width="400">
If the location of each part is recorded, then we can determine if something is missing as in
<img src="http://54.243.252.9/engr-1330-webroot/engr1330jb/lessons/lesson11/modifiedCar.jpeg" width="400">
In the image, the two missing parts are pretty large, and would be evident on a fully assembled car (missing front corner panel, and right rear tire). Smaller parts would be harder to track on the fully assembled object. However if we had two fully assembled cars, and when we moved them heard the tink-tink-tink of a ball bearing bouncing on the floor, we would know something is missing - a query of the database to find where all the balls are supposed to be will help us figure out which car is incomplete.
In other contexts you wouldn’t want to have to take your car apart and store every piece separately whenever you park it in the garage. In that case, you would want to store your car as a single entry in the database (garage), and access its pieces through it (used car parts are usually sourced from fully assembled cars).
The U.S. Airforce keeps a lot of otherwise broken aircraft for parts replacement. As a part is removed it is entered into a database "a transaction" so they know that part is no longer in the broken aircraft lot but in service somewhere. So the database may locate a part in a bin in a hangar or a part that is residing in an assembled aircraft. In either case, the hangar (and parts bin) as well as the broken aircarft are both within the database schema - an abstraction.
<img src="http://54.243.252.9/engr-1330-webroot/engr1330jb/lessons/lesson11/boneyard.png" width = "500" >
And occassionally they grab a whole airframe
<img src="http://54.243.252.9/engr-1330-webroot/engr1330jb/lessons/lesson11/B52WhereRU.png" width = "500" >
---
### Databases
Databases are containers for data. A public library stores books, hence we could legitimately state that a library is a database of books. But strictly defined, databases are computer structures that save, organize, protect, and deliver data. A system that contains and manipulates databases is called a database management system, or DBMS.
A database can be thought of as a kind of electronic filing cabinet; it contains digitized information (“data”), which is kept in persistent storage of some kind. Users can insert new information into the database, and delete, change, or retrieve existing information in the database, by issuing requests or commands to the software that manages the database—which is to say, the database management system (DBMS).
In practice those user requests to the DBMS can be formulated in a variety of different ways (e.g., by pointing and clicking with a mouse). For our purposes, however, it’s more convenient to assume they’re expressed in the form of simple text strings in some formal language. Given a human resources database, for example, we might write:
```
EMP WHERE JOB = 'Programmer'
```
And this expression represents a retrieval request—more usually known as a `query` for employee information for employees whose job title is ‘Programmer’. A query submission and responce is called a transaction.
---
#### Database Types
The simplest form of databases is a text database. When data are organized in a text file in rows and columns, it can be used to store, organize, protect, and retrieve data. Saving a list of names in a file, starting with first name and followed by last name, would be a simple database. Each row of the file represents a record. You can update records by changing specific names, you can remove rows by deleting lines, and you can add new rows by adding new lines. The term "flat-file" database is a typical analog.
Desktop database programs are another type of database that's more complex than a flat-file text database yet still intended for a single user. A Microsoft Excel spreadsheet or Microsoft Access database are good examples of desktop database programs. These programs allow users to enter data, store it, protect it, and retrieve it when needed. The benefit of desktop database programs over text databases is the speed of changing data, and the ability to store comparatively large amounts of data while keeping performance of the system manageable.
Relational databases are the most common database systems. A relational database contains multiple tables of data with rows and columns that relate to each other through special key fields. These databases are more flexible than flat file structures, and provide functionality for reading, creating, updating, and deleting data. Relational databases use variations of Structured Query Language (SQL) - a standard user application that provides an easy programming interface for database interaction.
Some examples of Relational Database Management Systems (RDMS) are SQL Server, Oracle Database, Sybase, Informix, and MySQL. The relational database management systems (RDMS) exhibit superior performance for managing large collections of for multiple users (even thousands!) to work with the data at the same time, with elaborate security to protect the data. RDBMS systems still store data in columns and rows, which in turn make up tables. A table in RDBMS is like a spreadsheet, or any other flat-file structure. A set of tables makes up a schema. A number of schemas create a database.
Emergent structures for storing data today are NoSQL and object-oriented databases. These do not follow the table/row/column approach of RDBMS. Instead, they build bookshelves of elements and allow access per bookshelf. So, instead of tracking individual words in books, NoSQL and object-oriented databases narrow down the data you are looking for by pointing you to the bookshelf, then a mechanical assistant works with the books to identify the exact word you are looking for.
---
#### Relational Database Concepts
The figure below shows sample values for a typical database, having to do with suppliers, parts, and shipments (of parts by suppliers).
<img src="http://54.243.252.9/engr-1330-webroot/engr1330jb/lessons/lesson11/PartsAreParts.png" width="500">
Observe that this database contains three files, or tables. The tables are named S, P, and SP, respectively, and since they’re tables they’re made up of rows and columns (in conventional file terms, the rows correspond to records of the file in question and the columns to fields). They’re meant to be understood as follows:
> Table S represents suppliers under contract. Each supplier has one supplier number (SNO), unique to that supplier; one name (SNAME), not necessarily unique (though the sample values shown in Figure 1-1 do happen to be unique); one status value (STATUS); and one location (CITY). Note: In the rest of this book I’ll abbreviate “suppliers under contract,” most of the time, to just suppliers.
> Table P represents kinds of parts. Each kind of part has one part number (PNO), which is unique; one name (PNAME); one color (COLOR); one weight (WEIGHT); and one location where parts of that kind are stored (CITY). Note: In the rest of this book I’ll abbreviate “kinds of parts,” most of the time, to just parts.
> Table SP represents shipments—it shows which parts are shipped, or supplied, by which suppliers. Each shipment has one supplier number (SNO); one part number (PNO); and one quantity (QTY). Also, there’s at most one shipment at any given time for a given supplier and given part, and so the combination of supplier number and part number is unique to any given shipment.
Real databases tend to be much more complicated than this “toy” example. However we can make useful observations; these three tables are our schema (our framework for lack of a better word), and at this point is also our only schema, hence it is the `PartsIsParts` database (we have just named the database here)
### Dataframe-type Structure using primative python
First lets construct a dataframe like objects using python primatives, and the *PartsIsParts* database schema
```
# Toy "PartsIsParts" database as plain lists of lists.
# Convention for all three tables: row 0 holds the column (field) names,
# every later row is one record.
# Parts table: part number, name, color, weight, storage city
parts = [['PNO','PNAME','COLOR','WEIGHT','CITY'],
['P1','Nut','Red',12.0,'London'],
['P2','Bolt','Green',17.0,'Paris'],
['P3','Screw','Blue',17.0,'Oslo'],
['P4','Screw','Red',14.0,'London'],
['P5','Cam','Blue',12.0,'Paris'],
['P6','Cog','Red',19.0,'London'],]
# Suppliers table: supplier number, name, status, city
suppliers = [['SNO','SNAME','STATUS','CITY'],
['S1','Smith',20,'London'],
['S2','Jones',10,'Paris'],
['S3','Blake',30,'Paris'],
['S4','Clark',20,'London'],
['S5','Adams',30,'Athens']]
# Shipments table: (supplier, part) pairs with shipped quantity;
# the (SNO, PNO) combination is unique per shipment
shipments = [['SNO','PNO','QTY'],
['S1','P1',300],
['S1','P2',200],
['S1','P3',400],
['S1','P4',200],
['S1','P5',100],
['S1','P6',100],
['S2','P1',300],
['S2','P2',400],
['S3','P2',200],
['S4','P2',200],
['S4','P4',300],
['S4','P5',400]]
```
Lets examine some things:
In each table there are columns, these are called fields. There are also rows, these are called records. Hidden from view is a unique record identifier for each record, each table.
Now lets query our database, lets list all parts whose weight is less than 13 - how do we proceede?
- We have to select the right table
- We have to construct a search to find all instances of parts with weight less than 13
- Print the result
For the toy problem not too hard
```
# Scan every data record (row 0 is the header) and show the parts
# whose WEIGHT field (index 3) is below 13.
for record in parts[1:]:
    if record[3] < 13.0:
        print(record)
```
Now lets query our database, lets list all parts whose weight is less than 13 - but only list the part number, color, and city
- We have to select the right table
- We have to construct a search to find all instances of parts with weight less than 13
- Print the list slice with the requesite information
For the toy problem still not too hard, but immediately we see if this keeps up its going to get kind of tricky fast!; Also it would be nice to be able to refer to a column by its name.
```
# Same weight-below-13 search, but project only the PNO, COLOR and
# CITY fields (indices 0, 2, 4) from each matching record.
for record in parts[1:]:
    if record[3] < 13.0:
        print(record[0], record[2], record[4])
```
Now lets modify contents of a table. Lets delete all instances of suppliers with status 10. Then for remaining suppliers elevate their status by 5.
Again
- We have to select the right table
- We have to construct a search to find all instances of status equal to 10
- If not equal to 10, copy the row, otherwise skip
- Delete original table, and rename the temporary table
```
temp = []
# show the table before the edit
for record in suppliers:
    print(record)
# keep every row whose STATUS field is not 10 (the header row survives
# as well, because its STATUS cell holds the string 'STATUS')
for record in suppliers:
    if record[2] != 10:
        temp.append(record)
suppliers = temp  # replace the original table with the filtered copy
# show the table after the edit
for record in suppliers:
    print(record)
```
Now suppose we want to find how many parts are coming from London, our query gets more complex, but still manageable.
```
# Collect the supplier codes of all suppliers located in London.
temp = []
for record in suppliers:
    if record[3] == 'London':
        temp.append(record[0])  # supplier code from London
# Sum the quantities of every shipment made by one of those suppliers.
howmany = 0
for shipment in shipments:
    for code in temp:
        if shipment[0] == code:
            howmany = howmany + shipment[2]
print(howmany)
```
Instead of writing all our own scripts, unique to each database, the python community created a module called `Pandas`, so named because most things in the world are made in China, and their national critter is a Panda Bear (actually the name is a contraction of **PAN**el **DA**ta **S**tructure, or something close to that).
So to build these queries in an easier fashion - lets examine `pandas`.
---
## The `pandas` module
- About Pandas
- How to install
- Anaconda
- JupyterHub/Lab (on Linux)
- JupyterHub/Lab (on MacOS)
- JupyterHub/Lab (on Windoze)
- The Dataframe
- Primatives
- Using Pandas
- Create, Modify, Delete datagrames
- Slice Dataframes
- Conditional Selection
- Synthetic Programming (Symbolic Function Application)
- Files
- Access Files from a remote Web Server
- Get file contents
- Get the actual file
- Adaptations for encrypted servers (future semester)
---
### About Pandas:
Pandas is the core library for dataframe manipulation in Python. It provides a high-performance multidimensional array object, and tools for working with these arrays. The library’s name is derived from the term ‘Panel Data’.
If you are curious about Pandas, this cheat sheet is recommended: [https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf](https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf)
#### Data Structure
The Primary data structure is called a dataframe. It is an **abstraction** where data are represented as a 2-dimensional mutable and heterogenous tabular data structure; much like a Worksheet in MS Excel. The structure itself is popular among statisticians and data scientists and business executives.
According to the marketing department
*"Pandas Provides rich data structures and functions designed to make working with data fast, easy, and expressive. It is useful in data manipulation, cleaning, and analysis; Pandas excels in performance and productivity "*
---
## The Dataframe
A data table is called a `DataFrame` in pandas (and other programming environments too). The figure below from [https://pandas.pydata.org/docs/getting_started/index.html](https://pandas.pydata.org/docs/getting_started/index.html) illustrates a dataframe model:

Each **column** and each **row** in a dataframe is called a series, the header row, and index column are special.
Like MS Excel we can query the dataframe to find the contents of a particular `cell` using its **row name** and **column name**, or operate on entire **rows** and **columns**
To use pandas, we need to import the module.
---
## Computational Thinking Concepts
The CT concepts expressed within Pandas include:
- `Decomposition` : Data interpretation, manipulation, and analysis of Pandas dataframes is an act of decomposition -- although the dataframes can be quite complex.
- `Abstraction` : The dataframe is a data representation abstraction that allows for placeholder operations, later substituted with specific contents for a problem; enhances reuse and readability. We leverage the principle of algebraic replacement using these abstractions.
- `Algorithms` : Data interpretation, manipulation, and analysis of dataframes are generally implemented as part of a supervisory algorithm.
---
## Module Set-Up
In principle, Pandas should be available in a default Anaconda install
- You should not have to do any extra installation steps to install the library in Python
- You do have to **import** the library in your scripts
How to check
- Simply open a code cell and run `import pandas`; if the notebook does not protest (i.e. no pink block of error), then you are good to go.
```
import pandas
```
If you do get an error, that means that you will have to install using `conda` or `pip`; you are on-your-own here! On the **content server** the process is:
1. Open a new terminal from the launcher
2. Change to root user `su` then enter the root password
3. `sudo -H /opt/jupyterhib/bin/python3 -m pip install pandas`
4. Wait until the install is complete; for security, user `compthink` is not in the `sudo` group
5. Verify the install by trying to execute `import pandas` as above.
The process above will be similar on a Macintosh, or Windows if you did not use an Anaconda distribution. Best is to have a sucessful anaconda install, or go to the [GoodJobUntilMyOrgansGetHarvested](https://apply.mysubwaycareer.com/us/en/).
If you have to do this kind of install, you will have to do some reading, some references I find useful are:
1. https://jupyterlab.readthedocs.io/en/stable/user/extensions.html
2. https://www.pugetsystems.com/labs/hpc/Note-How-To-Install-JupyterHub-on-a-Local-Server-1673/#InstallJupyterHub
3. https://jupyterhub.readthedocs.io/en/stable/installation-guide-hard.html (This is the approach on the content server which has a functioning JupyterHub)
```
#%reset -f
```
---
Now lets repeat the example using Pandas, here we will reuse the original lists, so there is some extra work to get the structures just so
```
import pandas
# Build dataframes from the raw list-of-lists tables defined earlier.
# Each raw table carries its column names in row 0, so after construction we
# label the columns and drop that header row.  The surviving records keep
# their original integer labels (1..n), which later cells rely on
# (e.g. partsdf.loc[[5,6]]).
# NOTE: set_axis(..., inplace=True) was removed in pandas 2.0, so the
# result is rebound instead of mutated in place.
partsdf = pandas.DataFrame(parts)
partsdf = partsdf.set_axis(parts[0][:], axis=1)  # label the columns
partsdf.drop(0, axis=0, inplace = True) # remove the first row that held the column names
partsdf
suppliersdf = pandas.DataFrame(suppliers)
suppliersdf = suppliersdf.set_axis(suppliers[0][:], axis=1)  # label the columns
suppliersdf.drop(0, axis=0, inplace = True) # remove the first row that held the column names
suppliersdf
shipmentsdf = pandas.DataFrame(shipments)
shipmentsdf = shipmentsdf.set_axis(shipments[0][:], axis=1)  # label the columns
shipmentsdf.drop(0, axis=0, inplace = True) # remove the first row that held the column names
shipmentsdf
```
Now lets learn about our three dataframes
```
partsdf.shape # shape is an attribute (not a method), hence no argument list i.e. no ()
suppliersdf.shape
shipmentsdf.shape
partsdf['COLOR'] # selecting a single column
partsdf[['COLOR','CITY']] # selecting multiple columns - note the names are supplied as a list
partsdf.loc[[5,6]] # selecting rows based on label via the loc[ ] indexer - note the row labels are supplied as a list
```
Now lets query our dataframes, lets list all parts whose weight is less than 13,
Recall from before:
- We have to select the right table
- We have to construct a search to find all instances of parts with weight less than 13
- Print the list slice with the requesite information
We have to do these same activities, but the syntax is far more readable:
```
partsdf[partsdf['WEIGHT'] < 13] # from dataframe named partsdf, find all rows in column "WEIGHT less than 13, and return these rows"
```
Now lets query our dataframe, lets list all parts whose weight is less than 13 - but only list the part number, color, and city
- We have to select the right table
- We have to construct a search to find all instances of parts with weight less than 13
- Print the list slice with the requesite information
Again a more readable syntax
```
partsdf[partsdf['WEIGHT'] < 13][['PNO','COLOR','CITY']] # from dataframe named partsdf, find all rows in column "WEIGHT less than 13, and return part number, color, and city from these rows"
```
---
### `head` method
Returns the first few rows, useful to infer structure
```
shipmentsdf.head() # if you supply an argument you control how many rows are shown i.e. shipmentsdf.head(3) returns first 3 rows
```
---
### `tail` method
Returns the last few rows, useful to infer structure
```
shipmentsdf.tail()
```
---
### `info` method
Returns the data model (data column count, names, data types)
```
#Info about the dataframe
suppliersdf.info()
```
---
### `describe` method
Returns summary statistics of each numeric column.
Also returns the minimum and maximum value in each column, and the IQR (Interquartile Range).
Again useful to understand structure of the columns.
Our toy example contains limited numeric data, so describe is not too useful - but in general its super useful for engineering databases
```
#Statistics of the dataframe
partsdf.describe()
```
---
### Examples with "numerical" data
```
%reset -f
import numpy # we just reset the worksheet, so reimport the packages
import pandas
```
### Now we shall create a proper dataframe
We will now do the same using pandas
```
# 5x4 dataframe of random integers in [1, 100); the second and third
# positional arguments supply the row index labels and column labels
mydf = pandas.DataFrame(numpy.random.randint(1,100,(5,4)), ['A','B','C','D','E'], ['W','X','Y','Z'])
mydf
```
---
### Getting the shape of dataframes
The shape method, which is available after the dataframe is constructed, will return the row and column rank (count) of a dataframe.
```
mydf.shape
```
---
### Appending new columns
To append a column simply assign a value to a new column name to the dataframe
```
mydf['new']= 'NA'
mydf
```
---
### Appending new rows
This is sometimes a bit trickier but here is one way:
- create a copy of a row, give it a new name.
- concatenate it back into the dataframe.
```
newrow = mydf.loc[['E']].rename(index={"E": "X"}) # create a single row, rename the index
newtable = pandas.concat([mydf,newrow]) # concatenate the row to bottom of df - note the syntax
newtable
```
---
### Removing Rows and Columns
To remove a column is straightforward, we use the drop method
```
newtable.drop('new', axis=1, inplace = True)
newtable
```
To remove a row, you really got to want to, easiest is probablty to create a new dataframe with the row removed
```
newtable = newtable.loc[['A','B','D','E','X']] # select all rows except C
newtable
# or just use drop with axis specify
newtable.drop('X', axis=0, inplace = True)
newtable
```
---
## Indexing
We have already been indexing, but a few examples follow:
```
newtable['X'] #Selecing a single column
newtable[['X','W']] #Selecing a multiple columns
newtable.loc['E'] #Selecing rows based on label via loc[ ] indexer
newtable
newtable.loc[['E','D','B']] #Selecing multiple rows based on label via loc[ ] indexer
newtable.loc[['B','E','D'],['X','Y']] #Selecting elements via both rows and columns via loc[ ] indexer
```
---
## Conditional Selection
```
# build a small dataframe from a dictionary (keys become column names)
mydf = pandas.DataFrame({'col1':[1,2,3,4,5,6,7,8],
'col2':[444,555,666,444,666,111,222,222],
'col3':['orange','apple','grape','mango','jackfruit','watermelon','banana','peach']})
mydf
#What fruit corresponds to the number 555 in ‘col2’? (boolean mask, then project col3)
mydf[mydf['col2']==555]['col3']
#What fruit corresponds to the minimum number in ‘col2’?
mydf[mydf['col2']==mydf['col2'].min()]['col3']
```
---
## Descriptor Functions
```
#Creating a dataframe from a dictionary
mydf = pandas.DataFrame({'col1':[1,2,3,4,5,6,7,8],
'col2':[444,555,666,444,666,111,222,222],
'col3':['orange','apple','grape','mango','jackfruit','watermelon','banana','peach']})
mydf
#Returns only the first five rows
mydf.head()
```
---
### `info` method
Returns the data model (data column count, names, data types)
```
#Info about the dataframe
mydf.info()
```
---
### `describe` method
Returns summary statistics of each numeric column.
Also returns the minimum and maximum value in each column, and the IQR (Interquartile Range).
Again useful to understand structure of the columns.
```
#Statistics of the dataframe
mydf.describe()
```
---
### Counting and Sum methods
There are also methods for counts and sums by specific columns
```
mydf['col2'].sum() #Sum of a specified column
```
The `unique` method returns a list of unique values (filters out duplicates in the list, underlying dataframe is preserved)
```
mydf['col2'].unique() #Returns the list of unique values along the indexed column
```
The `nunique` method returns a count of unique values
```
mydf['col2'].nunique() #Returns the total number of unique values along the indexed column
```
The `value_counts()` method returns the count of each unique value (kind of like a histogram, but each value is the bin)
```
mydf['col2'].value_counts() #Returns the number of occurences of each unique value
```
---
## Using functions in dataframes - symbolic apply
The power of **Pandas** is an ability to apply a function to each element of a dataframe series (or a whole frame) by a technique called symbolic (or synthetic programming) application of the function.
This employs principles of **pattern matching**, **abstraction**, and **algorithm development**; a holy trinity of Computational Thinking.
It's somewhat complicated but quite handy, best shown by an example:
```
def times2(x):
    """Return *x* scaled by 2 (works for any object supporting ``*``)."""
    return x * 2
print(mydf)
print('Apply the times2 function to col2')
# apply() calls times2 once per element of col2; the resulting Series is
# stored as a brand-new column on the same dataframe
mydf['reallynew'] = mydf['col2'].apply(times2) #Symbolic apply the function to each element of column col2, result is another dataframe
mydf
```
---
## Sorts
```
mydf.sort_values('col2', ascending = True) #Sorting based on columns
mydf.sort_values('col3', ascending = True) #Lexiographic sort
```
---
## Aggregating (Grouping Values) dataframe contents
```
#Creating a dataframe from a dictionary
data = {
'key' : ['A', 'B', 'C', 'A', 'B', 'C'],
'data1' : [1, 2, 3, 4, 5, 6],
'data2' : [10, 11, 12, 13, 14, 15],
'data3' : [20, 21, 22, 13, 24, 25]
}
mydf1 = pandas.DataFrame(data)
mydf1
# Grouping and summing values in all the columns based on the column 'key'
mydf1.groupby('key').sum()
# Grouping and summing values in the selected columns based on the column 'key'
mydf1.groupby('key')[['data1', 'data2']].sum()
```
---
## Filtering out missing values
Filtering and *cleaning* are often used to describe the process where data that does not support a narrative is removed ;typically for maintenance of profit applications, if the data are actually missing that is common situation where cleaning is justified.
```
#Creating a dataframe from a dictionary
df = pandas.DataFrame({'col1':[1,2,3,4,None,6,7,None],
'col2':[444,555,None,444,666,111,None,222],
'col3':['orange','apple','grape','mango','jackfruit','watermelon','banana','peach']})
df
```
Below we drop any row that contains a `NaN` code.
```
df_dropped = df.dropna()
df_dropped
```
Below we replace `NaN` codes with some value, in this case 0
```
df_filled1 = df.fillna(0)
df_filled1
```
Below we replace `NaN` codes with some value, in this case the mean value of of the column in which the missing value code resides.
```
# Fill each missing numeric value with its column's mean.  numeric_only=True
# is required because df has a string column ('col3'): pandas >= 2.0 raises
# a TypeError when mean() is asked to aggregate non-numeric data (older
# versions silently dropped it, which is the behaviour reproduced here).
df_filled2 = df.fillna(df.mean(numeric_only=True))
df_filled2
```
---
## Reading a File into a Dataframe
Pandas has methods to read common file types, such as `csv`,`xls`, and `json`.
Ordinary text files are also quite manageable.
> Specifying `engine='openpyxl'` in the read/write statement is required for the xml versions of Excel (xlsx). Default is .xls regardless of file name. If you still encounter read errors, try opening the file in Excel and saving as .xls (Excel 97-2004 Workbook) or as a CSV if the structure is appropriate.<br><br>
> You may have to install the packages using something like <br>`sudo -H /opt/jupyterhub/bin/python3 -m pip install xlwt openpyxl xlsxwriter xlrd` from the Anaconda Prompt interface (adjust the path to your system) or something like <br>`sudo -H /opt/conda/envs/python/bin/python -m pip install xlwt openpyxl xlsxwriter xlrd`
The files in the following examples are [CSV_ReadingFile.csv](http://54.243.252.9/engr-1330-webroot/1-Lessons/Lesson11/CSV_ReadingFile.csv), [Excel_ReadingFile.xlsx](http://54.243.252.9/engr-1330-webroot/1-Lessons/Lesson11/Excel_ReadingFile.xlsx),
```
readfilecsv = pandas.read_csv('CSV_ReadingFile.csv') #Reading a .csv file
print(readfilecsv)
```
Similar to reading and writing .csv files, you can also read and write .xslx files as below (useful to know this)
```
readfileexcel = pandas.read_excel('Excel_ReadingFile.xlsx', sheet_name='Sheet1', engine='openpyxl') #Reading a .xlsx file
print(readfileexcel)
```
# Writing a dataframe to file
```
#Creating and writing to a .csv file
readfilecsv = pandas.read_csv('CSV_ReadingFile.csv')
readfilecsv.to_csv('CSV_WritingFile1.csv') # write to local directory
readfilecsv = pandas.read_csv('CSV_WritingFile1.csv') # read the file back
print(readfilecsv)
#Creating and writing to a .csv file by excluding row labels
readfilecsv = pandas.read_csv('CSV_ReadingFile.csv')
readfilecsv.to_csv('CSV_WritingFile2.csv', index = False)
readfilecsv = pandas.read_csv('CSV_WritingFile2.csv')
print(readfilecsv)
#Creating and writing to a .xls file
readfileexcel = pandas.read_excel('Excel_ReadingFile.xlsx', sheet_name='Sheet1', engine='openpyxl')
readfileexcel.to_excel('Excel_WritingFile.xlsx', sheet_name='Sheet1' , index = False, engine='openpyxl')
readfileexcel = pandas.read_excel('Excel_WritingFile.xlsx', sheet_name='Sheet1', engine='openpyxl')
print(readfileexcel)
```
---
## References
Overland, B. (2018). Python Without Fear. Addison-Wesley
ISBN 978-0-13-468747-6.
Grus, Joel (2015). Data Science from Scratch: First Principles with Python O’Reilly
Media. Kindle Edition.
Precord, C. (2010) wxPython 2.8 Application Development Cookbook Packt Publishing Ltd. Birmingham , B27 6PA, UK
ISBN 978-1-849511-78-0.
Johnson, J. (2020). Python Numpy Tutorial (with Jupyter and Colab). Retrieved September 15, 2020, from https://cs231n.github.io/python-numpy-tutorial/
Willems, K. (2019). (Tutorial) Python NUMPY Array TUTORIAL. Retrieved September 15, 2020, from https://www.datacamp.com/community/tutorials/python-numpy-tutorial?utm_source=adwords_ppc
Willems, K. (2017). NumPy Cheat Sheet: Data Analysis in Python. Retrieved September 15, 2020, from https://www.datacamp.com/community/blog/python-numpy-cheat-sheet
W3resource. (2020). NumPy: Compare two given arrays. Retrieved September 15, 2020, from https://www.w3resource.com/python-exercises/numpy/python-numpy-exercise-28.php
Sorting https://www.programiz.com/python-programming/methods/list/sort
https://www.oreilly.com/library/view/relational-theory-for/9781449365431/ch01.html
https://realpython.com/pandas-read-write-files/#using-pandas-to-write-and-read-excel-files
---
## Laboratory 11
**Examine** (click) Laboratory 11 as a webpage at [Laboratory 11.html](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab11/Lab11.html)
**Download** (right-click, save target as ...) Laboratory 11 as a jupyterlab notebook from [Laboratory 11.ipynb](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab11/Lab11.ipynb)
<hr><hr>
## Exercise Set 11
**Examine** (click) Exercise Set 11 as a webpage at [Exercise 11.html](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab11/Lab11-TH.html)
**Download** (right-click, save target as ...) Exercise Set 11 as a jupyterlab notebook at [Exercise Set 11.ipynb](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab11/Lab11-TH.ipynb)
| github_jupyter |
```
from __future__ import division
from salishsea_tools import rivertools
from salishsea_tools import nc_tools
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
import arrow
import numpy.ma as ma
import sys
sys.path.append('/ocean/klesouef/meopar/tools/I_ForcingFiles/Rivers')
%matplotlib inline
# Constant and data ranges etc
year = 2015
smonth = 6  # start month
emonth = 6  # end month
# process daily files from the 15th to the 29th of the (single) month
startdate = arrow.get(year,smonth,15)
enddate = arrow.get(year,emonth,29)
print (startdate, enddate)
# get Fraser Flow data: whitespace-delimited text file of observed flows
filename = '/data/dlatorne/SOG-projects/SOG-forcing/ECget/Fraser_flow'
fraserflow = np.loadtxt(filename)
print (fraserflow)
#Fraser watershed
pd = rivertools.get_watershed_prop_dict_long_fraser('fraser')
totalfraser = (pd['Fraser1']['prop'] + pd['Fraser2']['prop'] +
pd['Fraser3']['prop'] + pd['Fraser4']['prop'])
# Climatology, Fraser Watershed
fluxfile = nc.Dataset('/ocean/jieliu/research/meopar/nemo-forcing/rivers/Salish_allrivers_monthly.nc','r')
climFraserWaterShed = fluxfile.variables['fraser'][:]
# Fraser River at Hope Seasonal Climatology (found in matlab using Mark's mean daily data)
climFraseratHope = (931, 878, 866, 1814, 4097, 6970, 5538, 3539, 2372, 1937, 1595, 1119)
NonHope = climFraserWaterShed - climFraseratHope
otherratio = 0.016
fraserratio = 1-otherratio
nonFraser = (otherratio * climFraserWaterShed.sum()/NonHope.sum()) * NonHope
afterHope = NonHope - nonFraser
print (pd['Fraser1']['i'],pd['Fraser1']['j'])
def calculate_daily_flow(r, criverflow):
    '''interpolate the daily values from the monthly values'''
    print(r.day, r.month)
    # Pick the two climatology months bracketing date r, each anchored at the 15th.
    if r.day < 16:
        prev_month, next_month = r.month - 1, r.month
        if prev_month == 0:
            prev_month = 12
    else:
        prev_month, next_month = r.month, r.month + 1
        if next_month == 13:
            next_month = 1
    # Linear weights from the day distances to the two anchor dates.
    # NOTE(review): anchors use the module-level `year`; at a December/January
    # boundary the wrapped month still uses the same year -- confirm intended.
    since_prev = r - arrow.get(year, prev_month, 15)
    until_next = arrow.get(year, next_month, 15) - r
    span = since_prev + until_next
    frac_elapsed = since_prev.days / span.days
    frac_remaining = until_next.days / span.days
    print(span, frac_elapsed, frac_remaining)
    # Closer to a month's anchor -> larger weight for that month.
    return (frac_remaining * criverflow[prev_month - 1]
            + frac_elapsed * criverflow[next_month - 1])
def write_file(r, flow, lat, lon, riverdepth):
    ''' given the flow and the riverdepth and the date, write the nc file

    Parameters: r (arrow date for the file name), flow (2-D runoff field),
    lat/lon (2-D coordinate arrays), riverdepth (2-D depth field).
    Writes NewRFraserCElse_y<YYYY>m<MM>d<DD>.nc in the current directory.
    '''
    directory = '.'
    # set up filename to follow NEMO conventions
    filename = 'NewRFraserCElse_y'+str(year)+'m'+'{:0=2}'.format(r.month)+'d'+'{:0=2}'.format(r.day)+'.nc'
    # print directory+'/'+filename
    nemo = nc.Dataset(directory+'/'+filename, 'w')
    nemo.description = 'Real Fraser Values, Daily Climatology for Other Rivers'
    # dimensions
    ymax, xmax = lat.shape
    nemo.createDimension('x', xmax)
    nemo.createDimension('y', ymax)
    nemo.createDimension('time_counter', None)
    # variables
    # latitude and longitude
    nav_lat = nemo.createVariable('nav_lat','float32',('y','x'),zlib=True)
    # BUG FIX: was `nav_lat = lat`, which rebound the local name and never
    # wrote any latitude data into the file; slice-assign instead.
    nav_lat[:] = lat
    nav_lon = nemo.createVariable('nav_lon','float32',('y','x'),zlib=True)
    # BUG FIX: the variable was created (bound to a throwaway name `x`) and
    # `nav_lon = lon` never wrote the longitude data; slice-assign instead.
    nav_lon[:] = lon
    # time
    time_counter = nemo.createVariable('time_counter', 'float32', ('time_counter'),zlib=True)
    time_counter.units = 'non-dim'
    time_counter[0:1] = range(1,2)
    # runoff
    rorunoff = nemo.createVariable('rorunoff', 'float32', ('time_counter','y','x'), zlib=True)
    # NOTE(review): netCDF convention spells this `_FillValue` and it must be
    # passed as fill_value= at creation time; kept as-is so existing readers
    # of these attributes see unchanged files.
    rorunoff._Fillvalue = 0.
    rorunoff._missing_value = 0.
    rorunoff._units = 'kg m-2 s-1'
    rorunoff[0,:] = flow
    # depth
    rodepth = nemo.createVariable('rodepth','float32',('y','x'),zlib=True)
    rodepth._Fillvalue = -1.
    rodepth.missing_value = -1.
    rodepth.units = 'm'
    # BUG FIX: was `rodepth = riverdepth`; the depth data was never written.
    rodepth[:] = riverdepth
    nemo.close()
    return
def fraser_correction(pd, fraserflux, r, afterHope, NonFraser, fraserratio, otherratio,
                      runoff):
    ''' for the Fraser Basin only, replace basic values with the new climatology after Hope and the
    observed values for Hope. Note, we are changing runoff only and not using/changing river
    depth

    pd: watershed property dict keyed by river name
    fraserflux: observed flow at Hope for date r
    runoff: 2-D runoff grid to be filled; returned updated
    '''
    for key in pd:
        if "Fraser" in key:
            # Fraser arms: observed Hope flow plus the interpolated below-Hope climatology
            flux = calculate_daily_flow(r,afterHope) + fraserflux
            subarea = fraserratio
        else:
            # all other rivers: non-Fraser climatology only
            flux = calculate_daily_flow(r,NonFraser)
            subarea = otherratio
        river = pd[key]
        # Scale by this river's share of its sub-basin and stamp it onto the runoff grid.
        runoff = rivertools.fill_runoff_array(flux*river['prop']/subarea,river['i'],
                                              river['di'],river['j'],river['dj'],river['depth'],
                                              runoff,np.empty_like(runoff))[0]
    return runoff
```
* Open climatology files
```
##open climatology file with modified fresh water point source
clim_rivers_edit = nc.Dataset('/ocean/jieliu/research/meopar/river-treatment/rivers_month_nole.nc','r' )
criverflow_edit = clim_rivers_edit.variables['rorunoff']   # monthly runoff climatology (month, y, x)
lat = clim_rivers_edit.variables['nav_lat']
lon = clim_rivers_edit.variables['nav_lon']
riverdepth = clim_rivers_edit.variables['rodepth']
criverflow_edit[0,500,395]   # spot-check one grid cell (notebook display)
# Write one daily forcing file per day in [startdate, enddate].
for r in arrow.Arrow.range('day', startdate, enddate):
    print (r)
    driverflow = calculate_daily_flow(r, criverflow_edit)
    # NOTE(review): identical to driverflow here; presumably kept so the
    # post-loop prints can detect in-place changes to driverflow -- confirm.
    storeflow = calculate_daily_flow(r, criverflow_edit)
    # Select the observed Hope flow row matching this year/month/day.
    step1 = fraserflow[fraserflow[:,0] == r.year]
    step2 = step1[step1[:,1] == r.month]
    step3 = step2[step2[:,2] == r.day]
    # print r.year, r.month, r.day, step3[0,3]
    runoff = fraser_correction(pd, step3[0,3] , r, afterHope, nonFraser, fraserratio, otherratio,
                               driverflow)
    write_file(r,runoff,lat,lon,riverdepth)
# Spot-check a Fraser-mouth grid cell after the loop.
ig = 500
jg = 395
print (criverflow_edit[7:10,500,395], driverflow[ig,jg])
print (storeflow[ig,jg], driverflow[ig,jg])
#ig = 351; jg = 345
#print storeflow[ig,jg], driverflow[ig,jg]
#ig = 749; jg=123
#print storeflow[ig,jg], driverflow[ig,jg]
# jan 0, feb 1, mar 2, apr 3, may 4, jun 5
# jul 6, aug 7, sep 8
```
| github_jupyter |
# **Quora Question Pairs**
## **1. Business Problem**
### **1.1 Description**
Quora is a place to gain and share knowledge—about anything. It’s a platform to ask questions and connect with people who contribute unique insights and quality answers. This empowers people to learn from each other and to better understand the world.
Over 100 million people visit Quora every month, so it's no surprise that many people ask similarly worded questions. Multiple questions with the same intent can cause seekers to spend more time finding the best answer to their question, and make writers feel they need to answer multiple versions of the same question. Quora values canonical questions because they provide a better experience to active seekers and writers, and offer more value to both of these groups in the long term.
> Credits: Kaggle
**Problem Statement**
- Identify which questions asked on Quora are duplicates of questions that have already been asked.
- This could be useful to instantly provide answers to questions that have already been answered.
- We are tasked with predicting whether a pair of questions are duplicates or not.
### **1.2 Sources/Useful Links**
- Source : https://www.kaggle.com/c/quora-question-pairs
Useful Links
- Discussions : https://www.kaggle.com/anokas/data-analysis-xgboost-starter-0-35460-lb/comments
- Kaggle Winning Solution and other approaches: https://www.dropbox.com/sh/93968nfnrzh8bp5/AACZdtsApc1QSTQc7X0H3QZ5a?dl=0
- Blog 1 : https://engineering.quora.com/Semantic-Question-Matching-with-Deep-Learning
- Blog 2 : https://towardsdatascience.com/identifying-duplicate-questions-on-quora-top-12-on-kaggle-4c1cf93f1c30
### **1.3 Real world/Business Objectives and Constraints**
1. The cost of a mis-classification can be very high.
2. You would want a probability of a pair of questions to be duplicates so that you can choose any threshold of choice.
3. No strict latency concerns.
4. Interpretability is partially important.
## **2. Machine Learning Problem**
### **2.1 Data**
#### **2.1.1 Data Overview**
- Data will be in a file Train.csv
- Train.csv contains 5 columns : qid1, qid2, question1, question2, is_duplicate
- Size of Train.csv - 60MB
- Number of rows in Train.csv = 404,290
#### **2.1.2 Example Data point**
<pre>
"id","qid1","qid2","question1","question2","is_duplicate"
"0","1","2","What is the step by step guide to invest in share market in india?","What is the step by step guide to invest in share market?","0"
"1","3","4","What is the story of Kohinoor (Koh-i-Noor) Diamond?","What would happen if the Indian government stole the Kohinoor (Koh-i-Noor) diamond back?","0"
"7","15","16","How can I be a good geologist?","What should I do to be a great geologist?","1"
"11","23","24","How do I read and find my YouTube comments?","How can I see all my Youtube comments?","1"
</pre>
### **2.2 Mapping the real world problem to an ML problem**
#### **2.2.1 Type of Machine Leaning Problem**
It is a binary classification problem, for a given pair of questions we need to predict if they are duplicate or not.
#### **2.2.2 Performance Metric**
Source: https://www.kaggle.com/c/quora-question-pairs#evaluation
Metric(s):
* log-loss : https://www.kaggle.com/wiki/LogarithmicLoss
* Binary Confusion Matrix
### **2.3 Train and Test Construction**
We build train and test by randomly splitting in the ratio of 70:30 or 80:20 whatever we choose as we have sufficient points to work with.
## **3. Exploratory Data Analysis**
```
!pip install Distance
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from subprocess import check_output
%matplotlib inline
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import os
import gc
import re
from nltk.corpus import stopwords
import distance
from nltk.stem import PorterStemmer
from bs4 import BeautifulSoup
```
### **3.1 Reading data and basic stats**
```
# Upload train.csv into the Colab runtime, then load it with pandas.
from google.colab import files
uploaded = files.upload()
df = pd.read_csv("train.csv")
print("Number of data points:",df.shape[0])
df.head()   # notebook display: first rows
df.info()   # notebook display: dtypes and null counts
```
We are given a minimal number of data fields here, consisting of:
- id: Looks like a simple rowID
- qid{1, 2}: The unique ID of each question in the pair
- question{1, 2}: The actual textual contents of the questions.
- is_duplicate: The label that we are trying to predict - whether the two questions are duplicates of each other.
#### **3.2.1 Distribution of data points among output classes**
- Number of duplicate (similar) and non-duplicate (non-similar) questions
```
# Class balance: bar chart of duplicate vs non-duplicate pair counts,
# plus the percentage of each class.
df.groupby("is_duplicate")['id'].count().plot.bar()
print('~> Total number of question pairs for training:\n {}'.format(len(df)))
print('~> Question pairs are not Similar (is_duplicate = 0):\n {}%'.format(100 - round(df['is_duplicate'].mean()*100, 2)))
print('\n~> Question pairs are Similar (is_duplicate = 1):\n {}%'.format(round(df['is_duplicate'].mean()*100, 2)))
```
#### **3.2.2 Number of unique questions**
```
# Pool both qid columns into one Series so each question occurrence is counted.
qids = pd.Series(df['qid1'].tolist() + df['qid2'].tolist())
unique_qs = len(np.unique(qids))
# Questions that appear in more than one pair.
qs_morethan_onetime = np.sum(qids.value_counts() > 1)
print ('Total number of Unique Questions are: {}\n'.format(unique_qs))
#print len(np.unique(qids))
print ('Number of unique questions that appear more than one time: {} ({}%)\n'.format(qs_morethan_onetime,qs_morethan_onetime/unique_qs*100))
print ('Max number of times a single question is repeated: {}\n'.format(max(qids.value_counts())))
q_vals=qids.value_counts()
q_vals=q_vals.values
# Bar chart: unique vs repeated question counts.
x = ["unique_questions" , "Repeated Questions"]
y = [unique_qs , qs_morethan_onetime]
plt.figure(figsize=(10, 6))
plt.title ("Plot representing unique and repeated questions ")
# NOTE(review): positional x, y args were removed in seaborn >= 0.12; newer
# versions need sns.barplot(x=x, y=y) -- confirm the pinned seaborn version.
sns.barplot(x,y)
plt.show()
```
#### **3.2.3 Checking for Duplicates**
```
#checking whether there are any repeated pair of questions
# Group by the (qid1, qid2) pair; the grouped frame has one row per distinct pair.
pair_duplicates = df[['qid1','qid2','is_duplicate']].groupby(['qid1','qid2']).count().reset_index()
# BUG FIX: the repeated-pair count is total rows minus distinct pairs;
# the original printed `pair_duplicates.shape[0] - df.shape[0]`, the
# negative of the intended quantity.
print ("Number of duplicate questions", df.shape[0] - (pair_duplicates).shape[0])
```
#### **3.2.4 Number of occurrences of each question**
```
# Log-scale histogram of how many times each question appears.
plt.figure(figsize=(20, 10))
plt.hist(qids.value_counts(), bins=160)
# NOTE(review): `nonposy` was renamed `nonpositive` in matplotlib 3.3+ -- confirm version.
plt.yscale('log', nonposy='clip')
plt.title('Log-Histogram of question appearance counts')
plt.xlabel('Number of occurences of question')
plt.ylabel('Number of questions')
print ('Maximum number of times a single question is repeated: {}\n'.format(max(qids.value_counts())))
```
#### **3.2.5 Checking for NULL values**
```
#Checking whether there are any rows with null values
# NOTE(review): positional axis arg (`any(1)` meaning any(axis=1)) is
# deprecated/removed in recent pandas -- confirm the pinned version.
nan_rows = df[df.isnull().any(1)]
print (nan_rows)
```
- There are two rows with null values in question2
```
# Filling the null values with ' '
df = df.fillna('')
# Re-check: should now print an empty frame.
nan_rows = df[df.isnull().any(1)]
print (nan_rows)
```
### **3.3 Basic Feature Extraction (before cleaning)**
Let us now construct a few features like:
- **freq_qid1** = Frequency of qid1's
- **freq_qid2** = Frequency of qid2's
- **q1len** = Length of q1
- **q2len** = Length of q2
- **q1_n_words** = Number of words in Question 1
- **q2_n_words** = Number of words in Question 2
- **word_Common** = (Number of common unique words in Question 1 and Question 2)
- **word_Total** =(Total num of words in Question 1 + Total num of words in Question 2)
- **word_share** = (word_common)/(word_Total)
- **freq_q1+freq_q2** = sum total of frequency of qid1 and qid2
- **freq_q1-freq_q2** = absolute difference of frequency of qid1 and qid2
```
# Load cached basic features if present; otherwise compute them and cache to CSV.
if os.path.isfile('df_fe_without_preprocessing_train.csv'):
    df = pd.read_csv("df_fe_without_preprocessing_train.csv",encoding='latin-1')
else:
    # Frequency of each question id across the dataset.
    df['freq_qid1'] = df.groupby('qid1')['qid1'].transform('count')
    df['freq_qid2'] = df.groupby('qid2')['qid2'].transform('count')
    # Character lengths.
    df['q1len'] = df['question1'].str.len()
    df['q2len'] = df['question2'].str.len()
    # Word counts (split on single spaces).
    df['q1_n_words'] = df['question1'].apply(lambda row: len(row.split(" ")))
    df['q2_n_words'] = df['question2'].apply(lambda row: len(row.split(" ")))
    def normalized_word_Common(row):
        # Number of lowercased, stripped words shared by the two questions.
        w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" ")))
        w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" ")))
        return 1.0 * len(w1 & w2)
    df['word_Common'] = df.apply(normalized_word_Common, axis=1)
    def normalized_word_Total(row):
        # Sum of the unique-word counts of both questions.
        w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" ")))
        w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" ")))
        return 1.0 * (len(w1) + len(w2))
    df['word_Total'] = df.apply(normalized_word_Total, axis=1)
    def normalized_word_share(row):
        # Shared-word fraction.  Denominator cannot be 0: ''.split(" ") == [''].
        w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" ")))
        w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" ")))
        return 1.0 * len(w1 & w2)/(len(w1) + len(w2))
    df['word_share'] = df.apply(normalized_word_share, axis=1)
    # Combined qid-frequency features.
    df['freq_q1+q2'] = df['freq_qid1']+df['freq_qid2']
    df['freq_q1-q2'] = abs(df['freq_qid1']-df['freq_qid2'])
    # Cache so subsequent runs skip the recomputation above.
    df.to_csv("df_fe_without_preprocessing_train.csv", index=False)
df.head()
```
#### **3.3.1 Analysis of some of the extracted features**
- Note that some questions contain only a single word.
```
# Shortest questions by word count, and how many one-word questions exist.
print ("Minimum length of the questions in question1 : " , min(df['q1_n_words']))
print ("Minimum length of the questions in question2 : " , min(df['q2_n_words']))
print ("Number of Questions with minimum length [question1] :", df[df['q1_n_words']== 1].shape[0])
print ("Number of Questions with minimum length [question2] :", df[df['q2_n_words']== 1].shape[0])
```
##### **3.3.1.1 Feature: word_share**
```
# word_share vs class: violin plot (left) and class-conditional densities (right).
plt.figure(figsize=(12, 8))
plt.subplot(1,2,1)
sns.violinplot(x = 'is_duplicate', y = 'word_share', data = df[0:])
plt.subplot(1,2,2)
# NOTE(review): sns.distplot is deprecated (removed in seaborn 0.14); newer code uses histplot/kdeplot.
sns.distplot(df[df['is_duplicate'] == 1.0]['word_share'][0:] , label = "1", color = 'red')
sns.distplot(df[df['is_duplicate'] == 0.0]['word_share'][0:] , label = "0" , color = 'blue' )
plt.show()
```
- The distributions for normalized word_share have some overlap on the far right-hand side, i.e., there are quite a lot of questions with high word similarity
- The average word share and Common no. of words of qid1 and qid2 is more when they are duplicate(Similar)
##### **3.3.1.2 Feature: word_Common**
```
# word_Common vs class: violin plot (left) and class-conditional densities (right).
plt.figure(figsize=(12, 8))
plt.subplot(1,2,1)
sns.violinplot(x = 'is_duplicate', y = 'word_Common', data = df[0:])
plt.subplot(1,2,2)
sns.distplot(df[df['is_duplicate'] == 1.0]['word_Common'][0:] , label = "1", color = 'red')
sns.distplot(df[df['is_duplicate'] == 0.0]['word_Common'][0:] , label = "0" , color = 'blue' )
plt.show()
```
The distributions of the word_Common feature in similar and non-similar questions are highly overlapping
| github_jupyter |
# River Sediment Supply Modeling with HydroTrend
If you have never used the CSDMS Python Modeling Toolkit (PyMT), learn how to use it here.
We are using a theoretical river basin of ~1990 km2, with 1200m of relief and a river length of
~100 km. All parameters that are shown by default once the HydroTrend Model is loaded are based
on a present-day, temperate climate. Whereas these runs are not meant to be specific, we are
using parameters that are realistic for the [Waiapaoa River][map_of_waiapaoa] in New Zealand. The Waiapaoa River
is located on North Island and receives high rain and has erodible soils, so the river sediment
loads are exceptionally high. It has been called the *"dirtiest small river in the world"*.
To learn more about HydroTrend and its approach to sediment supply modeling, you can download
this [presentation][hydrotrend_presentation].
[map_of_waiapaoa]: https://www.google.com/maps/place/Waipaoa+River/@-38.5099042,177.7668002,71814m/data=!3m1!1e3!4m5!3m4!1s0x6d65def908624859:0x2a00ef6165e1dfa0!8m2!3d-38.5392405!4d177.8843782
[hydrotrend_presentation]: https://csdms.colorado.edu/wiki/File:SedimentSupplyModeling02_2013.ppt
```
import matplotlib.pyplot as plt
import numpy as np
import pymt
hydrotrend = pymt.plugins.Hydrotrend()
```
HydroTrend will now be active in the WMT. The HydroTrend Parameter list is used to set the parameters for any simulation. You can set the parameters by going through the different tabs in the parameter list. Once your input is set up, you save the information. Then, you can run it by hitting the arrow run button. This way you generate a job script that can be submitted to Beach-the CSDMS High Performance Computing System. Provide your Beach account information (i.e. user name and password) to get the run started. The status page allows you to keep track of a simulation. From the status page you can eventually download your output files.
## Exercise 1: Explore the base-case river simulation
The default "base-case" simulation for 50 years at daily time-step. This means you run Hydrotrend for 18,250 days total.
```
# Configure and initialize HydroTrend, then step it one day at a time,
# collecting four river-mouth output series.
config_file, config_folder = hydrotrend.setup()
hydrotrend.initialize(config_file, config_folder)
hydrotrend.output_var_names   # notebook display: available outputs
hydrotrend.get_start_time(), hydrotrend.get_current_time(), hydrotrend.get_end_time(), hydrotrend.get_time_step(), hydrotrend.get_time_units()
# End time in model time units gives the number of daily steps.
n_days = int(hydrotrend.get_end_time())
q = np.empty(n_days)    # water discharge
qs = np.empty(n_days)   # suspended sediment load
cs = np.empty(n_days)   # suspended sediment concentration
qb = np.empty(n_days)   # bedload
for i in range(n_days):
    hydrotrend.update()
    q[i] = hydrotrend.get_value("channel_exit_water__volume_flow_rate")
    qs[i] = hydrotrend.get_value("channel_exit_water_sediment~suspended__mass_flow_rate")
    cs[i] = hydrotrend.get_value("channel_exit_water_sediment~suspended__mass_concentration")
    qb[i] = hydrotrend.get_value("channel_exit_water_sediment~bedload__mass_flow_rate")
plt.plot(qs)
```
## Q1a: Calculate mean water discharge Q, mean suspended load Qs, mean sediment concentration Cs, and mean bedload Qb.
*Note all values are reported as daily averages. What are the units?*
*A1a*:
```
# Daily means of each output, paired with that variable's units.
# BUG FIX: the qs/cs rows previously paired each mean with the OTHER
# variable's units (cs.mean with mass_flow_rate units, qs.mean with
# mass_concentration units); each mean now matches its own variable.
(
    (q.mean(), hydrotrend.get_var_units("channel_exit_water__volume_flow_rate")),
    (qs.mean(), hydrotrend.get_var_units("channel_exit_water_sediment~suspended__mass_flow_rate")),
    (cs.mean(), hydrotrend.get_var_units("channel_exit_water_sediment~suspended__mass_concentration")),
    (qb.mean(), hydrotrend.get_var_units("channel_exit_water_sediment~bedload__mass_flow_rate"))
)
hydrotrend.get_var_units("channel_exit_water__volume_flow_rate")
```
## Q1b: Identify the highest flood event for this simulation. Is this the 50-year flood? Plot the year of Q-data which includes the flood.
*A1b*:
```
# Find the day of peak discharge and plot the full year containing it.
flood_day = q.argmax()
# BUG FIX: `flood_year` was never assigned -- the original had a bare
# `flood_year // 365` expression, which raised NameError.
flood_year = flood_day // 365
plt.plot(q[flood_year * 365: (flood_year + 1) * 365])
q.max()
```
## Q1c: Calculate the mean annual sediment load for this river system.
*A1c*:
```
# Fold the daily suspended-load series into (years, 365) and sum per year.
qs_by_year = qs.reshape((-1, 365))
qs_annual = qs_by_year.sum(axis=1)
plt.plot(qs_annual)
qs_annual.mean()   # mean annual sediment load
```
## Q1d: How does the sediment yield of this river system compare to the present-day Mississippi River?
*To compare the mean annual load to other river systems you will need to calculate its sediment yield.
Sediment Yield is defined as sediment load normalized for the river drainage area;
so it can be reported in T/km2/yr.*
*A1d*:
# Exercise 2: How does a river system respond to climate change; a few simple scenarios for the coming century.
Now we will look at changing climatic conditions in a small river basin. We'll change temperature and precipitation regimes and compare discharge and sediment load characteristics to the original basecase. And we will look at the potential implications of changes in the peak events.
Modify the mean annual temperature T, the mean annual precipitation P, and its the variability of the yearly means through the standard deviation. You can specify trends over time, by modifying the parameter ‘change in mean annual temperature’ or ‘change in mean annual precipitation’. HydroTrend runs at daily timestep, and thus can deal with seasonal variations in temperature and precipitation for a basin. The model ingests monthly mean input values for these two climate parameters and their monthly standard deviations, ideally the values would be derived from analysis of a longterm record of daily climate data. You can adapt seasonal trends by using the monthly values.
## Q2a: What happens to discharge, suspended load and bedload if the mean annual temperature in this specific river basin increases by 4 °C over 50 years?
*A2a*:
## Q2b: How much increase of discharge do you see after 50 years? How is the average suspended load affected? How does the bedload change? What happens to the peak event; look at the maximum discharge event of the last 10 years of the simulation?
*A2b*:
## Q2c: In addition, climate model predictions indicate that perhaps precipitation intensity and variability could increase. How would you model this; discuss all your input settings for precipitation.
*A2c*:
# Exercise 3: How do humans affect river sediment loads?
Here we will look at the effect of human in a river basin. Humans can accelerate erosion
processes, or reduce the sediment loads traveling through a river system. Both concepts can
be simulated, first run 3 simulations systematically increasing the anthropogenic factor (0.5-8.0 is the range).
## Q3a: Describe in your own words the meaning of the human-induced erosion factor, (Eh) (Syvitski & Milliman, 2007). This factor is parametrized as the “Antropogenic” factor in HydroTrend. See references for the paper.
*A3a*:
Model a scenario of a drinking water supply reservoir to be planned in the coastal area of the basin. The reservoir would have 800 km² of contributing drainage area and be 3 km long, 200 m wide, and 100 m deep. Set up a simulation with these parameters.
## Q3b: How would such a reservoir affect the sediment load at the coast (i.e. downstream of the reservoir)?
*A3b*:
| github_jupyter |
```
# IPython magics: auto-reload edited modules, then load the AiiDA profile/ORM.
%load_ext autoreload
%autoreload 2
%aiida
```
# Create structure using pymatgen
```
#!pip install pymatgen
import sys
sys.path.insert(0, '../src/')   # local helpers: view_top, *_idx, find_*_center, ...
from view import *
from functions import *
from pymatgen.core.structure import Structure, Lattice
from pymatgen.transformations.advanced_transformations import CubicSupercellTransformation
# Graphene primitive cell: hexagonal P6/mmm, a = 2.46 with 15 of vacuum along c.
struct = Structure.from_spacegroup('P6/mmm',
                                   Lattice.hexagonal(2.46,15),
                                   ['C'],
                                   [[1/3,2/3,1/2]])
tfms = CubicSupercellTransformation().apply_transformation(struct)
len(tfms)   # notebook display: number of sites
tfms = tfms*[2,2,1]   # 2x2 in-plane supercell
view_top(tfms)
tfms.lattice
```
## Definitions for creating sac structures
### Single TM
```
def create_small_(element, adsorbate=True):
    """Build the small single-TM flake for `element`.

    Trims the supercell to a small window, opens the central vacancy,
    nitrogen-coordinates it, hydrogen-terminates the edges, and (optionally)
    places the TM atom plus an H adsorbate above it.  Returns a Structure.
    """
    flake = CubicSupercellTransformation().apply_transformation(struct)
    # Drop every site outside the kept 3..14 window in both x and y.
    outside = [n for n, xyz in enumerate(flake.cart_coords)
               if not (3 <= xyz[0] <= 14 and 3 <= xyz[1] <= 14)]
    flake.remove_sites(outside)
    flake.remove_sites(find_corner_idx(flake))
    flake.remove_sites(mid_idx(flake))   # central vacancy that hosts the TM
    for n in around_idx(flake):
        flake[n] = 'N'                   # nitrogen-coordinate the vacancy
    for n in h_idx(flake):
        flake[n] = 'H'                   # hydrogen-terminate the edges
    if adsorbate:
        # Append order matters: centers are recomputed after the H is added.
        flake.append('H', [find_x_center(flake), find_y_center(flake), 9.0],
                     coords_are_cartesian=True)
        flake.append(element, [find_x_center(flake), find_y_center(flake), 7.5],
                     coords_are_cartesian=True)
    return flake
def create_large_(element,adsorbate=True):
    """Build the large single-TM flake for `element`.

    Same construction as create_small_ but on a 2x2 supercell with a wider
    trim window and a second hydrogen-termination pass (h_idx2).
    Returns a pymatgen Structure.
    """
    sup = CubicSupercellTransformation().apply_transformation(struct)
    sup = sup*[2,2,1]
    # Collect sites outside the kept x/y window.
    idx = []
    for i,coords in enumerate(sup.cart_coords):
        if coords[0] < 1.5: idx.append(i)
        elif coords[0] > 23: idx.append(i)
        elif coords[1] < 1 : idx.append(i)
        elif coords[1] > 20 : idx.append(i)
    sup.remove_sites(idx)
    # NOTE(review): create_small_ calls find_corner_idx -- confirm both helper spellings exist.
    sup.remove_sites(find_corner_ix(sup,mid=False))
    sup.remove_sites(mid_idx(sup))   # central vacancy that hosts the TM
    for i in around_idx(sup):
        sup[i] = 'N'   # nitrogen-coordinate the vacancy
    for i in h_idx(sup): sup[i]='H'    # hydrogen-terminate the edges
    for i in h_idx2(sup): sup[i]='H'
    if adsorbate==True:
        # H adsorbate above the TM (z=9.0), TM in the vacancy (z=7.5).
        sup.append('H',[find_x_center(sup),find_y_center(sup),9.0],coords_are_cartesian=True)
        sup.append(element,[find_x_center(sup),find_y_center(sup),7.5],coords_are_cartesian=True)
    return sup
def create_medium_(element,adsorbate=True):
    """Build the medium single-TM flake for `element`.

    2x2 supercell trimmed to an intermediate window; single hydrogen-termination
    pass (no h_idx2, unlike create_large_).  Returns a pymatgen Structure.
    """
    sup = CubicSupercellTransformation().apply_transformation(struct)
    sup = sup*[2,2,1]
    # Collect sites outside the kept x/y window.
    idx = []
    for i,coords in enumerate(sup.cart_coords):
        if coords[0] < 8 : idx.append(i)
        elif coords[0] > 24.5: idx.append(i)
        elif coords[1] < 9.5 : idx.append(i)
        elif coords[1] > 24.8 : idx.append(i)
    sup.remove_sites(idx)
    sup.remove_sites(find_corner_ix(sup))
    sup.remove_sites(mid_idx(sup))   # central vacancy that hosts the TM
    for i in around_idx(sup):
        sup[i] = 'N'   # nitrogen-coordinate the vacancy
    for i in h_idx(sup): sup[i]='H'    # hydrogen-terminate the edges
    if adsorbate==True:
        # H adsorbate above the TM (z=9.0), TM in the vacancy (z=7.5).
        sup.append('H',[find_x_center(sup),find_y_center(sup),9.0],coords_are_cartesian=True)
        sup.append(element,[find_x_center(sup),find_y_center(sup),7.5],coords_are_cartesian=True)
    return sup
```
### Co-TM
```
def create_large_co(element1,element2,adsorbate=True):
    """Build the large flake with TWO adjacent TM sites (co-doped).

    Same trim as create_large_ but uses the *_co helpers, which open a double
    vacancy and return two x-centers.  Each TM sits at z=7.5 with an H
    adsorbate above it at z=9.0 when adsorbate is True.
    """
    sup = CubicSupercellTransformation().apply_transformation(struct)
    sup = sup*[2,2,1]
    # Collect sites outside the kept x/y window.
    idx = []
    for i,coords in enumerate(sup.cart_coords):
        if coords[0] < 1.5: idx.append(i)
        elif coords[0] > 23: idx.append(i)
        elif coords[1] < 1 : idx.append(i)
        elif coords[1] > 20 : idx.append(i)
    sup.remove_sites(idx)
    sup.remove_sites(find_corner_ix(sup,mid=False))
    sup.remove_sites(mid_idx_co(sup))   # double vacancy for the two TMs
    for i in around_idx_co(sup):
        sup[i] = 'N'
    for i in h_idx(sup): sup[i]='H'
    for i in h_idx2(sup): sup[i]='H'
    # Cartesian target positions (computed even when adsorbate is False).
    tm1 = [find_x_center_co(sup)[0],
           find_y_center_co(sup),
           7.5]
    tm2 = [find_x_center_co(sup)[1],
           find_y_center_co(sup),
           7.5]
    h1 = [find_x_center_co(sup)[0],
          find_y_center_co(sup),
          9.0]
    h2 = [find_x_center_co(sup)[1],
          find_y_center_co(sup),
          9.0]
    if adsorbate==True:
        sup.append('H',h1,coords_are_cartesian=True)
        sup.append('H',h2,coords_are_cartesian=True)
        sup.append(element1,tm1,coords_are_cartesian=True) #(11.07,12.07)
        sup.append(element2,tm2,coords_are_cartesian=True) #(13.53,12.07)
    return sup
```
## View Structures
```
# Visual sanity checks of the generated flakes.
view_top(create_large_('Sc')) # Single TM
view_top(create_large_co('Sc','V',adsorbate=True)) # Co TM
```
## Create list of structures
```
# Transition metals screened in this study.
tms = ['Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Zr','Nb','Mo','Tc',\
       'Ru','Rh','Pd','Ag','Cd','Hf','Ta','W','Re','Os','Ir','Pt','Au']
# IDIOM: the six manual append loops are replaced by comprehensions;
# same structures in the same order.
# *_total lists: catalyst with the H adsorbate on the TM site.
small_total = [create_small_(tm) for tm in tms]
medium_total = [create_medium_(tm) for tm in tms]
large_total = [create_large_(tm) for tm in tms]
# Bare catalyst structures (no adsorbate).
small = [create_small_(tm, adsorbate=False) for tm in tms]
medium = [create_medium_(tm, adsorbate=False) for tm in tms]
large = [create_large_(tm, adsorbate=False) for tm in tms]
len(small), len(medium), len(large), len(small_total), len(medium_total), len(large_total)
```
## Write POSCAR file
```
# Export one co-doped structure in VASP POSCAR format.
from pymatgen.io.vasp.inputs import Poscar
supp = create_large_co('Sc','V',adsorbate=True)
test = Poscar(supp)
test.write_file('POSCAR_co')
```
## Run aiida for all LF
```
# Submit one low-fidelity (LF) SCF calculation per structure via AiiDA /
# Quantum ESPRESSO.  `load_code` comes from the %aiida magic.
from aiida import orm
from aiida import plugins
from aiida.plugins import DataFactory
from aiida.engine import submit
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
PwBaseWorkChain = plugins.WorkflowFactory('quantumespresso.pw.base')
code = load_code('qe-6.6-pw@arcc-msi')
# NOTE(review): label below says 'LF-smallH' but `small` holds the BARE
# (adsorbate=False) structures -- confirm which set was intended.
structures = small
StructureData = DataFactory("structure")
KpointsData = DataFactory('array.kpoints')
kpoints = KpointsData()
kpoints.set_kpoints_mesh([1,1,1])   # Gamma-only mesh for the LF screen
inputs = {
    'pw': {
        'code': code,
        'parameters': orm.Dict(dict={
            'CONTROL': {
                'calculation':'scf',
            },
            'SYSTEM':{
                'ecutwfc':150.,          # LF plane-wave cutoff
                'occupations':'smearing',
                'degauss':0.02
            },
            'ELECTRONS':{
                'conv_thr':1.e-6,
            }
        }),
        'metadata':{
            'label':'LF-smallH',
            'options':{
                'account':'rd-hea',
                'resources':{
                    'num_machines':1,
                    'num_cores_per_mpiproc':32
                },
                'max_wallclock_seconds':1*24*60*60,   # 1 day
                'max_memory_kb':int(128e6)
            }
        }
    },
    'kpoints': kpoints,
}
# One submission per structure; SSSP pseudopotentials matched automatically.
for structure in structures:
    inputs['pw']['structure'] = StructureData(pymatgen_structure=structure)
    inputs['pw']['pseudos'] = get_pseudos_from_structure(StructureData(pymatgen=structure),'SSSP')
    submit(PwBaseWorkChain, **inputs)
```
## Run aiida for HF calcs
```
# Resubmit a subset of the adsorbate structures at high fidelity (HF) with relaxation.
a = np.array([13,14,15,16,20,21,22,23,24])
small_redo = [small_total[i] for i in a] # accidentally did 6-LARGE
view_top(small_redo[1])
# NOTE(review): small_total has one entry per TM (~27); index 613 raises
# IndexError -- looks like leftover notebook scratch.
len(small_total[613])
from aiida import orm
from aiida import plugins
from aiida.plugins import DataFactory, WorkflowFactory
from aiida.engine import submit
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
# PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base')
PwRelaxWorkChain = WorkflowFactory('quantumespresso.pw.relax')
code = load_code('qe-6.6-pw@arcc-msi')
structures = small_redo
StructureData = DataFactory("structure")
KpointsData = DataFactory('array.kpoints')
kpoints = KpointsData()
kpoints.set_kpoints_mesh([3,3,1])   # denser mesh than the LF 1x1x1 screen
inputs = {
    'base':{
        'pw': {
            'code': code,
            'parameters': orm.Dict(dict={
                'SYSTEM':{
                    'ecutwfc':300.,          # HF plane-wave cutoff
                    'occupations':'smearing',
                    'degauss':0.02
                },
                'ELECTRONS':{
                    'conv_thr':1.e-6,
                }
            }),
            'metadata':{
                'label':'HF-medium',
                'options':{
                    'account':'rd-hea',
                    'resources':{
                        'num_machines':4,
                        'num_cores_per_mpiproc':32
                    },
                    'max_wallclock_seconds':2*24*60*60,   # 2 days
                    'max_memory_kb':int(128e6)
                }
            }
        },
        'kpoints': kpoints,
    },
    'relaxation_scheme':orm.Str('relax')
}
for structure in structures:
    inputs['structure'] = StructureData(pymatgen=structure)
    inputs['base']['pw']['pseudos'] = get_pseudos_from_structure(StructureData(pymatgen=structure),'SSSP')
    # inputs['base_final_scf']['pw']['pseudos'] = get_pseudos_from_structure(StructureData(pymatgen=structure),'SSSP')
    submit(PwRelaxWorkChain, **inputs)
```
## Results
### 150 eV SMALL LF
```
# Collect LF (150 Ry) bare-catalyst energies from finished AiiDA nodes.
lst_out = [15025,15029,15036,15040,15047,15051,15058,15062,15069,\
           15073,15077,15084,15091,15095,15099,15106,15110,15117,\
           15121,15128,15132,15139,15143,15150,15154,15161,15165]
LF_small_out=[]
# NOTE(review): duplicate of the list above -- harmless but redundant.
lst_out = [15025,15029,15036,15040,15047,15051,15058,15062,15069,\
           15073,15077,15084,15091,15095,15099,15106,15110,15117,\
           15121,15128,15132,15139,15143,15150,15154,15161,15165]
# NOTE(review): `small_comp` and the pandas alias are defined in a LATER cell;
# this relies on out-of-order notebook execution -- confirm.
d_1 = pd.DataFrame(small_comp,columns=['comp'])
for i in lst_out:
    LF_small_out.append(load_node(i).outputs.output_parameters.\
                        dict.energy)
d_1['Ecat_LF']=LF_small_out
d_1.head()
d_1
# Adsorption energy; 'EcatH_LF' is added by a later cell. The +16 offset is
# presumably the H reference energy -- TODO confirm.
d_1['E_LF']=d_1['EcatH_LF']-d_1['Ecat_LF']+16
d_1.reset_index().plot(x='index', y='E_LF',kind='bar')
```
### 150 eV SMALL+H LF
```
# Collect LF (150 Ry) catalyst+H energies and attach them to d_1.
tms = ['Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu',\
       'Zn','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag',\
       'Cd','Hf','Ta','W','Re','Os','Ir','Pt','Au']
import pandas as pd
from aiida import orm
from aiida import plugins
from aiida.plugins import DataFactory
from aiida.engine import submit
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
StructureData = DataFactory("structure")
KpointsData = DataFactory('array.kpoints')
# Row labels: chemical formulas of the bare structures.
small_comp = [StructureData(pymatgen_structure=f).get_formula() for f in small]
# d_2 = pd.DataFrame(small_comp,columns=['comp'])
LF_small_out=[]
lst_out = [14699,14703,14710,14714,14721,14725,14732,14736,14743,\
           14747,14754,14758,14765,14769,14776,14780,14787,14791,\
           14798,14802,14809,14813,14820,14824,14831,14835,14839]
for i in lst_out:
    LF_small_out.append(load_node(i).outputs.output_parameters.\
                        dict.energy)
d_1['EcatH_LF']=LF_small_out
d_1.head()
d_1.to_csv('LF_small_150.csv')
```
### 300 eV SMALL HF
```
# Collect HF (300 Ry) bare-catalyst energies into a new frame d_2.
tms = ['Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu',\
       'Zn','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag',\
       'Cd','Hf','Ta','W','Re','Os','Ir','Pt','Au']
import pandas as pd
from aiida import orm
from aiida import plugins
from aiida.plugins import DataFactory
from aiida.engine import submit
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
StructureData = DataFactory("structure")
KpointsData = DataFactory('array.kpoints')
small_comp = [StructureData(pymatgen_structure=f).get_formula() for f in small]
d_2 = pd.DataFrame(small_comp,columns=['comp'])
LF_small_out=[]
lst_out = [12388,12396,12407,12418,12429,12440,12451,12462,12470,\
           12481,12492,12503,12511,12807,12815,12826,12837,12848,\
           12859,12867,12878,12889,12900,12908,12919,12930,12941]
for i in lst_out:
    LF_small_out.append(load_node(i).outputs.output_parameters.\
                        dict.energy)
d_2['Ecat_HF']=LF_small_out
d_2.head()
```
### 300 eV SMALL+H HF
```
# Transition-metal symbols corresponding, by position, to the workchain PKs
# in lst_out below -- TODO confirm ordering against the launch script.
tms = ['Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu',\
'Zn','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag',\
'Cd','Hf','Ta','W','Re','Os','Ir','Pt','Au']
import pandas as pd
from aiida import orm
from aiida import plugins
from aiida.plugins import DataFactory
from aiida.engine import submit
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
StructureData = DataFactory("structure")
KpointsData = DataFactory('array.kpoints')
# Chemical formula of every small structure ('small' is defined earlier in the notebook).
small_comp = [StructureData(pymatgen_structure=f).get_formula() for f in small]
# d_2 = pd.DataFrame(small_comp,columns=['comp'])
LF_small_out=[]
# PKs of the completed 300 eV small+H (hydrogen-adsorbed) high-fidelity workchains.
lst_out = [13285,13293,13304,13315,13326,13678,13905,13356,13367,\
13378,13916,13697,13924,14395,14403,14414,14425,14046,\
14057,14068,14436,14447,14458,14466,14477,14128,14139]
# Collect the total energy of each relaxed structure from its output parameters.
for i in lst_out:
    LF_small_out.append(load_node(i).outputs.output_parameters.\
dict.energy)
# Store next to the bare-catalyst energies in d_2 (created two cells above).
d_2['EcatH_HF']=LF_small_out
d_2.head()
d_2.to_csv('HF_small_300.csv')
# Adsorption energy difference; the +16 offset presumably references the
# hydrogen chemical potential -- TODO confirm against the workflow definition.
d_2['E_HF']=d_2['EcatH_HF']-d_2['Ecat_HF']+16
d_2.reset_index().plot(x='index', y='E_HF',kind='bar')
```
| github_jupyter |
# NetworKit User Guide
## About NetworKit
[NetworKit][networkit] is an open-source toolkit for high-performance
network analysis. Its aim is to provide tools for the analysis of large
networks in the size range from thousands to billions of edges. For this
purpose, it implements efficient graph algorithms, many of them parallel to
utilize multicore architectures. These are meant to compute standard measures
of network analysis, such as degree sequences, clustering coefficients and
centrality. In this respect, NetworKit is comparable
to packages such as [NetworkX][networkx], albeit with a focus on parallelism
and scalability. NetworKit is also a testbed for algorithm engineering and
contains a few novel algorithms from recently published research, especially
in the area of community detection.
[networkit]: http://parco.iti.kit.edu/software/networkit.shtml
[networkx]: http://networkx.github.com/
## Introduction
This notebook provides an interactive introduction to the features of NetworKit, consisting of text and executable code. We assume that you have read the Readme and successfully built the core library and the Python module. Code cells can be run one by one (e.g. by selecting the cell and pressing `shift+enter`), or all at once (via the `Cell->Run All` command). Try running all cells now to verify that NetworKit has been properly built and installed.
## Preparation
This notebook creates some plots. To show them in the notebook, matplotlib must be imported and we need to activate matplotlib's inline mode:
```
%matplotlib inline
import matplotlib.pyplot as plt
```
NetworKit is a hybrid built from C++ and Python code: Its core functionality is implemented in C++ for performance reasons, and then wrapped for Python using the Cython toolchain. This allows us to expose high-performance parallel code as a normal Python module. On the surface, NetworKit is just that and can be imported accordingly:
```
import networkit as nk
```
## Reading and Writing Graphs
Let us start by reading a network from a file on disk: `PGPgiantcompo.graph` network. In the course of this tutorial, we are going to work on the PGPgiantcompo network, a social network/web of trust in which nodes are PGP keys and an edge represents a signature from one key on another. It is distributed with NetworKit as a good starting point.
There is a convenient function in the top namespace which tries to guess the input format and select the appropriate reader:
```
G = nk.readGraph("../input/PGPgiantcompo.graph", nk.Format.METIS)
```
There is a large variety of formats for storing graph data in files. For NetworKit, the currently best supported format is the [METIS adjacency format](http://people.sc.fsu.edu/~jburkardt/data/metis_graph/metis_graph.html). Various example graphs in this format can be found [here](http://www.cc.gatech.edu/dimacs10/downloads.shtml). The `readGraph` function tries to be an intelligent wrapper for various reader classes. In this example, it uses the `METISGraphReader` which is located in the `graphio` submodule, alongside other readers. These classes can also be used explicitly:
```
G = nk.graphio.METISGraphReader().read("../input/PGPgiantcompo.graph")
# is the same as: readGraph("input/PGPgiantcompo.graph", Format.METIS)
```
It is also possible to specify the format for `readGraph()` and `writeGraph()`. Supported formats can be found via `[graphio.]Format`. However, graph formats are most likely only supported as far as the NetworKit::Graph can hold and use the data. Please note, that not all graph formats are supported for reading and writing.
Thus, it is possible to use NetworKit to convert graphs between formats. Let's say I need the previously read PGP graph in the Graphviz format:
```
import os
if not os.path.isdir('./output/'):
os.makedirs('./output')
nk.graphio.writeGraph(G,"output/PGPgiantcompo.graphviz", nk.Format.GraphViz)
```
NetworKit also provides a function to convert graphs directly:
```
nk.graphio.convertGraph(nk.Format.LFR, nk.Format.GML, "../input/example.edgelist", "output/example.gml")
```
## The Graph Object
`Graph` is the central class of NetworKit. An object of this type represents an undirected, optionally weighted network. Let us inspect several of the methods which the class provides.
```
print(G.numberOfNodes(), G.numberOfEdges())
```
Nodes are simply integer indices, and edges are pairs of such indices.
```
# Preview the graph contents: the first few node ids, then the first few
# edges, then the first few weighted edges (weight is 1 for unweighted graphs).
for u in G.iterNodes():
    if u > 5:
        print('...')
        break
    print(u)

# enumerate() replaces the manual counter: print the first six edges,
# then an ellipsis.
for idx, (u, v) in enumerate(G.iterEdges()):
    if idx > 5:
        print('...')
        break
    print(u, v)

for idx, (u, v, w) in enumerate(G.iterEdgesWeights()):
    if idx > 5:
        print('...')
        break
    print(u, v, w)
```
This network is unweighted, meaning that each edge has the default weight of 1.
```
G.weight(42, 11)
```
## Connected Components
A connected component is a set of nodes in which each pair of nodes is connected by a path. The following function determines the connected components of a graph:
```
# Compute the connected components of G with NetworKit.
cc = nk.components.ConnectedComponents(G)
cc.run()
print("number of components ", cc.numberOfComponents())
v = 0
# FIX: query the same node v that is printed (the original hard-coded node 0,
# which silently diverges from the message if v is changed).
print("component of node ", v, ": ", cc.componentOfNode(v))
print("map of component sizes: ", cc.getComponentSizes())
```
## Degree Distribution
Node degree, the number of edges connected to a node, is one of the most studied properties of networks. Types of networks are often characterized in terms of their distribution of node degrees. We obtain and visualize the degree distribution of our example network as follows.
```
dd = sorted(nk.centrality.DegreeCentrality(G).run().scores(), reverse=True)
plt.xscale("log")
plt.xlabel("degree")
plt.yscale("log")
plt.ylabel("number of nodes")
plt.plot(dd)
plt.show()
```
We choose a logarithmic scale on both axes because a _powerlaw degree distribution_, a characteristic feature of complex networks, would show up as a straight line from the top left to the bottom right on such a plot. As we see, the degree distribution of the `PGPgiantcompo` network is definitely skewed, with few high-degree nodes and many low-degree nodes. But does the distribution actually obey a power law? In order to study this, we need to apply the [powerlaw](https://pypi.python.org/pypi/powerlaw) module. Call the following function:
```
try:
import powerlaw
fit = powerlaw.Fit(dd)
except ImportError:
print ("Module powerlaw could not be loaded")
```
The powerlaw coefficient can then be retrieved via:
```
try:
import powerlaw
fit.alpha
except ImportError:
print ("Module powerlaw could not be loaded")
```
If you further want to know how "good" it fits the power law distribution, you can use the `distribution_compare` function. From the documentation of the function:
> R : float
>
> Loglikelihood ratio of the two distributions' fit to the data. If
> greater than 0, the first distribution is preferred. If less than
> 0, the second distribution is preferred.
> p : float
>
> Significance of R
```
try:
import powerlaw
fit.distribution_compare('power_law','exponential')
except ImportError:
print ("Module powerlaw could not be loaded")
```
## Community Detection
This section demonstrates the community detection capabilities of NetworKit. Community detection is concerned with identifying groups of nodes which are significantly more densely connected to each other than to the rest of the network.
Code for community detection is contained in the `community` module. The module provides a top-level function to quickly perform community detection with a suitable algorithm and print some stats about the result.
```
nk.community.detectCommunities(G)
```
The function prints some statistics and returns the partition object representing the communities in the network as an assignment of node to community label. Let's capture this result of the last function call.
```
communities = nk.community.detectCommunities(G)
```
*Modularity* is the primary measure for the quality of a community detection solution. The value is in the range `[-0.5,1]` and usually depends both on the performance of the algorithm and the presence of distinctive community structures in the network.
```
nk.community.Modularity().getQuality(communities, G)
```
### The Partition Data Structure
The result of community detection is a partition of the node set into disjoint subsets. It is represented by the `Partition` data strucure, which provides several methods for inspecting and manipulating a partition of a set of elements (which need not be the nodes of a graph).
```
type(communities)
print("{0} elements assigned to {1} subsets".format(communities.numberOfElements(),
communities.numberOfSubsets()))
print("the biggest subset has size {0}".format(max(communities.subsetSizes())))
```
The contents of a partition object can be written to file in a simple format, in which each line *i* contains the subset id of node *i*.
```
nk.community.writeCommunities(communities, "output/communties.partition")
```
### Choice of Algorithm
The community detection function used a good default choice for an algorithm: *PLM*, our parallel implementation of the well-known Louvain method. It yields a high-quality solution at reasonably fast running times. Let us now apply a variation of this algorithm.
```
nk.community.detectCommunities(G, algo=nk.community.PLM(G, True))
```
We have switched on refinement, and we can see how modularity is slightly improved. For a small network like this, this takes only marginally longer.
### Visualizing the Result
We can easily plot the distribution of community sizes as follows. While the distribution is skewed, it does not seem to fit a power-law, as shown by a log-log plot.
```
sizes = communities.subsetSizes()
sizes.sort(reverse=True)
ax1 = plt.subplot(2,1,1)
ax1.set_ylabel("size")
ax1.plot(sizes)
ax2 = plt.subplot(2,1,2)
ax2.set_xscale("log")
ax2.set_yscale("log")
ax2.set_ylabel("size")
ax2.plot(sizes)
plt.show()
```
## Search and Shortest Paths
A simple breadth-first search from a starting node can be performed as follows:
```
v = 0
bfs = nk.distance.BFS(G, v)
bfs.run()
bfsdist = bfs.getDistances(False)
```
The return value is a list of distances from `v` to other nodes - indexed by node id. For example, we can now calculate the mean distance from the starting node to all other nodes:
```
sum(bfsdist) / len(bfsdist)
```
Similarly, Dijkstra's algorithm yields shortest path distances from a starting node to all other nodes in a weighted graph. Because `PGPgiantcompo` is an unweighted graph, the result is the same here:
```
dijkstra = nk.distance.Dijkstra(G, v)
dijkstra.run()
spdist = dijkstra.getDistances(False)
sum(spdist) / len(spdist)
```
## Centrality
[Centrality](http://en.wikipedia.org/wiki/Centrality) measures the relative importance of a node within a graph. Code for centrality analysis is grouped into the `centrality` module.
### Betweenness Centrality
We implement Brandes' algorithm for the exact calculation of betweenness centrality. While the algorithm is efficient, it still needs to calculate shortest paths between all pairs of nodes, so its scalability is limited. We demonstrate it here on the small Karate club graph.
```
K = nk.readGraph("../input/karate.graph", nk.Format.METIS)
bc = nk.centrality.Betweenness(K)
bc.run()
```
We have now calculated centrality values for the given graph, and can retrieve them either as an ordered ranking of nodes or as a list of values indexed by node id.
```
bc.ranking()[:10] # the 10 most central nodes
```
### Approximation of Betweenness
Since exact calculation of betweenness scores is often out of reach, NetworKit provides an approximation algorithm based on path sampling. Here we estimate betweenness centrality in `PGPgiantcompo`, with a probabilistic guarantee that the error is no larger than an additive constant $\epsilon$.
```
abc = nk.centrality.ApproxBetweenness(G, epsilon=0.1)
abc.run()
```
The 10 most central nodes according to betweenness are then
```
abc.ranking()[:10]
```
### Eigenvector Centrality and PageRank
Eigenvector centrality and its variant PageRank assign relative importance to nodes according to their connections, incorporating the idea that edges to high-scoring nodes contribute more. PageRank is a version of eigenvector centrality which introduces a damping factor, modeling a random web surfer which at some point stops following links and jumps to a random page. In PageRank theory, centrality is understood as the probability of such a web surfer to arrive on a certain page. Our implementation of both measures is based on parallel power iteration, a relatively simple eigensolver.
```
# Eigenvector centrality
ec = nk.centrality.EigenvectorCentrality(K)
ec.run()
ec.ranking()[:10] # the 10 most central nodes
# PageRank
pr = nk.centrality.PageRank(K, 1e-6)
pr.run()
pr.ranking()[:10] # the 10 most central nodes
```
## Core Decomposition
A $k$-core decomposition of a graph is performed by successively peeling away nodes with degree less than $k$. The remaining nodes form the $k$-core of the graph.
```
K = nk.readGraph("../input/karate.graph", nk.Format.METIS)
coreDec = nk.centrality.CoreDecomposition(K)
coreDec.run()
```
Core decomposition assigns a core number to each node, being the maximum $k$ for which a node is contained in the $k$-core. For this small graph, core numbers have the following range:
```
set(coreDec.scores())
nk.viztasks.drawGraph(K, node_size=[(k**2)*20 for k in coreDec.scores()])
plt.show()
```
## Subgraph
NetworKit supports the creation of Subgraphs depending on an original graph and a set of nodes. This might be useful in case you want to analyze certain communities of a graph. Let's say that community 2 of the above result is of further interest, so we want a new graph that consists of nodes and intra cluster edges of community 2.
```
c2 = communities.getMembers(2)
g2 = nk.graphtools.subgraphFromNodes(G, c2)
communities.subsetSizeMap()[2]
g2.numberOfNodes()
```
As we can see, the number of nodes in our subgraph matches the number of nodes of community 2. The subgraph can be used like any other graph object, e.g. further community analysis:
```
communities2 = nk.community.detectCommunities(g2)
nk.viztasks.drawCommunityGraph(g2,communities2)
plt.show()
```
## NetworkX Compatibility
[NetworkX](https://networkx.org/) is a popular Python package for network analysis. To let both packages complement each other, and to enable the adaptation of existing NetworkX-based code, we support the conversion of the respective graph data structures.
```
import networkx as nx
nxG = nk.nxadapter.nk2nx(G) # convert from NetworKit.Graph to networkx.Graph
print(nx.degree_assortativity_coefficient(nxG))
```
## Generating Graphs
An important subfield of network science is the design and analysis of generative models. A variety of generative models have been proposed with the aim of reproducing one or several of the properties we find in real-world complex networks. NetworKit includes generator algorithms for several of them.
The **Erdős–Rényi model** is the most basic random graph model, in which each edge exists with the same uniform probability. NetworKit provides an efficient generator:
```
ERD = nk.generators.ErdosRenyiGenerator(200, 0.2).generate()
print(ERD.numberOfNodes(), ERD.numberOfEdges())
```
## Transitivity / Clustering Coefficients
In the most general sense, transitivity measures quantify how likely it is that the relations out of which the network is built are transitive. The clustering coefficient is the most prominent of such measures. We need to distinguish between global and local clustering coefficient: The global clustering coefficient for a network gives the fraction of closed triads. The local clustering coefficient focuses on a single node and counts how many of the possible edges between neighbors of the node exist. The average of this value over all nodes is a good indicator for the degree of transitivity and the presence of community structures in a network, and this is what the following function returns:
```
nk.globals.clustering(G)
```
A simple way to generate a **random graph with community structure** is to use the `ClusteredRandomGraphGenerator`. It uses a simple variant of the Erdös-Renyi model: The node set is partitioned into a given number of subsets. Nodes within the same subset have a higher edge probability.
```
CRG = nk.generators.ClusteredRandomGraphGenerator(200, 4, 0.2, 0.002).generate()
nk.community.detectCommunities(CRG)
```
The **Chung-Lu model** (also called **configuration model**) generates a random graph which corresponds to a given degree sequence, i.e. has the same expected degree sequence. It can therefore be used to replicate some of the properties of a given real networks, while others are not retained, such as high clustering and the specific community structure.
```
degreeSequence = [CRG.degree(v) for v in CRG.nodes()]
clgen = nk.generators.ChungLuGenerator(degreeSequence)
CLG = clgen.generate()
nk.community.detectCommunities(CLG)
```
## Settings
In this section we discuss global settings.
### Logging
When using NetworKit from the command line, the verbosity of console output can be controlled via several loglevels, from least to most verbose: `FATAL`, `ERROR`, `WARN`, `INFO`, `DEBUG` and `TRACE`. (Currently, logging is only available on the console and not visible in the IPython Notebook).
```
nk.getLogLevel() # the default loglevel
nk.setLogLevel("TRACE") # set to most verbose mode
nk.setLogLevel("ERROR") # set back to default
```
Please note, that the default build setting is optimized (`--optimize=Opt`) and thus, every LOG statement below INFO is removed. If you need DEBUG and TRACE statements, please build the extension module by appending `--optimize=Dbg` when calling the setup script.
### Parallelism
The degree of parallelism can be controlled and monitored in the following way:
```
nk.setNumberOfThreads(4) # set the maximum number of available threads
nk.getMaxNumberOfThreads() # see maximum number of available threads
nk.getCurrentNumberOfThreads() # the number of threads currently executing
```
## Support
NetworKit is an open-source project that improves with suggestions and contributions from its users. The [mailing list](https://sympa.cms.hu-berlin.de/sympa/subscribe/networkit) is the place for general discussion and questions.
| github_jupyter |
# LASSO and Ridge Regression
This function shows how to use TensorFlow to solve lasso or ridge regression for $\boldsymbol{y} = \boldsymbol{Ax} + \boldsymbol{b}$
We will use the iris data, specifically: $\boldsymbol{y}$ = Sepal Length, $\boldsymbol{x}$ = Petal Width
```
# import required libraries
import matplotlib.pyplot as plt
import sys
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
# Specify 'Ridge' or 'LASSO'
regression_type = 'LASSO'
# clear out old graph
ops.reset_default_graph()
# Create graph
sess = tf.Session()
```
## Load iris data
```
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])
```
## Model Parameters
```
# Declare batch size
batch_size = 50
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# make results reproducible
seed = 13
np.random.seed(seed)
tf.set_random_seed(seed)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)
```
## Loss Functions
```
# Select appropriate loss function based on regression type
if regression_type == 'LASSO':
    # Declare Lasso loss function
    # Lasso Loss = L2_Loss + heavyside_step,
    # Where heavyside_step ~ 0 if A < constant, otherwise ~ 99
    # (a steep logistic sigmoid approximates the non-differentiable L1 constraint on A)
    lasso_param = tf.constant(0.9)
    heavyside_step = tf.truediv(1., tf.add(1., tf.exp(tf.multiply(-50., tf.subtract(A, lasso_param)))))
    regularization_param = tf.multiply(heavyside_step, 99.)
    loss = tf.add(tf.reduce_mean(tf.square(y_target - model_output)), regularization_param)
elif regression_type == 'Ridge':
    # Declare the Ridge loss function
    # Ridge loss = L2_loss + L2 norm of slope
    ridge_param = tf.constant(1.)
    ridge_loss = tf.reduce_mean(tf.square(A))
    loss = tf.expand_dims(tf.add(tf.reduce_mean(tf.square(y_target - model_output)), tf.multiply(ridge_param, ridge_loss)), 0)
else:
    # Unknown regression_type: report to stderr (note: loss is left undefined,
    # so later cells will fail with NameError in that case).
    print('Invalid regression_type parameter value',file=sys.stderr)
```
## Optimizer
```
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
```
## Run regression
```
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)

# Training loop: 1500 steps of mini-batch gradient descent.
loss_vec = []
for i in range(1500):
    # Sample a random mini-batch (with replacement) from the iris data;
    # transpose to the (batch, 1) shape expected by the placeholders.
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    # Record the batch loss for the convergence plot later on.
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss[0])
    # Report progress every 300 steps.
    if (i+1)%300==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
        print('Loss = ' + str(temp_loss))
        print('\n')
```
## Extract regression results
```
# Fetch the fitted coefficients from the session (each is a [1,1] tensor,
# so destructuring pulls out the single row).
[slope] = sess.run(A)
[y_intercept] = sess.run(b)

# Evaluate the fitted line at every observed x value.
best_fit = [slope * x + y_intercept for x in x_vals]
```
## Plot results
```
%matplotlib inline
# Plot the result
plt.plot(x_vals, y_vals, 'o', label='Data Points')
plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
plt.legend(loc='upper left')
plt.title('Sepal Length vs Pedal Width')
plt.xlabel('Pedal Width')
plt.ylabel('Sepal Length')
plt.show()
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title(regression_type + ' Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
tested; Gopal
```
| github_jupyter |
## Use a Decision Optimization model deployed in Watson Machine Learning
This notebook shows you how to create and monitor jobs, and get solutions using the Watson Machine Learning Python Client.
This example only applies to Decision Optimization in Watson Machine Learning Local and Cloud Pak for Data/Watson Studio Local.
In order to use this example, you must first have deployed the Diet example.
A Python API is provided to submit input data, solve, and get results.
```
# Uninstall the Watson Machine Learning client Python client based on v3 APIs
!pip uninstall watson-machine-learning-client -y
# Install WML client API
!pip install ibm-watson-machine-learning
from ibm_watson_machine_learning import APIClient
# Instantiate a client using credentials
wml_credentials = {
"apikey": "<API_key>",
"url": "<instance_url>"
}
client = APIClient(wml_credentials)
# Find the space ID
space_name = '<SPACE NAME>'
space_id = [x['metadata']['id'] for x in client.spaces.get_details()['resources'] if x['entity']['name'] == space_name][0]
client.set.default_space(space_id)
# Import pandas library
import pandas as pd

# Candidate foods: name, unit cost, and allowed quantity range [qmin, qmax].
_food_columns = ["name", "unit_cost", "qmin", "qmax"]
_food_rows = [
    ("Roasted Chicken", 0.84, 0, 10),
    ("Spaghetti W/ Sauce", 0.78, 0, 10),
    ("Tomato,Red,Ripe,Raw", 0.27, 0, 10),
    ("Apple,Raw,W/Skin", 0.24, 0, 10),
    ("Grapes", 0.32, 0, 10),
    ("Chocolate Chip Cookies", 0.03, 0, 10),
    ("Lowfat Milk", 0.23, 0, 10),
    ("Raisin Brn", 0.34, 0, 10),
    ("Hotdog", 0.31, 0, 10),
]
diet_food = pd.DataFrame(_food_rows, columns=_food_columns)

# Nutrient content per serving of each food.
_nutrient_columns = ["Food", "Calories", "Calcium", "Iron", "Vit_A",
                     "Dietary_Fiber", "Carbohydrates", "Protein"]
_nutrient_rows = [
    ("Spaghetti W/ Sauce", 358.2, 80.2, 2.3, 3055.2, 11.6, 58.3, 8.2),
    ("Roasted Chicken", 277.4, 21.9, 1.8, 77.4, 0, 0, 42.2),
    ("Tomato,Red,Ripe,Raw", 25.8, 6.2, 0.6, 766.3, 1.4, 5.7, 1),
    ("Apple,Raw,W/Skin", 81.4, 9.7, 0.2, 73.1, 3.7, 21, 0.3),
    ("Grapes", 15.1, 3.4, 0.1, 24, 0.2, 4.1, 0.2),
    ("Chocolate Chip Cookies", 78.1, 6.2, 0.4, 101.8, 0, 9.3, 0.9),
    ("Lowfat Milk", 121.2, 296.7, 0.1, 500.2, 0, 11.7, 8.1),
    ("Raisin Brn", 115.1, 12.9, 16.8, 1250.2, 4, 27.9, 4),
    ("Hotdog", 242.1, 23.5, 2.3, 0, 0, 18, 10.4),
]
diet_food_nutrients = pd.DataFrame(_nutrient_rows, columns=_nutrient_columns)

# Daily intake bounds [qmin, qmax] for each tracked nutrient.
_bound_rows = [
    ("Calories", 2000, 2500),
    ("Calcium", 800, 1600),
    ("Iron", 10, 30),
    ("Vit_A", 5000, 50000),
    ("Dietary_Fiber", 25, 100),
    ("Carbohydrates", 0, 300),
    ("Protein", 50, 100),
]
diet_nutrients = pd.DataFrame(_bound_rows, columns=["name", "qmin", "qmax"])
```
You can find the deployment ID in the Analytics deployment spaces.
Or by listing the deployment using the API.

```
client.deployments.list()
# Get the deployment ID from the Model name.
# Note, that there could be several deployments for one model
model_name = "diet"
deployment_uid = [x['metadata']['id'] for x in client.deployments.get_details()['resources'] if x['entity']['name'] == model_name][0]
print(deployment_uid)
```
Create and monitor a job with inline data for your deployed model.
Create a payload containing inline input data.
Create a new job with this payload and the deployment.
Get the job_uid.
```
# Build the job payload: the three diet tables are passed inline as input,
# and every CSV produced by the solve is requested as output.
solve_payload = {
    client.deployments.DecisionOptimizationMetaNames.INPUT_DATA: [
        {
            "id": "diet_food.csv",
            "values": diet_food
        },
        {
            "id": "diet_food_nutrients.csv",
            "values": diet_food_nutrients
        },
        {
            "id": "diet_nutrients.csv",
            "values": diet_nutrients
        }
    ],
    client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA: [
        {
            # FIX: raw string -- "\." in a plain string is an invalid escape
            # sequence (DeprecationWarning, and a SyntaxWarning on newer Pythons).
            "id": r".*\.csv"
        }
    ]
}
# Submit the job against the deployment and keep its id for polling.
job_details = client.deployments.create_job(deployment_uid, solve_payload)
job_uid = client.deployments.get_job_uid(job_details)
print(job_uid)
```
Display job status until it is completed.
The first job of a new deployment might take some time as a compute node must be started.
```
from time import sleep

# Poll every 5 seconds until the job reaches a terminal state.
# Note: the state is printed BEFORE refreshing, so the first line shows the
# state captured at job creation time.
while job_details['entity']['decision_optimization']['status']['state'] not in ['completed', 'failed', 'canceled']:
    print(job_details['entity']['decision_optimization']['status']['state'] + '...')
    sleep(5)
    job_details=client.deployments.get_job_details(job_uid)

# Final state, then the full status payload (displayed by the notebook).
print( job_details['entity']['decision_optimization']['status']['state'])
job_details['entity']['decision_optimization']['status']
```
Extract and display solution.
Display the output solution.
Display the KPI Total Calories value.
```
solution_table=[x for x in job_details['entity']['decision_optimization']['output_data'] if x['id'] == 'solution.csv'][0]
# Create a dataframe for the solution
solution = pd.DataFrame(solution_table['values'],
columns = solution_table['fields'])
solution.head()
print( job_details['entity']['decision_optimization']['solve_state']['details']['KPI.Total Calories'] )
```
| github_jupyter |
## Preparation
Welcome to the Vectice tutorial notebook!
Through this notebook, we will be illustrating how to log the following information into Vectice using the Vectice Python library:
- Dataset versions
- Model versions
- Runs and lineage
For more information on the tutorial, please refer to the "Vectice Tutorial Page" inside the app.
## Setup
Install Vectice
```
#Install Vectice Python library
# In this tutorial we will do code versioning using github, we also support gitlab
# and bitbucket: !pip install -q "vectice[github, gitlab, bitbucket]"
!pip install --q vectice[github]
#Verify if Vectice python library was installed
!pip3 show vectice
```
Here, our data is stored in GCS. We need to install the following GCS packages in order to be able to retrieve it.
```
## GCS packages
!pip3 install --q fsspec
!pip3 install --q gcsfs
## Import the required packages for data preparation and model training
import string
from math import sqrt
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline
# Load scikit-learn packages
from sklearn.model_selection import train_test_split # Model Selection
from sklearn.metrics import mean_absolute_error, mean_squared_error # Model Evaluation
from sklearn.linear_model import LinearRegression # Linear Regression
from sklearn.tree import DecisionTreeRegressor, plot_tree # Decision Tree Regression
from sklearn.ensemble import RandomForestRegressor # Random Forest Regression
```
### Connect and authenticate to Vectice API
```
#Import the Vectice library
from vectice import Vectice
from vectice.models import JobType
from vectice.entity.model import ModelType
import logging
logging.basicConfig(level=logging.INFO)

# Specify the API endpoint for Vectice.
os.environ['VECTICE_API_ENDPOINT'] = "beta.vectice.com"

# To use the Vectice Python library, you first need to authenticate your account using an API key.
# You can generate an API key from the Vectice UI, by going to the "API Tokens" tab in your workspace.
# SECURITY FIX: the original cell committed a real-looking API key and project
# token; never publish credentials. Paste yours over the placeholders below.
os.environ['VECTICE_API_TOKEN'] = "<API_TOKEN>"

# Next, you need to specify the tutorial project where you will run this notebook using a
# "Project Token". You can find the "Project Token" under the "Settings" tab of your project.
# autocode=True tracks your git changes automatically every time you execute a run (see below).
vectice = Vectice(project_token="<PROJECT_TOKEN>", autocode=True)
```
## Create a run
A run is an execution of a job. You can think of a job like a grouping of runs.
When creating a run we need to specify:
1) a job name (mandatory)
2) a job type (optional)
3) a run name (optional)
Job names, job types and run names are useful to group and search runs in the Vectice UI.
You can also specify inputs when you start your run and outputs when you end it. The inputs can be code, dataset and model versions and the outputs can be dataset and model versions.
```
vectice.create_run("job_name", JobType.PREPARATION, "run name").with_properties([("run key", "run prop")])
vectice.start_run(inputs=[inputs])
vectice.end_run(outputs=[outputs])
```
You can also use the Python context manager (with) to manage runs. This helps to end the run and it also marks its status as failed in the Vectice UI in case we have an error in the run.
```
vectice.create_run("job_name", JobType.PREPARATION, "run name").with_properties([("run key", "run prop")])
with vectice.start_run(inputs=[inputs]) as run:
#Add your code here
run.add_outputs(outputs=[outputs])
```
## Create a dataset and a dataset version
There are three ways to create a dataset in Vectice:
1- Creating a dataset without a connection
```
### Creating a dataset without a connection
vectice.create_dataset(dataset_name="dataset name",data_properties=[("key", "prop"), ("key2", "prop2")])
```
2- Creating a dataset with a connection
Getting the list of connections in the Workspace:
```
vectice.list_connections()
## Creating a dataset with a connection
vectice.create_dataset_with_connection_name(connection_name="connection name",
dataset_name="dataset name",
files=["gs://file_path/file_name.csv"],
data_properties=[("key", "prop"), ("key2", "prop2")])
## We can also use vectice.create_dataset_with_connection_id()
```
3- Create a dataset and a dataset version at the same time
When creating a new dataset version, if the parent dataset doesn't exist in the project, a new dataset is created automatically and it will contain the first version we created.
```
dataset_version = vectice.create_dataset_version().with_parent_name("new dataset").with_properties([("key", "prop")])
```
The Vectice library automatically detects if there have been changes to the dataset you are using. If it detects changes, it will generate a new version of your dataset automatically. Else, it's going to use the latest version of your dataset.
We can get the list of the datasets we have in the project by calling **vectice.list_datasets()**
```
vectice.list_datasets().list
```
We can also get the list of dataset versions by calling **vectice.list_dataset_versions(dataset_id)**
### Attach a dataset version as input or output to a run
```
# Declare the run, attach the dataset version as an input, then close the run.
vectice.create_run("job_name", JobType.PREPARATION, "run name").with_properties([("run key", "run prop")])
vectice.start_run(inputs=[dataset_version])
# end_run is a method -- it must be called; a bare attribute reference is a no-op
# and the run would never be closed.
vectice.end_run()
```
You can also use another existing dataset version by using the existing version name, number or id (if you use the id, you don't need to specify the parent dataset name or id).
```
# Reuse an existing version (version number 1) of the "dataset" dataset.
dataset_version = vectice.create_dataset_version().with_parent_name("dataset").with_existing_version_number(1)
vectice.create_run("job_name", JobType.PREPARATION, "run name").with_properties([("run key", "run prop")])
vectice.start_run(inputs=[dataset_version])
# end_run is a method -- it must be called; a bare attribute reference is a no-op
# and the run would never be closed.
vectice.end_run()
```
## Create a code version
Vectice enables you to track your source code by creating code versions. This can be done automatically and manually.
### Creating a code version automatically
If you are using your local environment with GIT installed or JupyterLab etc... the code tracking can be automated by setting autocode=True when creating the Vectice instance.
### Creating a code version manually
You can create a code version manually by using:
1- **vectice.create_code_version_with_github_uri()** for GitHub
2- **vectice.create_code_version_with_gitlab_uri()** for GitLab
3- **vectice.create_code_version_with_bitbucket_uri()** for Bitbucket
```
## Example for code versioning with GitHub
# The URI points at github.com, so the GitHub variant of the helper is the right
# one (the original mistakenly called create_code_version_with_gitlab_uri).
code_version = Vectice.create_code_version_with_github_uri("https://github.com/vectice/vectice-examples",
                                                           "Notebooks/Tutorial/Jupyter_notebooks/GCS_data/Tutorial_notebook_GCS_data.ipynb")
vectice.create_run("Job name", JobType.PREPARATION, "Run name").with_properties([("run key", "run prop")])
vectice.start_run(inputs=[code_version])
vectice.end_run()
```
## Creating models and model versions
Vectice enables you to create your models and model versions and log the metrics, hyperparameters and model properties
When creating a model version, if there is a model with the same name as the given model name in your project, a new model version is added to the given model. Else, a new model is created automatically.
```
# Use the vectice instance (not the Vectice class) for consistency with the
# other examples in this notebook.
vectice.create_model_version().with_parent_name('Regressor')
```
You can declare your model metrics, hyperparameters, properties, type, the algorithm used and model attachments when creating a model version.
```
metrics = [('metric', value), ('metric 2', value)]
properties = [('property', value), ('property 2', value)]
# Parentheses are required so the fluent chain can span multiple lines;
# without them each ".with_..." line is a SyntaxError when copy-pasted.
model_version = (vectice.create_model_version()
                 .with_parent_name("Regressor")
                 .with_algorithm("Decision Tree")
                 .with_type(ModelType.REGRESSION)
                 .with_properties(properties)
                 .with_metrics(metrics)
                 .with_attachments(["DecisionTree_6.png"])
                 .with_user_version())
```
Here we used with_user_version() for model versioning. You can provide a version name for your model version. An error will be thrown if the given user version already exists and if you don't provide a version name, the version name will be generated automatically.
### Attach a model version as input or output of a run
```
vectice.create_run("job_name", JobType.PREPARATION, "run name").with_properties([("run key", "run prop")])
vectice.start_run(inputs=[dataset_version])
metrics = [('metric', value), ('metric 2', value)]
properties = [('property', value), ('property 2', value)]
model_version = vectice.create_model_version().with_user_version().with_parent_name("Regressor").with_algorithm("Decision Tree").with_type(ModelType.REGRESSION).with_properties(properties).with_metrics(metrics).with_attachments(["DecisionTree_6.png"])
vectice.end_run(outputs=[model_version])
```
# Exercise
### Getting the data from GCS
We are going to load data stored in Google Cloud Storage, that is provided by Vectice for this tutorial.
You need a service account key to be able to get the data from your buckets on GCS. You can find more information about how to generate a key to access your data on GCS [here](https://doc.vectice.com/connections/google.html#google-cloud-storage).
```
## Provide the path to the service account JSON key file
## (the key must grant read access to the tutorial bucket).
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'readerKey.json'
# Once your file is loaded you can view your dataset in a Pandas dataframe.
# NOTE(review): reading a gs:// path with pandas requires gcsfs to be installed -- confirm.
df = pd.read_csv('gs://vectice_tutorial/kc_house_data_cleaned.csv')
# Run head to make sure the data was loaded properly
df.head()
```
### Data preparation
Let's split the dataset into train and test sets and save them in GCS. The GCS code has been commented out as the data has already been generated.
```
# The Vectice library automatically detects if there have been changes to the dataset you are using.
# If it detects changes, it will generate a new version of your dataset automatically. Else, it's going
# to use the latest version of your dataset.
# You can also use another dataset version by calling .with_existing_version_name('version name')
input_ds_version = vectice.create_dataset_version().with_parent_name("cleaned_kc_house_data")
# For this run, we will use the job name "80/20 Split" and the job type "PREPARATION".
# You can have multiple runs with the same job name.
# The context manager (with) ends the run for us and marks its status as failed
# in the Vectice UI in case the body raises.
# (Indentation of the with-body was lost in the export and is restored here.)
vectice.create_run("80/20 Split", JobType.PREPARATION, "Data preparation")
with vectice.start_run(inputs=[input_ds_version]) as run:
    # We will use an 80/20 split to prepare the data.
    test_size = 0.2
    # We will set the random seed so we always generate the same split.
    random_state = 42
    train, test = train_test_split(df, test_size = test_size, random_state = random_state)
    # We commented out the code to persist the training and testing sets in GCS,
    # because we already generated the data for you.
    # We left the code below for convenience, in case you want to use your own credentials and GCS bucket.
    # train.to_csv (r'gs://vectice_tutorial/training_data.csv', index = False, header = True)
    # test.to_csv (r'gs://vectice_tutorial/testing_data.csv', index = False, header = True)
    # Generate X_train, X_test, y_train, y_test, which we will need for modeling.
    X = df.drop("price", axis=1).values
    y = df["price"].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
    # Let's create new versions of the training and testing dataset if the data has changed.
    # We will use the existing dataset created by Albert, so that we can append new
    # dataset versions to it.
    train_ds_version = vectice.create_dataset_version().with_parent_name("train_cleaned_kc_house_data")
    test_ds_version = vectice.create_dataset_version().with_parent_name("test_cleaned_kc_house_data")
    # Attach the output datasets to the run.
    run.add_outputs(outputs=[train_ds_version,test_ds_version])
# We can preview one of our generated outputs to make sure that everything was executed properly.
X_train
```
## Modeling
We can get the list of the models existing in the project by calling **vectice.list_models()**
```
vectice.list_models().list
```
### Decision tree model
In this section let's use the decision tree algorithm and compare the accuracy to the logistic regression algorithm. We will try different values for the tree_depth. We will log the model parameters and metrics in Vectice.
```
# We can do a few runs with different max depth for the tree.
# Just change the value below and re-run this cell.
# The model versions you created will show up in the Vectice UI as new versions
# of the "Regressor" Model. You can easily compare them from there.
tree_depth = 6
vectice.create_run("DT-Model", JobType.TRAINING)
# The context manager (with) ends the run and marks its status as failed
# in the Vectice UI in case the body raises.
# (Indentation of the with-body was lost in the export and is restored here.)
with vectice.start_run(inputs=[train_ds_version,test_ds_version]) as run:
    dtr = DecisionTreeRegressor(max_depth=tree_depth, min_samples_split=50)
    dtr.fit(X_train,y_train)
    dtr_pred = dtr.predict(X_test)
    data_feature_names = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors',
                          'waterfront', 'view', 'condition', 'grade', 'sqft_above',
                          'sqft_basement', 'yr_built', 'yr_renovated', 'zipcode', 'lat',
                          'long', 'sqft_living15', 'sqft_lot15']
    # Visualize the Decision Tree Model and save the plot so it can be attached
    # to the model version below via .with_attachments([...]).
    plt.figure(figsize=(25, 10))
    plot_tree(dtr, feature_names=data_feature_names, filled=True, fontsize=10)
    plt.savefig("DecisionTree_6.png")
    MAE = mean_absolute_error(dtr_pred, y_test)
    RMSE = sqrt(mean_squared_error(dtr_pred, y_test))
    print("Root Mean Squared Error:", RMSE)
    print("Mean Absolute Error:", MAE)
    # Here we use with_user_version() to create a new model version. You can provide
    # a version name; an error will be thrown if the given user version already
    # exists, and if you don't provide one the version name is generated automatically.
    properties = [("Tree Depth",str(tree_depth))]
    metrics = [("RMSE", RMSE), ("MAE", MAE)]
    model_version = vectice.create_model_version().with_user_version().with_parent_name("Regressor").with_algorithm("Decision Tree").with_type(ModelType.REGRESSION).with_properties(properties).with_metrics(metrics).with_attachments(["DecisionTree_6.png"])
    # We add the created model version as output of the run.
    run.add_outputs(outputs=[model_version])
```
### Model versions table
You can also get all the model versions you created in previous runs, for offline analysis and understanding in more details what's driving the models performance.
```
# 1859 is the id of the tutorial "Regressor" model -- replace it with your own
# model id (visible in the Vectice UI or via vectice.list_models()).
vectice.list_model_versions_dataframe(1859)
```
### Update your model
Vectice enables you to update your model by using **vectice.update_model()**
```
# Update model-level metadata (type and description) of the existing "Regressor" model.
vectice.update_model(parent_name="Regressor", model_type=ModelType.REGRESSION, description="Model description")
```
Thank you and congratulations! You have successfully completed this tutorial.
In this notebook we have illustrated how you can capture your experiments, hyperparameters, dataset versions and metrics using the Vectice Python library.
You can now leverage Vectice UI for analysis, documentation and to engage a business conversation around the findings.
Vectice enables you to:
1. Make your experiments more reproducible.
2. Track the data and code that is used for each experiment and model versions.
3. Document your projects' progress and collaborate with your team in Vectice's UI.
4. Discover previous work and reuse your team knowledge for new projects.
We are constantly improving the Vectice Python library and the Vectice application. Let us know what improvements you would like to see in the solution and what your favorite features are after completing this tutorial.
Feel free to explore more and come up with your own ideas on how to best start leveraging Vectice!
| github_jupyter |
## Loading of Stringer orientations data
includes some visualizations
```
#@title Data retrieval
import os, requests

# Download the Stringer orientations dataset from OSF unless a local copy
# already exists. Failures are printed rather than raised so the notebook
# can continue. (Indentation was lost in the export and is restored here.)
fname = "stringer_orientations.npy"
url = "https://osf.io/ny4ut/download"
if not os.path.isfile(fname):
    try:
        r = requests.get(url)
    except requests.ConnectionError:
        print("!!! Failed to download data !!!")
    else:
        if r.status_code != requests.codes.ok:
            print("!!! Failed to download data !!!")
        else:
            with open(fname, "wb") as fid:
                fid.write(r.content)
#@title Data loading
import numpy as np
# The .npy file stores a pickled dict; .item() unwraps the 0-d object array.
dat = np.load('stringer_orientations.npy', allow_pickle=True).item()
print(dat.keys())
```
dat has fields:
* dat['sresp']: neurons by stimuli, a.k.a. the neural response data (23589 by 4598)
* dat['run']: 1 by stimuli, a.k.a. the running speed of the animal in a.u.
* dat['istim']: 1 by stimuli, goes from 0 to 2*np.pi, the orientations shown on each trial
* dat['stat']: 1 by neurons, some statistics for each neuron, see Suite2p for full documentation.
* dat['stat'][k]['med']: 1 by 2, the position of each neuron k in tissue, in pixels, at a resolution of ~2um/pix.
* dat['u_spont']: neurons by 128, the weights for the top 128 principal components of spontaneous activity. Unit norm.
* dat['v_spont']: 128 by 910, the timecourses for the top 128 PCs of spont activity.
* dat['u_spont'] @ dat['v_spont']: a reconstruction of the spontaneous activity for 910 timepoints interspersed throughout the recording.
```
# Sanity-check the data dimensions (neurons x stimuli, and per-neuron stats).
print(dat['sresp'].shape)
print(len(dat['stat']))
#@title import matplotlib and set defaults
from matplotlib import rcParams
from matplotlib import pyplot as plt
# Global plot defaults used by all figures below.
rcParams['figure.figsize'] = [20, 4]
rcParams['font.size'] =15
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
rcParams['figure.autolayout'] = True
#@title Basic data properties using plot, hist and scatter
# Panel 1: distribution of stimulus orientations across trials.
ax = plt.subplot(1,5,1)
plt.hist(dat['istim'])
ax.set(xlabel='orientations', ylabel = '# trials')
# Panel 2: tuning of a single neuron (index 1000) to stimulus orientation.
ax = plt.subplot(1,5,2)
plt.scatter(dat['istim'], dat['sresp'][1000], s= 1)
ax.set(xlabel = 'orientation', ylabel = 'neural response')
# Panel 3: running speed over the first 1000 timepoints.
ax = plt.subplot(1,5,3)
plt.plot(dat['run'][:1000])
ax.set(xlabel = 'timepoints', ylabel = 'running')
# Panel 4: running speed vs. one neuron's response (index 20998).
ax = plt.subplot(1,5,4)
plt.scatter(dat['run'], dat['sresp'][20998], s= 1)
ax.set(xlabel = 'running', ylabel = 'neural response')
plt.show()
#@title take PCA after preparing data by z-score
from scipy.stats import zscore
from sklearn.decomposition import PCA
# z-score each neuron across stimuli so all neurons contribute equally.
Z = zscore(dat['sresp'], axis=1)
# PCA over stimuli (Z.T is stimuli x neurons): X is stimuli x 200 components.
X = PCA(n_components = 200).fit_transform(Z.T)
#@title plot PCs as function of stimulus orientation
# One panel per leading principal component: PC value vs. stimulus orientation.
# (Indentation of the loop body was lost in the export and is restored here.)
for j in range(5):
    ax = plt.subplot(1,5,j+1)
    plt.scatter(dat['istim'], X[:,j], s = 1)
    ax.set(xlabel='orientation', ylabel = 'PC%d'%j)
plt.show()
#@title run a manifold embedding algorithm (UMAP) in two or three dimensions.
!pip install umap-learn
from umap import UMAP
ncomp = 3 # try 2, then try 3
xinit = 3 * zscore(X[:,:ncomp], axis=0)
embed = UMAP(n_components=ncomp, init = xinit, n_neighbors = 25,
metric = 'correlation', transform_seed = 42).fit_transform(X)
plt.figure(figsize=(8,8))
for i in range(ncomp):
for j in range(ncomp):
plt.subplot(ncomp,ncomp, j + ncomp*i + 1)
if i==j:
plt.scatter(dat['istim'], embed[:,i], s = 1)
else:
plt.scatter(embed[:,j], embed[:,i], s = 1, c= dat['istim'], cmap = 'hsv')
# Is that a Mobius strip? A good project would be to try to figure out why (I don't know).
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.