text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
import utils # local file
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.size'] = 17.0
plt.rc('text', usetex=False)
```
## Temporal study of network parameters
```
def parse_time(team, matchid):
    """Load the passing events of one team in one match, with second-half
    event times shifted so that 'EventTime' is continuous over the match.

    The offset added to 2H events is the timestamp of the last 1H event
    taken from fullevents.csv.
    """
    passes = pd.read_csv('./2020_Problem_D_DATA/passingevents.csv')
    passes = passes[(passes['MatchID'] == matchid) & (passes['TeamID'] == team)]
    events = pd.read_csv('./2020_Problem_D_DATA/fullevents.csv')
    events = events[(events['MatchID'] == matchid) & (events['TeamID'] == team)]
    # Duration of the first half = time of its last recorded event.
    halftime_offset = events[events['MatchPeriod'] == '1H'].iloc[-1]['EventTime']
    passes.loc[passes['MatchPeriod'] == '2H', 'EventTime'] += halftime_offset
    return passes
def temporal_network_params(team, matchid, length=50):
    """Compute passing-network parameters over a sliding window of passes.

    Parameters
    ----------
    team : str
        TeamID to analyse (e.g. 'Huskies').
    matchid : int
        MatchID to analyse.
    length : int, optional
        Number of consecutive passes per window (default 50).

    Returns
    -------
    pandas.DataFrame
        One row per window start index; columns are the parameters returned
        by calc_network_params plus 'delta_time' (window duration) and
        'time' (timestamp of the window end).
    """
    # plot_network was imported but never used in the original; dropped.
    from network import build_network, calc_network_params
    # parse_time already restricts the events to this team and match,
    # so the original's re-filtering on TeamID/MatchID was redundant.
    df = parse_time(team, matchid)
    time = df['EventTime'].values
    net_param_dict = {}
    # Slide a window of `length` passes through the match; range() is empty
    # when there are fewer than `length` passes, replacing the old break.
    for i in range(len(df) - length):
        if i % 20 == 0:
            print(i)  # coarse progress indicator
        G, pos, centrality_dict, geometrical_dist, unidirection_pass, weight_dict = build_network(
            df[i:length + i], team, matchid)
        network_params = calc_network_params(G)
        network_params['delta_time'] = time[length + i] - time[i]
        network_params['time'] = time[length + i]
        net_param_dict[i] = network_params
    return pd.DataFrame(net_param_dict).T
# Sliding-window network parameters for match 18, for both teams.
df_net_huskies = temporal_network_params('Huskies', 18)
df_net_opponent = temporal_network_params('Opponent18', 18)

# Four stacked panels sharing the time axis, one per network metric.
plt.rc('text', usetex=True)
fig, axes = plt.subplots(4, 1, figsize=(11, 10), sharex=True)
keywords = [
    'clustering_coeff', 'shortest_path', 'largest_eigenvalue', 'algebraic_conn'
]
ylabel_set = ['Clustering', 'Shortest Path',
              r'$\lambda_1$', r'$\widetilde{\lambda_2}$']
tag_set = ['A', 'B', 'C', 'D', 'E', 'F']
for ind, key in enumerate(keywords):
    ax = axes[ind]
    # 'time' divided by 60 to plot minutes (axis label below is 'Time (min)').
    ax.plot(df_net_huskies['time'].values / 60, df_net_huskies[key].values,
            color='steelblue', marker='.', label='Huskies')
    ax.plot(df_net_opponent['time'].values / 60, df_net_opponent[key].values,
            color='orange', marker='.', label='Opponent')
    ax.set_ylabel(ylabel_set[ind])
    ax.set_xlabel('Time (min)')
    # Suppress x tick labels here; the bottom panel re-enables them below.
    ax.tick_params(direction='in', left=True, right=True,
                   bottom=True, top=True, labelleft=True, labelbottom=False)
    if ind == 0:
        ax.legend(loc='upper right')
    # Panel tag (A-D) placed near the upper-left of each axis.
    ylim = ax.get_ylim()
    ax.text(10, ylim[1] - (ylim[1] - ylim[0]) * 0.15,
            r'$\textbf{' + tag_set[ind] + '}$',
            fontsize=17,
            fontweight='bold',
            horizontalalignment='center',
            verticalalignment='center')
# Bottom panel keeps its x tick labels; panels touch because hspace=0.
axes[3].tick_params(direction='in', left=True, right=True,
                    bottom=True, top=True, labelleft=True, labelbottom=True)
plt.subplots_adjust(hspace=0)
#plt.savefig('./Draft/temporal-net-params-m18.pdf', dpi=200, bbox_inches='tight')
#plt.savefig('./Draft/temporal-net-params-m18.png', dpi=200, bbox_inches='tight')
```
## Temporal Classical Metrics
```
def cal_mean_position(df_pass, direction):
    """Mean pass-origin coordinate along one axis ('x' or 'y')."""
    column = 'EventOrigin_{}'.format(direction)
    return df_pass[column].mean()
def cal_centroid_disp(df_pass):
    """Sample standard deviation (ddof=1) of the pass-origin distances
    from the team centroid (mean origin position)."""
    centroid_x = cal_mean_position(df_pass, 'x')
    centroid_y = cal_mean_position(df_pass, 'y')
    dx = df_pass['EventOrigin_x'] - centroid_x
    dy = df_pass['EventOrigin_y'] - centroid_y
    # Euclidean distance of each pass origin from the centroid.
    distances = np.sqrt(np.square(dx) + np.square(dy))
    return np.std(distances, ddof=1)
def cal_advance(df_pass):
    """Ratio of total absolute y-displacement to total absolute
    x-displacement over all passes in df_pass."""
    dx = (df_pass['EventDestination_x'] - df_pass['EventOrigin_x']).abs()
    dy = (df_pass['EventDestination_y'] - df_pass['EventOrigin_y']).abs()
    return dy.sum() / dx.sum()
def temporal_classical_metrics(team, matchid, length=50):
    """Compute classical spatial metrics over a sliding window of passes.

    Parameters
    ----------
    team : str
        TeamID to analyse.
    matchid : int
        MatchID to analyse.
    length : int, optional
        Number of consecutive passes per window (default 50).

    Returns
    -------
    pandas.DataFrame
        One row per window start index with columns 'x_cen', 'y_cen',
        'cen_disp', 'advance', 'delta_time' and 'time'.
    """
    # The original imported build_network/plot_network/calc_network_params
    # from the network module here but never used them; removed.
    # parse_time already filters by team and match, so no re-filtering.
    df = parse_time(team, matchid)
    time = df['EventTime'].values
    metrics_dict = {}
    # range() is empty when there are fewer than `length` passes,
    # replacing the original break-based guard.
    for i in range(len(df) - length):
        window = df[i:length + i]  # slice once instead of four times
        metrics_dict[i] = {
            'x_cen': cal_mean_position(window, 'x'),
            'y_cen': cal_mean_position(window, 'y'),
            'cen_disp': cal_centroid_disp(window),
            'advance': cal_advance(window),
            'delta_time': time[length + i] - time[i],
            'time': time[length + i],
        }
    return pd.DataFrame(metrics_dict).T
# Sliding-window classical (spatial) metrics for match 18, both teams.
df_metrics_huskies = temporal_classical_metrics('Huskies', 18)
df_metrics_opponent = temporal_classical_metrics('Opponent18', 18)

# Four stacked, time-aligned panels: centroid x, centroid y, dispersion, advance.
plt.rc('text', usetex=True)
fig, axes = plt.subplots(4, 1, figsize=(11, 10), sharex=True)
keywords = [
    'x_cen', 'y_cen', 'cen_disp', 'advance'
]
ylabel_set = [r'$<X>$', r'$<Y>$',
              'Dispersion', r'$<\Delta Y>/<\Delta X>$']
tag_set = ['A', 'B', 'C', 'D', 'E', 'F']
for ind, key in enumerate(keywords):
    ax = axes[ind]
    # 'time' divided by 60 to plot minutes.
    ax.plot(df_metrics_huskies['time'].values / 60, df_metrics_huskies[key].values,
            color='steelblue', marker='.', label='Huskies')
    ax.plot(df_metrics_opponent['time'].values / 60, df_metrics_opponent[key].values,
            color='orange', marker='.', label='Opponent')
    ax.set_ylabel(ylabel_set[ind])
    ax.set_xlabel('Time (min)')
    # Suppress x tick labels; the bottom panel re-enables them below.
    ax.tick_params(direction='in', left=True, right=True,
                   bottom=True, top=True, labelleft=True, labelbottom=False)
    if ind == 0:
        ax.legend()
    # Panel tag (A-D) near the upper-left of each axis.
    ylim = ax.get_ylim()
    ax.text(10, ylim[1] - (ylim[1] - ylim[0]) * 0.15,
            r'$\textbf{' + tag_set[ind] + '}$',
            fontsize=17,
            fontweight='bold',
            horizontalalignment='center',
            verticalalignment='center')
# Bottom panel keeps its x tick labels; panels touch because hspace=0.
axes[3].tick_params(direction='in', left=True, right=True,
                    bottom=True, top=True, labelleft=True, labelbottom=True)
plt.subplots_adjust(hspace=0)
plt.savefig('./Draft/temporal-spatial-m18.pdf', dpi=200, bbox_inches='tight')
plt.savefig('./Draft/temporal-spatial-m18.png', dpi=200, bbox_inches='tight')
```
| github_jupyter |
# IASI Data Experiment
```
import sys
sys.path.append('/home/emmanuel/projects/2019_egp/src')
from data.iasi import IASIOrbits, create_dataarray
from experiments.experiment_iasi import GPModels
from models.gp_models import SparseGP
import GPy
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from sklearn.externals import joblib
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
## Experimental Parameters
```
# Experiment configuration.
n_components = 50          # component count passed to IASIOrbits -- semantics defined in data.iasi; TODO confirm
train_size = 5000          # number of training samples drawn
n_train_orbits = 'all'
n_test_orbits = 'all'
random_state = 123
chunksize = 4000
rng = np.random.RandomState(random_state)
# Orbit identifiers; presumably YYYYMMDDhhmmss timestamps for 2013-10-01
# (the [:8] slice below extracts the date part).
orbits = [
    '20131001120859',
    '20131001102955',
    '20131001015955',
    '20131001202954',
    '20131001185058',
    '20131001084755',
    '20131001152954',
    '20131001170858',
    '20131001221154',
    '20131001135058',
    '20131001034155',
    '20131001070555',
    '20131001052355']
print(len(orbits))
day = orbits[0][:8]  # 'YYYYMMDD' shared by all orbits above
# Build the data object; input noise injection is disabled (input_noise_level=None).
iasi_data = IASIOrbits(
    n_components = n_components,
    train_size=train_size,
    input_noise_level=None,
    noise_coefficient=1.0,
    n_train_orbits=n_train_orbits,
    n_test_orbits=n_test_orbits,
)
# Get Training Data
training_data = iasi_data.get_training_data()
```
### Training Region
```
# Wrap training labels plus lat/lon coordinates into an xarray DataArray.
train_xarray = create_dataarray(
    training_data['y'],
    training_data['lat'],
    training_data['lon'],
    day)
train_xarray.name = 'Temperature'
train_xarray.min(), train_xarray.max()  # notebook display of the value range
# Labeled Data: geographic map on the left, value histogram on the right.
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(121, projection=ccrs.PlateCarree())
ax2 = fig.add_subplot(122)
plot_data = train_xarray.isel(time=0) - 273.15  # Kelvin -> Celsius
plot_data.plot.imshow(ax=ax1, transform=ccrs.PlateCarree(),
                      cmap='RdBu_r', alpha=1.0, vmin=-10, vmax=30,
                      cbar_kwargs={'orientation': 'horizontal',
                                   'label': 'Temperature'})
ax1.coastlines()
plot_data.plot.hist(ax=ax2, bins=100)
ax2.set_xlabel('Data Histogram')
# fig.savefig(fig_save_loc + 't1_bias.png')
plt.show()
```
### Save Training Data
```
save_path = '/home/emmanuel/projects/2019_egp/data/processed/'
# DATA
train_xarray.to_netcdf(f"{save_path}training_egp_v1.nc")
```
### Model Parameters and Training
```
# Model Parameters
init_params = True
n_inducing = 1000             # inducing-point count -- presumably for SparseGP; confirm in models.gp_models
x_variance = iasi_data.x_cov  # input-space covariance supplied by the data object
n_restarts = 1                # optimizer restarts
# Initialize Modeler
clf = GPModels(
    init_params = init_params,
    x_variance=x_variance,
    n_inducing=n_inducing,
    n_restarts=n_restarts,
)
# Fit Models (trailing ';' suppresses the notebook's output echo)
clf.train_models(
    training_data['X'],
    training_data['y']
);
```
#### Save Models
```
save_path = '/home/emmanuel/projects/2019_egp/data/processed/'
# MODELS
joblib.dump(clf, f"{save_path}models_v1.pckl");
clf.models['error'].kernel_
```
### Testing
```
# Evaluate every trained model on each test orbit and merge labels,
# predictions and predictive standard deviations into one xarray Dataset.
iasi_data.n_test_orbits = 13
datasets = xr.Dataset()
n_test = list()  # records the shape of each orbit's test input
for xtest, labels, lon, lat, orbit in iasi_data.get_testing_data():
    n_test.append(xtest.shape)
    temp_dataset = xr.Dataset()
    # Save Labels Dataarray
    temp_dataset['Temperature'] = create_dataarray(labels, lat, lon, orbit)
    for imodel, ypred, ystd in clf.test_models(xtest, noise_less=True):
        # Create Predictions array
        temp_dataset[f"{imodel}_pred"] = create_dataarray(ypred, lat, lon, orbit)
        # Create Standard Deviations array
        temp_dataset[f"{imodel}_std"] = create_dataarray(ystd, lat, lon, orbit)
    datasets = datasets.merge(temp_dataset)
    del temp_dataset  # release before the next orbit's allocation
n_test = np.array(n_test)
```
## Save Results
```
save_path = '/home/emmanuel/projects/2019_egp/data/processed/'
# DATA
datasets.to_netcdf(f"{save_path}results_egp_v1.nc")
```
| github_jupyter |
# Testing differences between groups
```
# Import numerical, data and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# Only show 4 decimals when printing
np.set_printoptions(precision=4)
# Show the plots in the notebook
%matplotlib inline
```
Imagine we have some measures of psychopathy in 8 students. 4 students are from Berkeley, and 4 students are from MIT.
```
psychos = pd.read_csv('psycho_students.csv')
psychos
```
We find that the mean score for the Berkeley students is different from the mean score for the MIT students:
```
# Split the table by university and pull out the psychopathy scores.
berkeley_students = psychos[psychos['university'] == 'Berkeley']
berkeley_students  # notebook display
mit_students = psychos[psychos['university'] == 'MIT']
mit_students  # notebook display
berkeley_scores = berkeley_students['psychopathy']
mit_scores = mit_students['psychopathy']
berkeley_scores.mean(), mit_scores.mean()  # per-university means
```
Here is the difference between the means:
```
mean_diff = berkeley_scores.mean() - mit_scores.mean()
mean_diff
```
That's the difference we see. But - if we take any 8 students from a single university, and take the mean of the first four, and the mean of the second four, there will almost certainly be a difference in the means, just because there's some difference across individuals in the psychopathy score. Is this difference we see unusual compared to the differences we would see if we took eight students from the same university, and compared the means of the first four and the second four?
For a moment, let us pretend that all our Berkeley and MIT students come from the same university. Then I can pool the Berkeley and MIT students together.
```
all_pooled = list(berkeley_scores) + list(mit_scores)
all_pooled
```
If there is no difference between Berkeley and MIT, then it should be OK to just shuffle the students to a random order, like this:
```
from random import shuffle
shuffle(all_pooled)
all_pooled
```
Now I can just pretend that the first four students are from one university, and the last four are from another university. Then I can compare the means.
```
fake_berkeley = all_pooled[:4]
fake_mit = all_pooled[4:]
np.mean(fake_berkeley) - np.mean(fake_mit)
```
```
# Permutation test: repeat the shuffle-and-split 10000 times to build
# the sampling distribution of the mean difference under the null.
fake_differences = []
for i in range(10000):
    np.random.shuffle(all_pooled)  # in-place shuffle = one random relabeling
    diff = np.mean(all_pooled[:4]) - np.mean(all_pooled[4:])
    fake_differences.append(diff)
```
The 10000 values we calculated form the *sampling distribution*. Let's have a look:
```
plt.hist(fake_differences)
plt.title("Sampling distribution of mean difference");
```
Where does the value we actually see, sit in this histogram? More specifically, how many of the values in this histogram are less than or equal to the value we actually see?
```
# We will count the number of fake_differences <= our observed
# difference, then convert the count to a proportion (the p value).
count = 0
# Go through each of the sampled values one by one
for diff in fake_differences:
    if diff <= mean_diff:
        count = count + 1
# Divide by the actual number of samples rather than a hard-coded 10000,
# so this cell stays correct if the simulation size above changes.
proportion = count / len(fake_differences)
proportion
```
That's the p value.
| github_jupyter |
# Deep Neural Network for Image Classification: Application
When you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course!
You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation.
**After this assignment you will be able to:**
- Build and apply a deep neural network to supervised learning.
Let's get started!
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
```
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
```
## 2 - Dataset
You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better!
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labelled as cat (1) or non-cat (0)
- a test set of m_test images labelled as cat and non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
Let's get more familiar with the dataset. Load the data by running the cell below.
```
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
```
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
```
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]  # number of training examples
num_px = train_x_orig.shape[1]   # images are square: (num_px, num_px, 3)
m_test = test_x_orig.shape[0]    # number of test examples
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
```
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.
<img src="images/imvectorkiank.png" style="width:450px;height:300px;">
<caption><center> <u>Figure 1</u>: Image to vector conversion. <br> </center></caption>
```
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
```
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector.
## 3 - Architecture of your model
Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.
You will build two different models:
- A 2-layer neural network
- An L-layer deep neural network
You will then compare the performance of these models, and also try out different values for $L$.
Let's look at the two architectures.
### 3.1 - 2-layer neural network
<img src="images/2layerNN_kiank.png" style="width:650px;height:400px;">
<caption><center> <u>Figure 2</u>: 2-layer neural network. <br> The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. </center></caption>
<u>Detailed Architecture of figure 2</u>:
- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$.
- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.
- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.
- You then repeat the same process.
- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias).
- Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat.
### 3.2 - L-layer deep neural network
It is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation:
<img src="images/LlayerNN_kiank.png" style="width:650px;height:400px;">
<caption><center> <u>Figure 3</u>: L-layer neural network. <br> The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***</center></caption>
<u>Detailed Architecture of figure 3</u>:
- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).
- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.
- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.
- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat.
### 3.3 - General methodology
As usual you will follow the Deep Learning methodology to build the model:
1. Initialize parameters / Define hyperparameters
2. Loop for num_iterations:
a. Forward propagation
b. Compute cost function
c. Backward propagation
d. Update parameters (using parameters, and grads from backprop)
4. Use trained parameters to predict labels
Let's now implement those two models!
## 4 - Two-layer neural network
**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters(n_x, n_h, n_y):
...
return parameters
def linear_activation_forward(A_prev, W, b, activation):
...
return A, cache
def compute_cost(AL, Y):
...
return cost
def linear_activation_backward(dA, cache, activation):
...
return dA_prev, dW, db
def update_parameters(parameters, grads, learning_rate):
...
return parameters
```
```
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288     # num_px * num_px * 3 = 64 * 64 * 3 (flattened image size)
n_h = 7         # hidden-layer size
n_y = 1         # single sigmoid output (cat / non-cat)
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model

def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
    """
    Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (n_x, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- dimensions of the layers (n_x, n_h, n_y)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- If set to True, this will print the cost every 100 iterations

    Returns:
    parameters -- a dictionary containing W1, W2, b1, and b2
    """
    np.random.seed(1)
    grads = {}
    costs = []        # to keep track of the cost
    m = X.shape[1]    # number of examples
    (n_x, n_h, n_y) = layers_dims

    # Initialize parameters dictionary.
    parameters = initialize_parameters(n_x, n_h, n_y)

    # Get W1, b1, W2 and b2 from the dictionary parameters.
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID.
        A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
        A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")

        # Compute cross-entropy cost.
        cost = compute_cost(A2, Y)

        # Initializing backward propagation (derivative of the cost w.r.t. A2).
        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))

        # Backward propagation. dA0 (gradient w.r.t. the input) is unused.
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")

        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Retrieve W1, b1, W2, b2 from parameters for the next iteration.
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]

        # Print and record the cost every 100 iterations.
        # (The original tested this identical condition in two consecutive
        # `if` statements; merged into one branch.)
        if print_cost and i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
            costs.append(cost)

    # Plot the cost curve.
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')  # fixed label: costs are sampled every 100 iterations
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (■) on the upper bar of the notebook to stop the cell and try to find your error.
```
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
```
**Expected Output**:
<table>
<tr>
<td> **Cost after iteration 0**</td>
<td> 0.6930497356599888 </td>
</tr>
<tr>
<td> **Cost after iteration 100**</td>
<td> 0.6464320953428849 </td>
</tr>
<tr>
<td> **...**</td>
<td> ... </td>
</tr>
<tr>
<td> **Cost after iteration 2400**</td>
<td> 0.048554785628770206 </td>
</tr>
</table>
Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.
Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
```
predictions_train = predict(train_x, train_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Accuracy**</td>
<td> 1.0 </td>
</tr>
</table>
```
predictions_test = predict(test_x, test_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Accuracy**</td>
<td> 0.72 </td>
</tr>
</table>
**Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting.
Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.
## 5 - L-layer Neural Network
**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters_deep(layers_dims):
...
return parameters
def L_model_forward(X, parameters):
...
return AL, caches
def compute_cost(AL, Y):
...
return cost
def L_model_backward(AL, Y, caches):
...
return grads
def update_parameters(parameters, grads, learning_rate):
...
return parameters
```
```
### CONSTANTS ###
# Input 12288 = 64*64*3; three hidden layers (20, 7, 5); one sigmoid output.
layers_dims = [12288, 20, 7, 5, 1] #  4-layer model
# GRADED FUNCTION: L_layer_model

def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
    """
    Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    Arguments:
    X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(1)
    costs = []  # keep track of cost

    # Parameters initialization.
    parameters = initialize_parameters_deep(layers_dims)

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        AL, caches = L_model_forward(X, parameters)

        # Compute cost.
        cost = compute_cost(AL, Y)

        # Backward propagation.
        grads = L_model_backward(AL, Y, caches)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print and record the cost every 100 iterations.
        # (The original tested this identical condition in two consecutive
        # `if` statements; merged into one branch.)
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
            costs.append(cost)

    # Plot the cost curve.
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')  # fixed label: costs are recorded every 100 iterations
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
You will now train the model as a 4-layer neural network.
Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (■) on the upper bar of the notebook to stop the cell and try to find your error.
```
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
```
**Expected Output**:
<table>
<tr>
<td> **Cost after iteration 0**</td>
<td> 0.771749 </td>
</tr>
<tr>
<td> **Cost after iteration 100**</td>
<td> 0.672053 </td>
</tr>
<tr>
<td> **...**</td>
<td> ... </td>
</tr>
<tr>
<td> **Cost after iteration 2400**</td>
<td> 0.092878 </td>
</tr>
</table>
```
pred_train = predict(train_x, train_y, parameters)
```
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.985645933014
</td>
</tr>
</table>
```
pred_test = predict(test_x, test_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Test Accuracy**</td>
<td> 0.8 </td>
</tr>
</table>
Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set.
This is good performance for this task. Nice job!
Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course).
## 6) Results Analysis
First, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
```
print_mislabeled_images(classes, test_x, test_y, pred_test)
```
**A few types of images the model tends to do poorly on include:**
- Cat body in an unusual position
- Cat appears against a background of a similar color
- Unusual cat color and species
- Camera Angle
- Brightness of the picture
- Scale variation (cat is very large or small in image)
## 7) Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ##
my_image = "my_image.jpg"   # change this to the name of your image file
my_label_y = [1]            # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##

fname = "images/" + my_image
# scipy.ndimage.imread and scipy.misc.imresize were deprecated in SciPy 1.0
# and removed in 1.2/1.3; load and resize with Pillow instead (Image is
# already imported at the top of this notebook).
pil_image = Image.open(fname)
image = np.array(pil_image)
# Resize to the network's input resolution, then flatten to a column vector.
my_image = np.array(pil_image.resize((num_px, num_px))).reshape((num_px*num_px*3,1))
my_image = my_image/255.  # same 0-1 scaling as the training data
my_predicted_image = predict(my_image, my_label_y, parameters)

plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
**References**:
- for auto-reloading external module: http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
| github_jupyter |
# Portfolio Optimization
- ํฌํธํด๋ฆฌ์ค ์ต์ ํ ์ด๋ก ์ ๋ํ ์ ๋ฆฌ ์๋ฃ
- **ํฌํธํด๋ฆฌ์ค**๋, ๋ค์ํ ์์ฐ์ ๋ถ์ฐํ์ฌ ํฌ์ํ๋ ๊ฒ์ ๋งํจ
- **๋ถ์ฐํฌ์**๋ฅผ ํตํด ๋ณ๋์ฑ๊ณผ ์ํ์ ๋ฎ์ถ ์ ์์ (๊ณ๋์ ํ ๋ฐ๊ตฌ๋์ ๋ด์ง ๋ง๋ผ)
- **์์ฐ ๋ฐฐ๋ถ์ด๋?** ์ํ ๋๋น ์์ต์ ์ต๋ํํ๋ ํฌํธํด๋ฆฌ์ค๋ฅผ ๊ตฌ์ฑํ๋ ๊ฒ
---
#### Tactical Asset Allocation(TAA)
- ์ํ ๋๋น ์์ต์ "๋จ๊ธฐ์ ์ผ๋ก" ์ต๋ํ
- Smart Beta...
#### Strategic Asset Allocation(SAA)
- ์ํ ๋๋น ์์ต์ "์ฅ๊ธฐ์ ์ผ๋ก" ์ต๋ํ
- ์ผ๋ฐ์ ์ผ๋ก ์ํ๊ณผ ์์ต๋ฅ ์ ๋น๋ก ๊ด๊ณ
- SAA ๋ชจ๋ธ์ ์ํ(๋ณ๋์ฑ)์ด ๋ฎ์ผ๋ฉด์ ์์ต๋ฅ ์ด ๋์ ํฌํธํด๋ฆฌ์ค๋ฅผ ๋ง๋๋ ๊ฒ์ด ๋ชฉํ
- Markowitz, Black-Litterman Model...
---
#### ํ๊ท ์์ต๋ฅ (Expectation)
- ์ ํ๊ท ์์ต๋ฅ ๊ณผ ๊ฐ์ด ๊ธฐ๊ฐ์ด ํฌํจ๋ ๊ฒฝ์ฐ๋ผ๋ฉด ๊ธฐํํ๊ท ์ ์ฌ์ฉ
- $E(R) = \sqrt[N]{R_1 \times R_2... \times R_i}$
- $E(R) = \frac{1}{N}\sum_{i=1}^{N}R_i$
#### ๋ณ๋์ฑ (Variance)
- ๋ณ๋์ฑ(=์ํ)์ ๊ธฐ๋๊ฐ์ผ๋ก ๋ถํฐ ์ผ๋ง๋ ๋จ์ด์ ธ์๋์ง๋ฅผ ๋ํ๋ด๋ ๋ถ์ฐ๊ณผ ๋์ผ
- $\sigma^2 = Var(R) = \frac{1}{N-1}\sum_{i=1}^{N}(R_i-\bar{R})^2$
#### ๊ณต๋ถ์ฐ (Covariance)
- ํ๋ฅ ๋ณ์๊ฐ 2๊ฐ ์ด์์ผ ๋ ๊ฐ ํ๋ฅ ๋ณ์๋ค์ด ์ผ๋ง๋ ํผ์ ธ์๋์ง๋ฅผ ๋ํ๋ด๋ ๊ฐ
- $Cov(R^1, R^2) = E[(R^1-\bar{R^1})(R^2-\bar{R^2})] = \frac{1}{N-1}\sum_{i=1}^{N}(R^1_i-\bar{R^1})(R^2_i-\bar{R^2})$
#### ์๊ด๊ด๊ณ (Correlation Coefficient)
- ํ๋ฅ ๋ณ์์ ์ ๋์ ํฌ๊ธฐ์ ์ํฅ์ ๋ฐ์ง ์๋๋ก -1๊ณผ 1์ฌ์ด๋ก ๋จ์ํ์ํจ ๊ฐ
- $\rho = \cfrac{Cov(X,Y)}{Std(X)Std(Y)}, (-1\leq\rho\leq1)$
---
## ํฌํธํด๋ฆฌ์ค ๊ธฐ๋์์ต๊ณผ ์ํ ์ธก์
#### ํฌํธํด๋ฆฌ์ค ์ ์
- ์ฃผ์ด์ง ์์ฐ ๋ด์์ ์์ฐ ๋ณ ํฌ์ ๋น์ค
- ํฌ์ํ ์์ฐ ๊ตฐ์ ๊ฒฐ์ ํ๊ณ ๊ฒฐ์ ํ ์์ฐ ๋ณ ์์ต๋ฅ , ๋ณ๋์ฑ ๋ฐ ์๊ด๊ด๊ณ๋ฅผ ๊ณ์ฐ
- ๋ณ๋์ฑ ๋๋น ์์ต๋ฅ ์ด ๊ฐ์ฅ ๋์ ํฌํธํด๋ฆฌ์ค๋ฅผ ๊ตฌ์ฑํ๋ ๊ฒ์ด ๋ชฉํ
- $w = portfolio = [w_1, w_2, ... , w_N]^T, where \sum_{i=1}^{N}w_i = 1$
#### ํฌํธํด๋ฆฌ์ค์ ๊ธฐ๋ ์์ต (Weighted Average)
- ๊ฐ๋ณ ์์ฐ์ ๊ธฐ๋์์ต๋ฅ ๊ณผ ํฌํธํด๋ฆฌ์ค์ ๋น์ค์ ๊ณฑํด์ ํฉ์ฐ
- $w = portfolio = [w_1, w_2, ... , w_N]^T, where \sum_{i=1}^{N}w_i = 1$
- $\mu_p = portfolio \times expectation = [w_1, w_2, ... , w_N][R_1, R_2, ... , R_N]^T$
#### ํฌํธํด๋ฆฌ์ค์ ๋ณ๋์ฑ (=์ํ)
- $\sigma_p^2 = [w_1, w_2, ... , w_N]
\begin{bmatrix}
\sigma_{11} & \sigma_{12} & \cdots & \sigma_{1n} \\
\vdots & \vdots & \ddots & \vdots \\
\sigma_{n1} & \sigma_{n2} & \cdots & \sigma_{n^2}
\end{bmatrix}
\begin{bmatrix}
w_{1} \\
w_{2} \\
\vdots \\
w_{N} \\
\end{bmatrix}
$
```
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
%matplotlib inline
def get_dataset(code, start, end):
    # Load daily price data for stock `code` from a local pickle, restricted to
    # the [start, end] date window (inclusive on both ends).
    # NOTE(review): `inclusive=True` was removed in pandas 1.3+ (use
    # inclusive='both') — confirm the pandas version pinned for this notebook.
    df = pd.read_pickle("../dataset/{}.p".format(code))[::-1]  # source file is newest-first; reverse to oldest-first
    df = df[df['date'].between(start, end, inclusive=True)]
    df = df.drop(['diff'], axis=1).set_index('date').sort_index()
    return df
# KAKAO 2017-01-01 ~ 2018-03-30
# NAVER 2017-01-01 ~ 2018-03-30
kakao = get_dataset('035720', '2017-01-01', '2018-03-30')
naver = get_dataset('035420', '2017-01-01', '2018-03-30')
```
## NAVER vs KAKAO
- 2017-01-01 ~ 2018-03-30 기간의 종가 그래프
- 아래 그래프를 통해 두 종목의 변동성을 비교할 수 있을까?
```
plt.figure(figsize=(12,8))
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
kakao_price = kakao['close']
naver_price = naver['close']
plt.plot(naver_price)
plt.plot(kakao_price)
plt.legend(['NAVER', 'KAKAO'], loc='upper left')
plt.figure(figsize=(12,8))
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
kakao_price = kakao['close']
mean = kakao_price.replace(kakao_price, kakao_price.mean())
plt.plot(kakao_price)
plt.plot(mean)
plt.legend(['KAKAO', 'MEAN'], loc='upper left')
```
## Daily per change
```
plt.figure(figsize=(12,8))
real_returns = kakao_price.pct_change()
plt.bar(real_returns.index,real_returns)
```
## Mean-Variance on Single Stock
```
def income(start, end):
    """Percentage return when a price moves from `start` to `end`, rounded to 2 dp."""
    pct_change = (end - start) / start
    return round(pct_change * 100, 2)
def geometric_mean(iterable):
    """Geometric mean computed in the log domain: exp(mean(log(x))).

    Values whose integer truncation is 0 (i.e. -1 < x < 1) are dropped first,
    which also guards log against x == 0.
    NOTE(review): negative values still reach np.log and yield NaN — callers
    are assumed to pass non-negative returns; confirm upstream.
    """
    # BUG FIX: original read `int(i) is not 0`, an identity comparison against
    # a literal (SyntaxWarning since Python 3.8, correct only via CPython's
    # small-int cache). Use value equality instead; behavior is unchanged.
    iterable = [i for i in iterable if int(i) != 0]
    a = np.log(iterable)
    return np.exp(a.sum() / len(a))
point = kakao_price[0]
result = kakao_price.apply(lambda d: income(point, d))
result.head()
print("Mean of daily income: {}".format(np.mean(result)))
print("Geometric Mean of daily income: {}".format(geometric_mean(result)))
print("Varience of daily income: {}".format(np.var(result)))
print("Standard Deviation of daily income: {}".format(np.std(result)))
```
## Correlation
- r์ด -1์ ๊ฐ๊น์ธ ์๋ก ์์ ์๊ด๊ด๊ณ, +1์ ๊ฐ๊น์ธ ์๋ก ์์ ์๊ด๊ด๊ณ
- r์ด -0.1๊ณผ +0.1 ์ฌ์ด์ด๋ฉด, ๊ฑฐ์ ๋ฌด์๋ ์ ์๋ ์๊ด๊ด๊ณ
```
naver_price.corr(kakao_price, method='pearson')
```
## Mean-Variance on Portfolio
- ๋ค์ด๋ฒ(30%), ์นด์นด์ค(30%), ์
ํธ๋ฆฌ์จ(20%), SK์ด๋
ธ๋ฒ ์ด์
(20%)
```
def init_portfolio(stock, ratio, start, end):
    # Build a returns table: one column per stock code, each column holding the
    # cumulative % return relative to that stock's first close in the window.
    # NOTE(review): `ratio` is accepted but never used here — the weights are
    # applied later in port_mean_var; consider dropping the parameter.
    dfs = []
    for each in stock:
        df = get_dataset(each, start, end)['close']
        point = df[0]  # first close in the window is the baseline price
        result = df.apply(lambda d: income(point, d))
        dfs.append(result)
    return pd.concat(dfs, axis=1, keys=stock)
def port_mean_var(avg_ret_, var_covar_, w_):
    """Return (expected portfolio return, portfolio variance).

    The first value is the weighted average w·mu; the second is the quadratic
    form w·Sigma·w — a variance, not a standard deviation, despite the
    original local name.
    """
    expected_return = np.dot(avg_ret_, w_)
    variance = np.dot(w_, np.dot(var_covar_, w_))
    return expected_return, variance
stock = ['035420', '035720', '068270', '096770']
ratio = [0.3, 0.3, 0.2, 0.2]
df = init_portfolio(stock, ratio, '2017-01-01', '2018-03-30')
df.head()
avg_ret = df.mean()
var_covar = df.cov()
w = np.array(ratio).T
mean, var = port_mean_var(avg_ret, var_covar, w)
print("Mean of portfolio: {}".format(mean))
print("Varience of portfolio: {}".format(var))
```
| github_jupyter |
# Improved feature engineering
*Anders Poirel - 11-02-2020*
Ideas I'll be building on
- separating by city (data has different structure between the cities, avoids needing to build a more complex model that captures feature interactions)
- using the lifecycle of the mosquito: new mosquitoes become adults 1-3 weeks after eggs are laid in water. Therefore, we could expect a lot of cases if the previous 2-3 week/~month was humid
**Note**: I realized I used *median* absolute error instead of mean absolute error in the previous notebook, which explains why my CV scores were so far from the test set scores!
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn
from os.path import join
DATA_PATH = '../data/raw/'
```
## Acquiring the data
```
X_test_o = pd.read_csv(join(DATA_PATH, 'dengue_features_test.csv'))
X_train_o = pd.read_csv(join(DATA_PATH, 'dengue_features_train.csv'))
y_train_o = pd.read_csv(join(DATA_PATH, 'dengue_labels_train.csv'))
```
### Preprocessing
```
X_train = pd.get_dummies(X_train_o, columns = ['city'], drop_first = True)
X_test = pd.get_dummies(X_test_o, columns = ['city'], drop_first = True)
X_train = X_train.drop('week_start_date', axis = 1)
X_test = X_test.drop('week_start_date', axis = 1)
```
Drop features that have correlation 1 with other features
```
X_train.drop(
['reanalysis_sat_precip_amt_mm', 'reanalysis_dew_point_temp_k',
'reanalysis_tdtr_k'],
axis = 1,
inplace = True
)
X_test.drop(
['reanalysis_sat_precip_amt_mm', 'reanalysis_dew_point_temp_k',
'reanalysis_tdtr_k'],
axis = 1,
inplace = True
)
y_train
```
### Precipitation at several time lags
First, we split the data by city:
```
X_train_sj = X_train[X_train['city_sj'] == 1]
X_train_iq = X_train[X_train['city_sj'] == 0]
X_test_sj = X_test[X_test['city_sj'] == 1]
X_test_iq = X_test[X_test['city_sj'] == 0]
y_train_sj = y_train[y_train['city'] == 'sj']['total_cases']
y_train_iq = y_train[y_train['city'] == 'iq']['total_cases']
def precip_n_weeks(k, n, precips):
    """Precipitation observed `n` weeks before index `k`.

    Returns 0.0 when the lagged index would fall before the start of the
    series; `precips` is assumed positionally indexed from 0 (the Iquitos
    series are re-indexed for this in the cell below).
    """
    if k - n < 0:
        # BUG FIX: original read `re turn .0` — the `return` keyword was
        # split by a stray space, a syntax error. Restore the intended
        # zero default for weeks before the series starts.
        return 0.0
    else:
        return precips[k - n]
train_precip_sj = X_train_sj['precipitation_amt_mm']
train_precip_iq = X_train_iq['precipitation_amt_mm']
test_precip_sj = X_test_sj['precipitation_amt_mm']
test_precip_iq = X_test_iq['precipitation_amt_mm']
```
We re-index the series for Iquitos so that they start from 0 and our code can run properly
```
iq_train_index = list(range(len(train_precip_iq)))
iq_test_index = list(range(len(test_precip_iq)))
train_precip_iq.index = iq_train_index
test_precip_iq.index = iq_test_index
X_train_sj['precip_2'] = [precip_n_weeks(k, 2, train_precip_sj)
for k in range(len(train_precip_sj))]
X_train_sj['precip_3'] = [precip_n_weeks(k, 3, train_precip_sj)
for k in range(len(train_precip_sj))]
X_train_sj['precip_4'] = [precip_n_weeks(k, 4, train_precip_sj)
for k in range(len(train_precip_sj))]
X_test_sj['precip_2'] = [precip_n_weeks(k, 2, test_precip_sj)
for k in range(len(test_precip_sj))]
X_test_sj['precip_3'] = [precip_n_weeks(k, 3, test_precip_sj)
for k in range(len(test_precip_sj))]
X_test_sj['precip_4'] = [precip_n_weeks(k, 4, test_precip_sj)
for k in range(len(test_precip_sj))]
X_train_iq['precip_2'] = [precip_n_weeks(k, 2, train_precip_iq)
for k in range(len(train_precip_iq))]
X_train_iq['precip_3'] = [precip_n_weeks(k, 3, train_precip_iq)
for k in range(len(train_precip_iq))]
X_train_iq['precip_4'] = [precip_n_weeks(k, 4, train_precip_iq)
for k in range(len(train_precip_iq))]
X_test_iq['precip_2'] = [precip_n_weeks(k, 2, test_precip_iq)
for k in range(len(test_precip_iq))]
X_test_iq['precip_3'] = [precip_n_weeks(k, 3, test_precip_iq)
for k in range(len(test_precip_iq))]
X_test_iq['precip_4'] = [precip_n_weeks(k, 4, test_precip_iq)
for k in range(len(test_precip_iq))]
```
Let's check that this worked as intended:
```
X_test_sj.head(30)
```
## Building the models
```
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.model_selection import (cross_validate, TimeSeriesSplit,
RandomizedSearchCV)
```
#### ElasticNet with penalty
San Jose:
```
en_sj = Pipeline([
('scale', StandardScaler()),
('impute_m', SimpleImputer()),
('en', LinearRegression())
])
cv_res_sj = cross_validate(
estimator = en_sj,
X = X_train_sj,
y = y_train_sj,
cv = TimeSeriesSplit(n_splits = 10),
scoring = 'neg_mean_absolute_error',
n_jobs = -1
)
en_sj_score = np.mean(cv_res_sj['test_score'])
en_sj_score
en_sj.fit(X_train_sj, y_train_sj)
y_pred_sj = en_sj.predict(X_train_sj)
```
Iquitos:
```
en_iq = Pipeline([
('scale', StandardScaler()),
('impute_m', SimpleImputer()),
('en', ElasticNet(alpha = 10))
])
cv_res_iq = cross_validate(
estimator = en_iq,
X = X_train_iq,
y = y_train_iq,
cv = TimeSeriesSplit(n_splits = 10),
scoring = 'neg_mean_absolute_error',
n_jobs = -1
)
en_iq_score = np.mean(cv_res_iq['test_score'])
en_iq_score
y_train_iq.mean()
```
Something is really strange here... both models have large MAEs (close to the means values of the targets for each)
```
plt.style.use('default')
```
We get the date data for each city:
```
sj_dates = X_train_o[X_train_o['city'] == 'sj']['week_start_date']
iq_dates = X_train_o[X_train_o['city'] == 'iq']['week_start_date']
ax = plt.axes()
ax.plot(sj_dates, y_pred_sj)
ax.plot(sj_dates, y_train_sj)
```
It appears that the model is predicting very close to the mean
### Building a submission
```
submission = pd.read_csv(join(DATA_PATH, 'submission_format.csv'))
y_pred = poly_model_3.predict(X_test)
submission['total_cases'] = np.round(y_pred).astype(int)
submission
submission.to_csv('../models/baseline.csv', index = False)
```
| github_jupyter |
# T1499.002 - Service Exhaustion Flood
Adversaries may target the different network services provided by systems to conduct a DoS. Adversaries often target DNS and web services, however others have been targeted as well.(Citation: Arbor AnnualDoSreport Jan 2018) Web server software can be attacked through a variety of means, some of which apply generally while others are specific to the software being used to provide the service.
One example of this type of attack is known as a simple HTTP flood, where an adversary sends a large number of HTTP requests to a web server to overwhelm it and/or an application that runs on top of it. This flood relies on raw volume to accomplish the objective, exhausting any of the various resources required by the victim software to provide the service.(Citation: Cloudflare HTTPflood)
Another variation, known as a SSL renegotiation attack, takes advantage of a protocol feature in SSL/TLS. The SSL/TLS protocol suite includes mechanisms for the client and server to agree on an encryption algorithm to use for subsequent secure connections. If SSL renegotiation is enabled, a request can be made for renegotiation of the crypto algorithm. In a renegotiation attack, the adversary establishes a SSL/TLS connection and then proceeds to make a series of renegotiation requests. Because the cryptographic renegotiation has a meaningful cost in computation cycles, this can cause an impact to the availability of the service when done in volume.(Citation: Arbor SSLDoS April 2012)
## Atomic Tests:
Currently, no tests are available for this technique.
## Detection
Detection of Endpoint DoS can sometimes be achieved before the effect is sufficient to cause significant impact to the availability of the service, but such response time typically requires very aggressive monitoring and responsiveness. Typical network throughput monitoring tools such as netflow, SNMP, and custom scripts can be used to detect sudden increases in circuit utilization.(Citation: Cisco DoSdetectNetflow) Real-time, automated, and qualitative study of the network traffic can identify a sudden surge in one type of protocol can be used to detect an attack as it starts.
In addition to network level detections, endpoint logging and instrumentation can be useful for detection. Attacks targeting web applications may generate logs in the web server, application server, and/or database server that can be used to identify the type of attack, possibly before the impact is felt.
Externally monitor the availability of services that may be targeted by an Endpoint DoS.
| github_jupyter |
```
''' IMPORTS, RELOAD THIS BLOCK WHEN NEEDED '''
from IPython.display import display
from GoogleImageSpider import *
from FaceClassifier import *
from ImageClassifier import *
import certifi, urllib3
import time
import json
import os
path = "trump_images"
start_time = time.time()
def print_time():
    # TIMING CONTROL
    # Print minutes:seconds elapsed since the module-level `start_time`,
    # which callers reset (start_time = time.time()) before each stage.
    elapsed_time = time.time() - start_time
    mins = int(elapsed_time / 60)
    secs = elapsed_time - (mins * 60)
    # secs is already < 60, so the extra `% 60` is a harmless no-op
    print("Accumulative time: %02d:%02d" % (mins, int(secs % 60)))
''' FIRST STEP GET A "SMALL" SAMPLE OF TRUMP IMAGES, MAYBE 300 or 400 IMAGES '''
if not os.path.exists(path):
os.makedirs(path)
start_time = time.time()
# SECOND STEP DOWNLOAD IMAGES FROM EACH POKEMON NAME:
gis = GoogleImageSpider()
gis.get_images("trump", 300) #"trump pictures"
print ("Download and save..")
gis.save_images("trump", path)
gis.clear()
print ("Trump images downloaded")
print_time()
''' SECOND STEP: DETECT FACES AND EXPORT TO A NEW FOLDER '''
from PIL import Image
start_time = time.time()
destin_path = "faces_detected"
display_faces = False
face_size = 256
fc = FaceClassifier(face_resize=face_size)
#fc.train(show_train_images=True)
for image_filename in os.listdir(path):
image_path = path + "/" + image_filename
print ("Processing image: " + image_path)
#print (fc.predict(image_path))
data = fc.detect_face(image_path, grayscale_output = False)
for face in data:
if not face[0] is None:
img = Image.fromarray(face[0])
img.save(destin_path + "/" + image_filename)
if display_faces:
display(img)
print_time()
print ("Done!")
''' FOURTH STEP: IS TO TRAIN A NETWORK WITH THIS IMAGES (I MADE ONE SIMPLE
CONVOLUTIONAL CLASSIFIER, FEEL FREE TO UPDATE IT AND SEND A PUSH IT TO THE REPO)'''
from ImageClassifier import *
good_images_folder = "trump_classifier/good"
bad_images_folder = "trump_classifier/bad"
start_time = time.time()
ic = ImageClassifier(good_images_folder, bad_images_folder, 256, batch_size=50, training_epochs=2000, \
test_batch_percentage = 0) # Once proved that works, we need to use all possible inputs
#ic.test()
ic.train()
''' LAST STEP: FINALLY WE MIX ALL THAT TOGETHER, DOWNLOAD ALL THE DATASET, NORMALIZE,
AND LET OUR NEURAL NETWORK DISCRIMINATE THEM TO GET ONLY THE GOOD ONES.
THIS IS PRETTY MUCH COPY OF THE FIRST STEP CODE IMPROVED.
WE ARRIVE HERE IN LIKE 15 MINUTES, BUT NOW 100 IMAGES PER 949 POKEMONS WILL TAKE A WHILE..
TAKE A COFEE AND COME BACK IN A FEW HOURS ;)
GOOD NEWS IS THAT IS COMPLETELY FREE OF HUMAN INTERACTION '''
originals_temp_path = "trump_original_images"
source_path = "trump_destin_images"
good_images_folder = source_path+"/good"
bad_images_folder = source_path+"/bad"
images_to_download = 5000
start_time = time.time()
if not os.path.exists(source_path):
os.makedirs(source_path)
# DOWNLOAD IMAGES:
print ("Downloading images..")
gis = GoogleImageSpider()
gis.get_images("trump", images_to_download)
gis.save_images("trump", originals_temp_path)
print_time()
# DETECT AND CROP FACES:
print ("Detect and crop faces..")
face_size = 256
fc = FaceClassifier(face_resize=face_size)
for image_filename in os.listdir(originals_temp_path):
image_path = originals_temp_path + "/" + image_filename
print ("Processing image: " + image_path)
data = fc.detect_face(image_path, grayscale_output = False)
for face in data:
if not face[0] is None:
img = Image.fromarray(face[0])
img.save(source_path + "/" + image_filename)
print_time()
# RUN THE CNN TO CLASSSIFY GOOD AND BAD:
print ("Loading CNN..")
ic = ImageClassifier(None, None, 256)
ic.load()
print_time()
print ("Detect and crop faces..")
ic.run(source_path, good_images_folder, bad_images_folder, good_percent_treshold=85, delete_images=True)
print_time()
# END:
sys.stdout.flush() # For python command line
print ("Dataset finally done!")
```

| github_jupyter |
# T1018 - Remote System Discovery
Adversaries may attempt to get a listing of other systems by IP address, hostname, or other logical identifier on a network that may be used for Lateral Movement from the current system. Functionality could exist within remote access tools to enable this, but utilities available on the operating system could also be used such as [Ping](https://attack.mitre.org/software/S0097) or <code>net view</code> using [Net](https://attack.mitre.org/software/S0039). Adversaries may also use local host files (ex: <code>C:\Windows\System32\Drivers\etc\hosts</code> or <code>/etc/hosts</code>) in order to discover the hostname to IP address mappings of remote systems.
Specific to macOS, the <code>bonjour</code> protocol exists to discover additional Mac-based systems within the same broadcast domain.
Within IaaS (Infrastructure as a Service) environments, remote systems include instances and virtual machines in various states, including the running or stopped state. Cloud providers have created methods to serve information about remote systems, such as APIs and CLIs. For example, AWS provides a <code>DescribeInstances</code> API within the Amazon EC2 API and a <code>describe-instances</code> command within the AWS CLI that can return information about all instances within an account.(Citation: Amazon Describe Instances API)(Citation: Amazon Describe Instances CLI) Similarly, GCP's Cloud SDK CLI provides the <code>gcloud compute instances list</code> command to list all Google Compute Engine instances in a project, and Azure's CLI <code>az vm list</code> lists details of virtual machines.(Citation: Google Compute Instances)(Citation: Azure VM List)
## Atomic Tests
```
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
# BUG FIX: `- Force` (with a space) is parsed as a stray positional argument,
# not the -Force switch, so the import would fail; join it into `-Force`.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 -Force
```
### Atomic Test #1 - Remote System Discovery - net
Identify remote systems with net.exe.
Upon successful execution, cmd.exe will execute `net.exe view` and display results of local systems on the network that have file and print sharing enabled.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
net view /domain
net view
```
```
Invoke-AtomicTest T1018 -TestNumbers 1
```
### Atomic Test #2 - Remote System Discovery - net group Domain Computers
Identify remote systems with net.exe querying the Active Directory Domain Computers group.
Upon successful execution, cmd.exe will execute cmd.exe against Active Directory to list the "Domain Computers" group. Output will be via stdout.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
net group "Domain Computers" /domain
```
```
Invoke-AtomicTest T1018 -TestNumbers 2
```
### Atomic Test #3 - Remote System Discovery - nltest
Identify domain controllers for specified domain.
Upon successful execution, cmd.exe will execute nltest.exe against a target domain to retrieve a list of domain controllers. Output will be via stdout.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
nltest.exe /dclist:domain.local
```
```
Invoke-AtomicTest T1018 -TestNumbers 3
```
### Atomic Test #4 - Remote System Discovery - ping sweep
Identify remote systems via ping sweep.
Upon successful execution, cmd.exe will perform a for loop against the 192.168.1.1/24 network. Output will be via stdout.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
for /l %i in (1,1,254) do ping -n 1 -w 100 192.168.1.%i
```
```
Invoke-AtomicTest T1018 -TestNumbers 4
```
### Atomic Test #5 - Remote System Discovery - arp
Identify remote systems via arp.
Upon successful execution, cmd.exe will execute arp to list out the arp cache. Output will be via stdout.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
arp -a
```
```
Invoke-AtomicTest T1018 -TestNumbers 5
```
### Atomic Test #6 - Remote System Discovery - arp nix
Identify remote systems via arp.
Upon successful execution, sh will execute arp to list out the arp cache. Output will be via stdout.
**Supported Platforms:** linux, macos
#### Dependencies: Run with `sh`!
##### Description: Check if arp command exists on the machine
##### Check Prereq Commands:
```sh
if [ -x "$(command -v arp)" ]; then exit 0; else exit 1; fi;
```
##### Get Prereq Commands:
```sh
echo "Install arp on the machine."; exit 1;
```
```
Invoke-AtomicTest T1018 -TestNumbers 6 -GetPreReqs
```
#### Attack Commands: Run with `sh`
```sh
arp -a | grep -v '^?'
```
```
Invoke-AtomicTest T1018 -TestNumbers 6
```
### Atomic Test #7 - Remote System Discovery - sweep
Identify remote systems via ping sweep.
Upon successful execution, sh will perform a ping sweep on the 192.168.1.1/24 and echo via stdout if an IP is active.
**Supported Platforms:** linux, macos
#### Attack Commands: Run with `sh`
```sh
for ip in $(seq 1 254); do ping -c 1 192.168.1.$ip; [ $? -eq 0 ] && echo "192.168.1.$ip UP" || : ; done
```
```
Invoke-AtomicTest T1018 -TestNumbers 7
```
### Atomic Test #8 - Remote System Discovery - nslookup
Powershell script that runs nslookup on cmd.exe against the local /24 network of the first network adaptor listed in ipconfig.
Upon successful execution, powershell will identify the ip range (via ipconfig) and perform a for loop and execute nslookup against that IP range. Output will be via stdout.
**Supported Platforms:** windows
Elevation Required (e.g. root or admin)
#### Attack Commands: Run with `powershell`
```powershell
$localip = ((ipconfig | findstr [0-9].\.)[0]).Split()[-1]
$pieces = $localip.split(".")
$firstOctet = $pieces[0]
$secondOctet = $pieces[1]
$thirdOctet = $pieces[2]
foreach ($ip in 1..255 | % { "$firstOctet.$secondOctet.$thirdOctet.$_" } ) {cmd.exe /c nslookup $ip}
```
```
Invoke-AtomicTest T1018 -TestNumbers 8
```
### Atomic Test #9 - Remote System Discovery - adidnsdump
This tool enables enumeration and exporting of all DNS records in the zone for recon purposes of internal networks
Python 3 and adidnsdump must be installed, use the get_prereq_command's to meet the prerequisites for this test.
Successful execution of this test will list dns zones in the terminal.
**Supported Platforms:** windows
Elevation Required (e.g. root or admin)
#### Dependencies: Run with `powershell`!
##### Description: Computer must have python 3 installed
##### Check Prereq Commands:
```powershell
if (python --version) {exit 0} else {exit 1}
```
##### Get Prereq Commands:
```powershell
echo "Python 3 must be installed manually"
```
##### Description: Computer must have pip installed
##### Check Prereq Commands:
```powershell
if (pip3 -V) {exit 0} else {exit 1}
```
##### Get Prereq Commands:
```powershell
echo "PIP must be installed manually"
```
##### Description: adidnsdump must be installed and part of PATH
##### Check Prereq Commands:
```powershell
if (cmd /c adidnsdump -h) {exit 0} else {exit 1}
```
##### Get Prereq Commands:
```powershell
pip3 install adidnsdump
```
```
Invoke-AtomicTest T1018 -TestNumbers 9 -GetPreReqs
```
#### Attack Commands: Run with `command_prompt`
```command_prompt
adidnsdump -u domain\user -p password --print-zones 192.168.1.1
```
```
Invoke-AtomicTest T1018 -TestNumbers 9
```
## Detection
System and network discovery techniques normally occur throughout an operation as an adversary learns the environment. Data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as Lateral Movement, based on the information obtained.
Normal, benign system and network events related to legitimate remote system discovery may be uncommon, depending on the environment and how they are used. Monitor processes and command-line arguments for actions that could be taken to gather system and network information. Remote access tools with built-in features may interact directly with the Windows API to gather information. Information may also be acquired through Windows system management tools such as [Windows Management Instrumentation](https://attack.mitre.org/techniques/T1047) and [PowerShell](https://attack.mitre.org/techniques/T1059/001).
In cloud environments, the usage of particular commands or APIs to request information about remote systems may be common. Where possible, anomalous usage of these commands and APIs or the usage of these commands and APIs in conjunction with additional unexpected commands may be a sign of malicious use. Logging methods provided by cloud providers that capture history of CLI commands executed or API usage may be utilized for detection.
## Shield Active Defense
### Software Manipulation
Make changes to a system's software properties and functions to achieve a desired effect.
Software Manipulation allows a defender to alter or replace elements of the operating system, file system, or any other software installed and executed on a system.
#### Opportunity
There is an opportunity for the defender to observe the adversary and control what they can see, what effects they can have, and/or what data they can access.
#### Use Case
A defender can change the output of a recon commands to hide simulation elements you donโt want attacked and present simulation elements you want the adversary to engage with.
#### Procedures
Hook the Win32 Sleep() function so that it always performs a Sleep(1) instead of the intended duration. This can increase the speed at which dynamic analysis can be performed when a normal malicious file sleeps for long periods before attempting additional capabilities.
Hook the Win32 NetUserChangePassword() and modify it such that the new password is different from the one provided. The data passed into the function is encrypted along with the modified new password, then logged so a defender can get alerted about the change as well as decrypt the new password for use.
Alter the output of an adversary's profiling commands to make newly-built systems look like the operating system was installed months earlier.
Alter the output of adversary recon commands to not show important assets, such as a file server containing sensitive data.
| github_jupyter |
# BERT finetuning on Yelp NYC data
BERT, or Bidirectional Encoder Representations from Transformers, is a new method of fine tuning a pre-trained language model for specific NLP tasks. It has been shown to beat the SOTA methods in almost every domain, and is highly adaptable to a wide range of tasks. In this notebook, we will use a Google Colab cloud TPU to fine-tune the Large Uncased BERT model on our Yelp NYC hotel reviews. This notebook was created on Google Colab and then ported to our Jupyter notebook collection.
----
```
!pip install bert-tensorflow
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
```
This section is Colab-specific and is used for setting up the GCP bucket to save the model checkpoints and weights in and authenticating the TPU.
```
# Set the output directory for saving model file
# Optionally, set a GCP bucket location
OUTPUT_DIR = 'yelp_finetuning'#@param {type:"string"}
#@markdown Whether or not to clear/delete the directory and create a new one
DO_DELETE = False #@param {type:"boolean"}
#@markdown Set USE_BUCKET and BUCKET if you want to (optionally) store model output on GCP bucket.
USE_BUCKET = True #@param {type:"boolean"}
BUCKET = 'lucas0' #@param {type:"string"}
if USE_BUCKET:
OUTPUT_DIR = 'gs://{}/{}'.format(BUCKET, OUTPUT_DIR)
from google.colab import auth
auth.authenticate_user()
if DO_DELETE:
try:
tf.gfile.DeleteRecursively(OUTPUT_DIR)
except:
# Doesn't matter if the directory didn't exist
pass
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
```
Because a TPU is a Google Cloud hosted runtime, we need to manually set up the data. I ran the protobuffer processing script on the YelpNYC dataset offline and then uploaded the text-ified version to our Google Cloud Storage bucket to be pulled.
```
from tensorflow import keras
import os
import re
# Download and process the dataset files.
def download_and_load_dataset(force_download=False):
    # Fetch the normalized Yelp NYC reviews text file (cached by Keras under
    # ~/.keras/datasets) and split it into individual review records on the
    # '\n, ' record separator.
    # NOTE(review): `force_download` is accepted but never used — a cached
    # copy is always reused.
    dataset = tf.keras.utils.get_file(
        fname="normalizedNYCYelp.txt",
        origin="https://storage.googleapis.com/lucas0/imdb_classification/normalizedNYCYelp.txt",
        extract=False)
    dfile = open(dataset).read()  # NOTE(review): file handle is never closed; prefer a `with` block
    reviews = dfile.split('\n, ')
    return reviews
```
Because we don't have the scripts directory (for this notebook, we will find a way to use our scripts in the future), I had to process the data manually. I took 5000 training and testing samples from the data, following the notebook from Google Research fine-tuning BERT for IMDB review sentiment analysis.
```
import numpy as np
reviews = download_and_load_dataset()
select = reviews[1:10001]
data = {}
data['review'] = []
data['deceptive'] = []
for x in select:
data['review'].append(x.split('\n')[0].split(': ')[1].replace('"', '').strip())
data['deceptive'].append(0 if 'label' in x else 1)
dataDict = pd.DataFrame.from_dict(data)
train = dataDict[:5000]
test = dataDict[5001:10001]
DATA_COLUMN = 'review'
LABEL_COLUMN = 'deceptive'
# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'
label_list = [0, 1]
```
BERT uses a special type of tokenizer that breaks down words into their sub-grams. BERT stores multiple embeddings for one word, so that the word 'bank' in 'river bank' is seen as different to 'bank holiday'. This is why it outperforms other models thus far. In this code, BERT creates training and testing input examples by running some data through the classifier, so that the tokenizer can be built in the correct format.
```
# Use the InputExample class from BERT's run_classifier code to create examples from the data
train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None,
text_a = x[DATA_COLUMN],
text_b = None,
label = x[LABEL_COLUMN]), axis = 1)
BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
def create_tokenizer_from_hub_module():
    """Get the vocab file and casing info from the Hub module.

    Builds a throwaway TF1 graph just to read the hub module's
    "tokenization_info" signature; the vocab path and lowercasing flag are
    graph constants, so a single Session.run suffices.
    """
    with tf.Graph().as_default():
        bert_module = hub.Module(BERT_MODEL_HUB)
        tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
        with tf.Session() as sess:
            vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                                  tokenization_info["do_lower_case"]])
    # WordPiece tokenizer matching the pretrained checkpoint's vocabulary
    return bert.tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case)
tokenizer = create_tokenizer_from_hub_module()
```
Now we actually process our data. We convert the examples we generated from our data above into actual features. We will limit the sequences to be 128 characters long.
```
# We'll set sequences to be at most 128 tokens long.
MAX_SEQ_LENGTH = 128
# Convert our train and test features to InputFeatures that BERT understands.
train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)
```
Now we define our BERT model creation function! This simply creates and returns the network, and stores the loss in a publicly accessible Tensorflow variable.
```
def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,
                 num_labels):
    """Creates a classification model.

    Loads the pretrained BERT hub module (trainable, so it is fine-tuned) and
    adds a single dense classification layer on top of the pooled [CLS]
    output.

    Args:
        is_predicting: if True, return (predicted_labels, log_probs) only;
            otherwise also compute the training loss.
        input_ids, input_mask, segment_ids: BERT input tensors produced by
            convert_examples_to_features.
        labels: integer class ids.
        num_labels: number of output classes.
    """
    bert_module = hub.Module(
        BERT_MODEL_HUB,
        trainable=True)
    bert_inputs = dict(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids)
    bert_outputs = bert_module(
        inputs=bert_inputs,
        signature="tokens",
        as_dict=True)

    # Use "pooled_output" for classification tasks on an entire sentence.
    # Use "sequence_outputs" for token-level output.
    output_layer = bert_outputs["pooled_output"]

    hidden_size = output_layer.shape[-1].value

    # Classification head: weights initialized as in the original BERT code
    output_weights = tf.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())

    with tf.variable_scope("loss"):
        # Dropout helps prevent overfitting (keep_prob=0.9 -> 10% dropout;
        # TF1 API — newer TF uses rate= instead)
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        # Convert labels into one-hot encoding
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

        predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))
        # If we're predicting, we want predicted labels and the probabilities.
        if is_predicting:
            return (predicted_labels, log_probs)

        # If we're train/eval, compute loss between predicted and actual label
        # (cross-entropy averaged over the batch)
        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)
        return (loss, predicted_labels, log_probs)
```
This function passses the parameters and evaluates our model using the powerful tf.Estimator. It creates an optimizer based on the learning rate and steps, and creates the model with our labels, feature shapes, etc, and returns a function to be used by tf.Estimator with everything it needs inside.
```
# model_fn_builder actually creates our model function
# using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps,
                     num_warmup_steps):
  """Returns a `model_fn` closure for tf.estimator.Estimator / TPUEstimator.

  Args:
    num_labels: number of target classes.
    learning_rate: base learning rate for the BERT optimizer.
    num_train_steps: total number of optimisation steps.
    num_warmup_steps: steps over which the learning rate ramps up.
  """
  def model_fn(features, labels, mode, params):
    """The `model_fn` for TPUEstimator: builds the graph for the given mode."""
    # Unpack the feature tensors produced by the input_fn.
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]

    is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)

    # TRAIN and EVAL
    if not is_predicting:

      (loss, predicted_labels, log_probs) = create_model(
        is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)

      # Adam optimizer with linear warmup/decay from the bert package.
      train_op = bert.optimization.create_optimizer(
          loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)

      # Calculate evaluation metrics.
      def metric_fn(label_ids, predicted_labels):
        # NOTE(review): f1_score/auc below treat label ids as binary {0, 1}
        # values -- only meaningful for two-class problems. Confirm if more
        # labels are ever used.
        accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
        f1_score = tf.contrib.metrics.f1_score(
            label_ids,
            predicted_labels)
        auc = tf.metrics.auc(
            label_ids,
            predicted_labels)
        recall = tf.metrics.recall(
            label_ids,
            predicted_labels)
        precision = tf.metrics.precision(
            label_ids,
            predicted_labels)
        true_pos = tf.metrics.true_positives(
            label_ids,
            predicted_labels)
        true_neg = tf.metrics.true_negatives(
            label_ids,
            predicted_labels)
        false_pos = tf.metrics.false_positives(
            label_ids,
            predicted_labels)
        false_neg = tf.metrics.false_negatives(
            label_ids,
            predicted_labels)
        return {
            "eval_accuracy": accuracy,
            "f1_score": f1_score,
            "auc": auc,
            "precision": precision,
            "recall": recall,
            "true_positives": true_pos,
            "true_negatives": true_neg,
            "false_positives": false_pos,
            "false_negatives": false_neg
        }

      eval_metrics = metric_fn(label_ids, predicted_labels)

      if mode == tf.estimator.ModeKeys.TRAIN:
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op)
      else:
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          eval_metric_ops=eval_metrics)
    else:
      # PREDICT: no loss or optimizer, just the model outputs.
      (predicted_labels, log_probs) = create_model(
        is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)

      predictions = {
          'probabilities': log_probs,
          'labels': predicted_labels
      }
      return tf.estimator.EstimatorSpec(mode, predictions=predictions)

  # Return the actual model function in the closure
  return model_fn
```
Here we programmatically calculate how many training and warmup steps to do based on what our batch size and number of training epochs are. We also set up how often we take model checkpoints, in case our instance goes down and we lose all our progress.
```
# Fine-tuning hyperparameters.
BATCH_SIZE = 32
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 500  # checkpoint every 500 steps
SAVE_SUMMARY_STEPS = 100      # write summaries every 100 steps

# Compute # train and warmup steps from batch size
num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
print(num_train_steps, num_warmup_steps)

# Specify output directory and number of checkpoint steps to save
run_config = tf.estimator.RunConfig(
    model_dir=OUTPUT_DIR,
    save_summary_steps=SAVE_SUMMARY_STEPS,
    save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)
```
Here we simply pass the steps calculated above to our model builder and then pass our model creator function to our estimator. Our estimator works as a sort of runtime manager, like keras.fit, where we specify batch size. It also sets up output directories for model checkpoints. We can see all the configs it sets up below in the output.
```
# Build the model_fn with the step schedule computed above ...
model_fn = model_fn_builder(
  num_labels=len(label_list),
  learning_rate=LEARNING_RATE,
  num_train_steps=num_train_steps,
  num_warmup_steps=num_warmup_steps)

# ... and hand it to the Estimator, which manages the training runtime,
# checkpoints and the output directory (see run_config).
estimator = tf.estimator.Estimator(
  model_fn=model_fn,
  config=run_config,
  params={"batch_size": BATCH_SIZE})
```
This function simply returns a function that provides our input to the model.
```
# Create an input function for training. drop_remainder = True for using TPUs
# (drops the final partial batch so all batches have a static shape).
train_input_fn = bert.run_classifier.input_fn_builder(
    features=train_features,
    seq_length=MAX_SEQ_LENGTH,
    is_training=True,
    drop_remainder=True)
```
Now we begin training! Due to this being the large BERT model, it will take some time. However, as we're utilizing a Google Cloud TPU, this isn't a problem for us anymore.
```
# Fine-tune BERT and report the wall-clock time for the whole run.
print('Beginning Training!')
current_time = datetime.now()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("Training took time ", datetime.now() - current_time)
```
And there we have it. After training for a little over 6 hours, we managed to achieve a loss of 0.5. Now, in order to evaluate our model, we need to create a test input function in the same fashion and evaluate it in non-training mode (prediction mode).
```
# Build a non-training input function over the held-out set ...
test_input_fn = bert.run_classifier.input_fn_builder(
    features=test_features,
    seq_length=MAX_SEQ_LENGTH,
    is_training=False,
    drop_remainder=False)

# ... and run one full evaluation pass (steps=None: until input is exhausted).
estimator.evaluate(input_fn=test_input_fn, steps=None)
```
Our accuracy on this run seems to be about .65, which is surprisingly good to me considering there was very little tweaking of the model done to be more fitted to our task rather than IMDB sentiment classification. It did get more true negatives than any other class, which is great for us, as a true negative corresponds to a correct deceptive classification.
```
def getPrediction(in_sentences):
    """Run the trained estimator on raw sentences.

    Returns a list of (sentence, log-probabilities, label-name) triples.
    """
    class_names = ["Deceptive", "Genuine"]
    # Wrap every sentence in an InputExample; label 0 is a dummy value and
    # guid="" is unused at prediction time.
    examples = []
    for sentence in in_sentences:
        examples.append(run_classifier.InputExample(
            guid="", text_a=sentence, text_b=None, label=0))
    features = run_classifier.convert_examples_to_features(
        examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    predict_input_fn = run_classifier.input_fn_builder(
        features=features, seq_length=MAX_SEQ_LENGTH, is_training=False,
        drop_remainder=False)
    results = []
    for sentence, prediction in zip(in_sentences,
                                    estimator.predict(predict_input_fn)):
        results.append((sentence, prediction['probabilities'],
                        class_names[prediction['labels']]))
    return results
```
Let's actually see its predictive power as a classifier. I wrote four 'deceptive' reviews, 2 negative and two positive.
```
# Hand-written "deceptive" hold-out reviews: two negative, two positive.
pred_sentences = [
  "This hotel was truly terrible. Will never be returning.",
  "Rude staff, filthy room, crappy service. Won't be coming here again!",
  "I loved everything about my stay. The Hilton was a fantastic hotel and I will be returning.",
  "Absolutely fantastic! Great food, service and amenities. Looked after for the full duration."
]

predictions = getPrediction(pred_sentences)
# Each entry is a (sentence, log-probabilities, predicted-label) triple.
predictions
```
This seems to align pretty much with our results, getting it 75% correct! Overall I think trained BERT representations used in conjunction with a finely tuned RNN architecture (with perhaps some convolution to enable cross-domain accuracy) could be the key to our success.
| github_jupyter |
## Facial Filters
Using your trained facial keypoint detector, you can now do things like add filters to a person's face, automatically. In this optional notebook, you can play around with adding sunglasses to detected faces in an image by using the keypoints detected around a person's eyes. Check out the `images/` directory to see what other .png's have been provided for you to try, too!
<img src="images/face_filter_ex.png" width=60% height=60%/>
Let's start this process by looking at a sunglasses .png that we'll be working with!
```
# import necessary resources
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import cv2

# load in sunglasses image with cv2 and IMREAD_UNCHANGED
# (IMREAD_UNCHANGED keeps the png's 4th, alpha, channel)
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)

# plot our image
# NOTE(review): cv2 loads channels in BGR(A) order while imshow expects
# RGB(A), so on-screen colors may appear swapped -- confirm if colors matter.
plt.imshow(sunglasses)

# print out its dimensions
print('Image shape: ', sunglasses.shape)
```
## The 4th dimension
You'll note that this image actually has *4 color channels*, not just 3 as your avg RGB image does. This is due to the flag we set `cv2.IMREAD_UNCHANGED`, which tells this to read in another color channel.
#### Alpha channel
It has the usual red, blue, and green channels any color image has, and the 4th channel represents the **transparency level of each pixel** in the image; this is often called the **alpha** channel. Here's how the transparency channel works: the lower the value, the more transparent, or see-through, the pixel will become. The lower bound (completely transparent) is zero here, so any pixels set to 0 will not be seen; these look like white background pixels in the image above, but they are actually totally transparent.
This transparency channel allows us to place this rectangular image of sunglasses on an image of a face and still see the face area that is technically covered by the transparent background of the sunglasses image!
Let's check out the alpha channel of our sunglasses image in the next Python cell. Because many of the pixels in the background of the image have an alpha value of 0, we'll need to explicitly print out non-zero values if we want to see them.
```
# print out the sunglasses transparency (alpha) channel (channel index 3)
alpha_channel = sunglasses[:,:,3]
print ('The alpha channel looks like this (black pixels = transparent): ')
plt.imshow(alpha_channel, cmap='gray')

# just to double check that there are indeed non-zero values
# np.where with a single condition returns the (row, col) indices of the
# opaque (alpha != 0) pixels
values = np.where(alpha_channel != 0)
print ('The non-zero values of the alpha channel are: ')
print (values)
```
#### Overlaying images
This means that when we place this sunglasses image on top of another image, we can use the transparency channel as a filter:
* If the pixels are non-transparent (alpha_channel > 0), overlay them on the new image
#### Keypoint locations
In doing this, it's helpful to understand which keypoint belongs to the eyes, mouth, etc., so in the image below we also print the index of each facial keypoint directly on the image so you can tell which keypoints are for the eyes, eyebrows, etc.,
<img src="images/landmarks_numbered.jpg" width=50% height=50%/>
It may be useful to use keypoints that correspond to the edges of the face to define the width of the sunglasses, and the locations of the eyes to define the placement.
Next, we'll load in an example image. Below, you've been given an image and set of keypoints from the provided training set of data, but you can use your own CNN model to generate keypoints for *any* image of a face (as in Notebook 3) and go through the same overlay process!
```
# load in training data (one row per image: file name followed by the
# flattened (x, y) keypoint coordinates)
key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')

# print out some stats about the data
print('Number of images: ', key_pts_frame.shape[0])
# helper function to display keypoints
def show_keypoints(image, key_pts):
    """Display *image* with its keypoints overlaid as small magenta dots."""
    xs = key_pts[:, 0]
    ys = key_pts[:, 1]
    plt.imshow(image)
    plt.scatter(xs, ys, s=20, marker='.', c='m')
# a selected image
n = 120
image_name = key_pts_frame.iloc[n, 0]
image = mpimg.imread(os.path.join('data/training/', image_name))
# Bug fix: Series.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported replacement and returns the same ndarray.
key_pts = key_pts_frame.iloc[n, 1:].to_numpy()
# flattened (x, y) pairs -> one row per keypoint
key_pts = key_pts.astype('float').reshape(-1, 2)

print('Image name: ', image_name)
plt.figure(figsize=(5, 5))
show_keypoints(image, key_pts)
plt.show()
```
Next, you'll see an example of placing sunglasses on the person in the loaded image.
Note that the keypoints are numbered off-by-one in the numbered image above, and so `key_pts[0,:]` corresponds to the first point (1) in the labelled image.
```
# Display sunglasses on top of the image in the appropriate place

# copy of the face image for overlay
image_copy = np.copy(image)

# top-left location for sunglasses to go
# 17 = edge of left eyebrow
x = int(key_pts[17, 0])
y = int(key_pts[17, 1])

# height and width of sunglasses
# h = length of nose
h = int(abs(key_pts[27,1] - key_pts[34,1]))
# w = left to right eyebrow edges
w = int(abs(key_pts[17,0] - key_pts[26,0]))

# read in sunglasses (IMREAD_UNCHANGED keeps the alpha channel)
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# resize sunglasses to the face-relative size computed above
new_sunglasses = cv2.resize(sunglasses, (w, h), interpolation = cv2.INTER_CUBIC)

# get region of interest on the face to change (a view into image_copy)
roi_color = image_copy[y:y+h,x:x+w]

# find all non-transparent pts: (row, col) indices where alpha > 0
ind = np.argwhere(new_sunglasses[:,:,3] > 0)

# for each non-transparent point, replace the original image pixel with that of the new_sunglasses
# NOTE(review): cv2 loads BGR(A) while `image` came from mpimg (RGB), so the
# three color channels copied here may be swapped -- confirm if colors matter.
for i in range(3):
    roi_color[ind[:,0],ind[:,1],i] = new_sunglasses[ind[:,0],ind[:,1],i]

# set the area of the image to the changed region with sunglasses
image_copy[y:y+h,x:x+w] = roi_color

# display the result!
plt.imshow(image_copy)
```
#### Further steps
Look in the `images/` directory to see other available .png's for overlay! Also, you may notice that the overlay of the sunglasses is not entirely perfect; you're encouraged to play around with the scale of the width and height of the glasses and investigate how to perform [image rotation](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html) in OpenCV so as to match an overlay with any facial pose.
| github_jupyter |
# Acme: Quickstart
## Guide to installing Acme and training your first D4PG agent.
# <a href="https://colab.research.google.com/github/deepmind/acme/blob/master/examples/quickstart.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Select your environment library
Note: `dm_control` requires a valid Mujoco license.
```
environment_library = 'gym' # @param ['dm_control', 'gym']
```
## Add your Mujoco license here
Note: only required for `dm_control`.
```
# Paste your MuJoCo license key between the triple quotes.
mjkey = """
""".strip()

if not mjkey and environment_library == 'dm_control':
  # Bug fix: the original message read "if you do not have on " -- missing
  # the word "one" and the separating punctuation.
  raise ValueError(
      'A Mujoco license is required for `dm_control`; if you do not have '
      'one, consider selecting `gym` from the dropdown menu in the cell '
      'above.')
```
## Installation
### Install Acme
```
!pip install dm-acme
!pip install dm-acme[reverb]
!pip install dm-acme[tf]
```
### Install the environment library
Without a valid license you won't be able to use the `dm_control` environments but can still follow this colab using the `gym` environments.
If you have a personal Mujoco license (_not_ an institutional one), you may
need to follow the instructions at https://research.google.com/colaboratory/local-runtimes.html to run a Jupyter kernel on your local machine.
This will allow you to install `dm_control` by following instructions in
https://github.com/deepmind/dm_control and using a personal MuJoCo license.
```
#@test {"skip": true}
if environment_library == 'dm_control':
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL dependencies
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Get MuJoCo binaries
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Install dm_control
!pip install dm_control
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
# Check that the installation succeeded
try:
from dm_control import suite
env = suite.load('cartpole', 'swingup')
pixels = env.physics.render()
except Exception as e:
raise RuntimeError(
'Something went wrong during installation. Check the shell output above '
'for more information. If you do not have a valid Mujoco license, '
'consider selecting `gym` in the dropdown menu at the top of this '
'Colab.') from e
else:
del suite, env, pixels
elif environment_library == 'gym':
!pip install gym
```
### Install visualization packages
```
!sudo apt-get install -y xvfb ffmpeg
!pip install imageio
!pip install PILLOW
!pip install pyvirtualdisplay
```
## Import Modules
```
import IPython
from acme import environment_loop
from acme import specs
from acme import wrappers
from acme.agents.tf import d4pg
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import numpy as np
import sonnet as snt
# Import the selected environment lib
if environment_library == 'dm_control':
from dm_control import suite
elif environment_library == 'gym':
import gym
# Imports required for visualization
import pyvirtualdisplay
import imageio
import base64
# Set up a virtual display for rendering.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
```
## Load an environment
We can now load an environment. In what follows we'll create an environment and grab the environment's specifications.
```
# Instantiate the selected environment.
if environment_library == 'dm_control':
  environment = suite.load('cartpole', 'balance')
elif environment_library == 'gym':
  environment = gym.make('MountainCarContinuous-v0')
  environment = wrappers.GymWrapper(environment)  # To dm_env interface.
else:
  raise ValueError(
      "Unknown environment library: {};".format(environment_library) +
      "choose among ['dm_control', 'gym'].")

# Make sure the environment outputs single-precision floats.
environment = wrappers.SinglePrecisionWrapper(environment)

# Grab the spec of the environment.
environment_spec = specs.make_environment_spec(environment)
```
## Create a D4PG agent
```
#@title Build agent networks

# Get total number of action dimensions from action spec.
num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)

# Create the shared observation network; here simply a state-less operation.
observation_network = tf2_utils.batch_concat

# Create the deterministic policy network.
policy_network = snt.Sequential([
    networks.LayerNormMLP((256, 256, 256), activate_final=True),
    # Near-zero init keeps initial actions small; Tanh maps them into the
    # action spec's range.
    networks.NearZeroInitializedLinear(num_dimensions),
    networks.TanhToSpec(environment_spec.actions),
])

# Create the distributional critic network.
critic_network = snt.Sequential([
    # The multiplexer concatenates the observations/actions.
    networks.CriticMultiplexer(),
    networks.LayerNormMLP((512, 512, 256), activate_final=True),
    # Categorical value head: 51 atoms on [-150, 150].
    networks.DiscreteValuedHead(vmin=-150., vmax=150., num_atoms=51),
])

# Create a logger for the agent and environment loop (prints at most every
# 10 seconds).
agent_logger = loggers.TerminalLogger(label='agent', time_delta=10.)
env_loop_logger = loggers.TerminalLogger(label='env_loop', time_delta=10.)

# Create the D4PG agent.
agent = d4pg.D4PG(
    environment_spec=environment_spec,
    policy_network=policy_network,
    critic_network=critic_network,
    observation_network=observation_network,
    sigma=1.0,  # stddev of the exploration noise
    logger=agent_logger,
    checkpoint=False
)

# Create a loop connecting this agent to the environment created above.
env_loop = environment_loop.EnvironmentLoop(
    environment, agent, logger=env_loop_logger)
```
## Run a training loop
```
# Run a `num_episodes` training episodes.
# Rerun this cell until the agent has learned the given task.
# (Progress is reported through env_loop_logger configured above.)
env_loop.run(num_episodes=100)
```
## Visualize an evaluation loop
### Helper functions for rendering and vizualization
```
# Create a simple helper function to render a frame from the current state of
# the environment.
if environment_library == 'dm_control':
  def render(env):
    # Physics-based rendering from camera 0.
    return env.physics.render(camera_id=0)
elif environment_library == 'gym':
  def render(env):
    # GymWrapper keeps the raw gym env on `.environment`.
    return env.environment.render(mode='rgb_array')
else:
  raise ValueError(
      "Unknown environment library: {};".format(environment_library) +
      "choose among ['dm_control', 'gym'].")
def display_video(frames, filename='temp.mp4'):
  """Save `frames` as an MP4 file and return an HTML tag embedding it.

  Args:
    frames: iterable of RGB image arrays (one per video frame).
    filename: path of the temporary video file to write.

  Returns:
    An IPython.display.HTML object with the base64-encoded video inlined.
  """
  # Write video
  with imageio.get_writer(filename, fps=60) as video:
    for frame in frames:
      video.append_data(frame)

  # Read the file back and embed it. Bug fix: the original used
  # open(filename, 'rb').read(), leaking the file handle; a context
  # manager guarantees it is closed.
  with open(filename, 'rb') as f:
    video = f.read()
  b64_video = base64.b64encode(video)
  video_tag = ('<video width="320" height="240" controls alt="test" '
               'src="data:video/mp4;base64,{0}">').format(b64_video.decode())
  return IPython.display.HTML(video_tag)
```
### Run and visualize the agent in the environment for an episode
```
timestep = environment.reset()
frames = [render(environment)]  # frame of the initial state

while not timestep.last():
  # Simple environment loop.
  action = agent.select_action(timestep.observation)
  timestep = environment.step(action)

  # Render the scene and add it to the frame stack.
  frames.append(render(environment))

# Save and display a video of the behaviour.
display_video(np.array(frames))
```
| github_jupyter |
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/system-design-primer).
# Design a call center
## Constraints and assumptions
* What levels of employees are in the call center?
* Operator, supervisor, director
* Can we assume operators always get the initial calls?
* Yes
* If there is no available operators or the operator can't handle the call, does the call go to the supervisors?
* Yes
* If there is no available supervisors or the supervisor can't handle the call, does the call go to the directors?
* Yes
* Can we assume the directors can handle all calls?
* Yes
* What happens if nobody can answer the call?
* It gets queued
* Do we need to handle 'VIP' calls where we put someone to the front of the line?
* No
* Can we assume inputs are valid or do we have to validate them?
* Assume they're valid
## Solution
```
%%writefile call_center.py
from abc import ABCMeta, abstractmethod
from collections import deque
from enum import Enum
class Rank(Enum):
    """Employee seniority; also the minimum rank required by a call."""
    OPERATOR = 0    # first line
    SUPERVISOR = 1  # first escalation target
    DIRECTOR = 2    # handles everything
class Employee(metaclass=ABCMeta):
    """Abstract call-center employee; subclasses define how calls escalate."""

    def __init__(self, employee_id, name, rank, call_center):
        self.employee_id = employee_id
        self.name = name
        self.rank = rank                # Rank enum member
        self.call = None                # call currently being handled, if any
        self.call_center = call_center  # back-reference for notifications

    def take_call(self, call):
        """Assume the employee will always successfully take the call."""
        self.call = call
        self.call.employee = self
        self.call.state = CallState.IN_PROGRESS

    def complete_call(self):
        # Mark the call done and notify the call center (which may then
        # dispatch a queued call -- see CallCenter).
        self.call.state = CallState.COMPLETE
        self.call_center.notify_call_completed(self.call)

    @abstractmethod
    def escalate_call(self):
        """Raise the call's required rank and hand it back to the center."""
        pass

    def _escalate_call(self):
        # Detach the call from this employee and notify the call center.
        self.call.state = CallState.READY
        call = self.call
        self.call = None
        self.call_center.notify_call_escalated(call)
class Operator(Employee):
    """First-line employee; escalates unresolvable calls to a supervisor."""

    def __init__(self, employee_id, name, call_center=None):
        # Bug fix: Employee.__init__ requires a call_center argument; the
        # original super() call omitted it and raised TypeError. Accepting
        # call_center with a default keeps existing two-argument calls valid.
        super(Operator, self).__init__(employee_id, name, Rank.OPERATOR,
                                       call_center)

    def escalate_call(self):
        # Bug fix: dispatch_call() routes on `call.rank`; the original
        # assigned the nonexistent `call.level`, so escalation had no effect.
        self.call.rank = Rank.SUPERVISOR
        self._escalate_call()
class Supervisor(Employee):
    """Second-line employee; escalates unresolvable calls to a director."""

    def __init__(self, employee_id, name, call_center=None):
        # Bug fixes: the original called super(Operator, self) -- the wrong
        # class -- and omitted the call_center argument that
        # Employee.__init__ requires.
        super(Supervisor, self).__init__(employee_id, name, Rank.SUPERVISOR,
                                         call_center)

    def escalate_call(self):
        # Route on `rank`, the attribute dispatch_call() actually inspects
        # (the original set a nonexistent `call.level`).
        self.call.rank = Rank.DIRECTOR
        self._escalate_call()
class Director(Employee):
    """Top-level employee; must be able to handle every call."""

    def __init__(self, employee_id, name, call_center=None):
        # Bug fixes: the original called super(Operator, self) -- the wrong
        # class -- and omitted the call_center argument that
        # Employee.__init__ requires.
        super(Director, self).__init__(employee_id, name, Rank.DIRECTOR,
                                       call_center)

    def escalate_call(self):
        # Bug fix: NotImplemented is a constant, not an exception class;
        # calling it raises a confusing TypeError. Use NotImplementedError.
        raise NotImplementedError('Directors must be able to handle any call')
class CallState(Enum):
    """Call lifecycle: READY -> IN_PROGRESS -> COMPLETE (READY again on escalation)."""
    READY = 0        # waiting to be (re)assigned
    IN_PROGRESS = 1  # an employee is on the call
    COMPLETE = 2     # call finished
class Call(object):
    """A single inbound call, initially unassigned and in the READY state."""

    def __init__(self, rank):
        self.employee = None          # employee currently handling the call
        self.rank = rank              # minimum Rank able to handle the call
        self.state = CallState.READY  # every call starts out READY
class CallCenter(object):
    """Routes incoming calls to the lowest-ranked available employee."""

    def __init__(self, operators, supervisors, directors):
        self.operators = operators
        self.supervisors = supervisors
        self.directors = directors
        self.queued_calls = deque()  # FIFO of calls nobody could take

    def dispatch_call(self, call):
        """Assign `call` to a free employee of sufficient rank, else queue it."""
        if call.rank not in (Rank.OPERATOR, Rank.SUPERVISOR, Rank.DIRECTOR):
            raise ValueError('Invalid call rank: {}'.format(call.rank))

        employee = None
        # Try each tier in order, starting at the call's required rank.
        if call.rank == Rank.OPERATOR:
            employee = self._dispatch_call(call, self.operators)
        if call.rank == Rank.SUPERVISOR or employee is None:
            employee = self._dispatch_call(call, self.supervisors)
        if call.rank == Rank.DIRECTOR or employee is None:
            employee = self._dispatch_call(call, self.directors)
        if employee is None:
            self.queued_calls.append(call)

    def _dispatch_call(self, call, employees):
        """Give `call` to the first idle employee in `employees`, if any."""
        for employee in employees:
            if employee.call is None:
                employee.take_call(call)
                return employee
        return None

    # Bug fix: the original left the three method bodies below as bare
    # comments ("# ..."), which is a SyntaxError; stub them out explicitly.
    def notify_call_escalated(self, call):
        pass  # TODO: re-dispatch the escalated call

    def notify_call_completed(self, call):
        pass  # TODO: free the employee and pull from queued_calls

    def dispatch_queued_call_to_newly_freed_employee(self, call, employee):
        pass  # TODO: hand a queued call to the newly freed employee
```
| github_jupyter |
# Fixpoint Quantization and Overflow #
This notebook shows how number can be represented in binary format and how to (re-)quantize signals. This is also shown practically in Python using ``pyfda_fix_lib`` (numpy based).
Most images don't show in the github HTML at the moment (05/2020). Running the notebook locally works as well as the Notebook Viewer at https://nbviewer.jupyter.org/. Simply copy and paste the URL of this notebook.
```
import os, sys
import time
module_path = os.path.abspath(os.path.join('..')) # append directory one level up to import path
if module_path not in sys.path: # ... if it hasn't been appended already
sys.path.append(module_path)
import dsp_nmigen.pyfda_fix_lib as fx
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('script.mplstyle')
import numpy as np
import scipy.signal as sig
figsize = {"figsize":(13,7)}
```
## Theory ##
Unsigned integers are simply represented by their plain binary value; with a wordlength of $W$ bits, the representable range is $0 \ldots 2^W - 1$.
The next figure shows how signed integer numbers are represented in two's complement format. The MSB is the sign bit which can be interpreted as the negative value $-2^{W-1}$.
<figure>
<center>
<img src='img/twos_complement_signed_int.png' alt='Signed integers in twos complement' width='40%'/>
<figcaption><b>Fig. x:</b> Signed integers in twos complement</figcaption>
</center>
</figure>
The same is possible for signed fractional values, although it is important to realize that the *binary point only exists in the developer's head*! All arithmetic stuff can be implemented with "integer thinking" (and some designers just do that). IMHO, thinking in fractional numbers makes it easier to track the real world value and to separate the range and the resolution.
<figure>
<center>
<img src='img/twos_complement_signed_frac.png' alt='Signed fractional values in twos complement' width='50%'/>
<figcaption><b>Fig. x:</b> Signed fractional values in twos complement</figcaption>
</center>
</figure>
### Increasing the wordlength
Before adding two fixpoint numbers their binary points need to be aligned, i.e. they need to have the same number of integer and fractional bits. This can be achieved by extending integer and / or fractional part.
The value of a fixpoint number doesn't change when zeros are appended to the fractional part (for positive and negative numbers) so that part is easy.
Prepending zeros to the integer part would change the sign of a negative number. Instead, the integer part is extended with copies of the sign bit, also called **sign extension**. VHDL has the function ``SEXT(std_log_vect, int)`` for achieving this.
<img src="img/requant_extension.png" alt="Extnd integer and fractional part" width="40%"/>
### Reduce the number of fractional bits
The problem of reducing the number of fractional places is well known from the decimal system. Different methods have been developed to achieve this like rounding, truncation, see e.g. https://www.eetimes.com/an-introduction-to-different-rounding-algorithms for more details than you'll ever need.
<figure>
<img src='img/requant_reduce_fractional.png' alt='Reduce fractional word length' width='80%'/>
<figcaption>Fig. x: Reduction of fractional wordlength</figcaption>
</figure>
## pyfda_fix_lib
This section uses and describes the fixpoint library `pyfda_fix_lib.py`. A quantizer is constructed as an instance of the class `Fixed()`, its properties are configured with a quantization dict `Q = fx.Fixed(q_dict)`. The quantization dict (in this case `q_dict`) sets the format `QI.QF` at the output of the quantizer and its quantization and overflow behaviour with the following keys:
- **'WI'** : number of integer bits (integer)
- **'WF'** : number of fractional bits (integer)
- **'quant'**: requantization behaviour (**'floor'**, 'round', 'fix', 'ceil', 'rint', 'none')
- **'ovfl'** : overflow behaviour (**'wrap'**, 'sat', 'none')
- **'frmt'** : number base / format for output (**'float'**, 'dec', 'bin', 'hex', 'csd'). Non-floats are scaled with 'scale'
- **'scale'**: float or a keyword; the factor between the fixpoint integer representation (FXP) and the real world value (RWV), RWV = FXP / scale. By default, scale = 1 << WI. If ``scale`` is a float, this value is used.
**Examples:**
```
WI.WF = 3.0, FXP = "b0110." = 6, scale = 8 -> RWV = 6 / 8 = 0.75
WI.WF = 1.2, FXP = "b01.10" = 1.5, scale = 2 -> RWV = 1.5 / 2 = 0.75
```
Alternatively, if:
- ``q_obj['scale'] == 'int'``: `scale = 1 << self.WF`
- ``q_obj['scale'] == 'norm'``: `scale = 2.**(-self.WI)`
`?fx.Fixed` shows the available options.
### Example
In the following example a quantizer is defined with an output format of 0 integer bits and 3 fractional bits, overflows are wrapped around in two's complement style and additional fractional bits are simply truncated ("floor").
```
# Quantizer with 0 integer / 3 fractional bits, truncation and wrap-around.
q_dict = {'WI':0, 'WF': 3, # number of integer and fractional bits
          'quant':'floor', 'ovfl': 'wrap'} # quantization and overflow behaviour

Q = fx.Fixed(q_dict) # instance of fixpoint class Fixed()

for i in np.arange(12)/10: # i = 0, 0.1, 0.2, ...
    print("q<{0:>3.2f}> = {1:>5.3f}".format(i, Q.fixp(i))) # quantize i

# uncomment to show documentation
# ?fx.Fixed
```
### Signal Quantization
A sine signal $s(t)$ is quantized in the code below. The plot shows $s(t)$, the quantized signal $s_Q(t)$ the difference between both signals, the quantization error $\epsilon(t)$.
```
N = 10000; f_a = 1  # number of samples, signal frequency
t = np.linspace(0, 1, N, endpoint=False)
# Amplitude 1.1 exceeds the WI=0 number range, exercising overflow handling.
s = 1.1 * np.sin(2 * np.pi * f_a * t)
#
q_dict = {'WI':0, 'WF': 4, 'quant':'fix', 'ovfl': 'wrap'} # also try 'round' ; 'sat'
Q = fx.Fixed(q_dict) # quantizer instance with parameters defined above

t_cpu = time.perf_counter()
sq = Q.fixp(s) # quantize s
print('Overflows:\t{0}'.format(Q.N_over))
print('Run time:\t{0:.3g} ms for {1} quantizations\n'.format((time.perf_counter()-t_cpu)*1000, Q.N))
#
fig1, ax1 = plt.subplots(**figsize)
ax1.set_title('Quantized Signal $s_Q$({0}.{1}) with Quantizer Settings "{2}", "{3}"'.format(Q.WI, Q.WF, Q.ovfl, Q.quant))
ax1.plot(t, s, label = r'$s(t)$', lw=2)
ax1.step(t, sq, where = 'post', label = r'$s_Q(t)$', lw=2)
# quantization error = difference between original and quantized signal
ax1.plot(t, s-sq, label = r'$\epsilon(t) = s(t) - s_Q(t)$', lw=2)
ax1.legend(fontsize = 14)
ax1.grid(True)
ax1.set_xlabel(r'$t \rightarrow$'); ax1.set_ylabel(r'$s \rightarrow$');
#
### Transfer Function of the Quantizer
The transfer function of the quantizer shows the quantized signal $s_Q(t)$ over the input signal $s(t)$ (a ramp). The advantage over reusing the sine from the previous cell is that the input range can be tailored more easily and that the step size is constant.
```
Q.resetN() # reset overflow counter
# Ramp over [-2, 2) deliberately extends beyond the quantizer's range so the
# overflow behaviour is visible in the transfer function.
x = np.linspace(-2, 2, N, endpoint=False) # generate ramp signal
xq = Q.fixp(x) # quantize x
print('Overflows:\t{0}'.format(Q.N_over))

fig2, ax2 = plt.subplots(**figsize); ax2.grid(True)
ax2.set_title('Quantization Transfer Function')
ax2.step(x,xq, where = 'post')
ax2.set_xlabel(r'$x \rightarrow$'); ax2.set_ylabel(r'$x_Q \rightarrow$');
```
### Number bases and formats
Quantized values can be printed in different number bases, e.g. as a binary string:
```
q_dict_f = {'WI':0, 'WF': 8, # number of integer and fractional bits
        'quant':'floor', 'ovfl': 'wrap', # quantization and overflow behaviour
        'frmt':'bin'} # output format: binary string

Q1 = fx.Fixed(q_dict_f) # instance of fixpoint class Fixed()

for i in np.arange(12)/10: # i = 0, 0.1, 0.2, ...
    print("q<{0:>3.2f}> = {1}".format(i, Q1.float2frmt(Q1.fixp(i)))) # quantize i + display it in the wanted format
```
### Format Conversion
Numbers can also be converted back to floating with the method `frmt2float()`:
```
# Direct setting of quantization options; 'Q':'3.8' is shorthand for WI=3, WF=8.
Q0 = fx.Fixed({'Q':'3.8', 'quant':'round', 'ovfl':'wrap', 'frmt':'bin'})
Q1_dict = Q0.q_obj # read out Q0 quantization dict
Q1_dict.update({'WI':1, 'WF':5}) # update dict
Q1 = fx.Fixed(Q1_dict) # and create a new quantizer instance with it
frmt_str = "0.011"
x_org = Q0.frmt2float(frmt_str)  # parse with the original wordlength ...
x_q = Q1.frmt2float(frmt_str)    # ... and with the reduced one
print("q<b{0}> = {1} -> b{2} = {3}".format(frmt_str, x_org, Q1.float2frmt(x_q), x_q))
```
Bug?: Illegal characters in the string to be formatted (e.g. `frmt2float("0.0x11")`) do not raise an error but are silently ignored.
## FIR Filters
The following cell designs an equiripple filter and plots its impulse response (same as the coefficients) and its magnitude frequency response.
```
numtaps = 50 # number of taps (= filter order + 1)
N_FFT = 2000 # number of frequency bins per half plane
# Equiripple design: passband 0...0.1, stopband 0.12...0.5 (normalized
# frequencies), with the stopband error weighted 10x the passband error.
b = sig.remez(numtaps,[0,0.1,0.12, 0.5], [1,0], [1,10]) # frequency bands, target amplitude, weights
w, h = sig.freqz(b, [1], worN=N_FFT)
f = w / (2*np.pi) # rad/sample -> normalized frequency 0 ... 0.5

fig, (ax1, ax2) = plt.subplots(2, **figsize); ax1.grid(True); ax2.grid(True)
ax1.set_title('Equiripple Lowpass (FIR) Filter: Impulse Response')
ax1.stem(np.arange(numtaps), b, use_line_collection=True)
ax1.set_xlabel(r'$n \rightarrow$'); ax1.set_ylabel(r'$b_n = h[n] \rightarrow$')
ax2.set_title('Magnitude Frequency Response')
ax2.plot(f,np.abs(h))
ax2.set_xlabel(r'$F \rightarrow$'); ax2.set_ylabel(r'$|H(F)| \rightarrow$')
fig.set_tight_layout(True)
```
Fixpoint filters process quantized input data (quantizer $Q_X$ in the image below), have quantized coefficients, a maximum accumulator width ($Q_A$) and a quantized output ($Q_Y$). Fig xx shows the topology of a direct form FIR filter.
<figure>
<center>
<img src='img/fir_df.png' alt='Direct Form FIR Filter' width='30%'/>
<figcaption><b>Fig. x:</b> Direct Form FIR Filter</figcaption>
</center>
</figure>
Due to the non-linear effects of quantization, fixpoint filters can only be simulated in the time domain, taking the filter topology into account. It can make a large difference for IIR filters in which order recursive and transversal part of the filter are calculated (direct form 1 vs. direct form 2) which is not the case for ideal systems.
Fixpoint filters process quantized input data (quantizer $Q_X$ in the image above), have quantized coefficients, a maximum accumulator width ($Q_A$) and a quantized output ($Q_Y$).
```
class FIX_FIR_DF(fx.Fixed):
    """
    Fixpoint FIR filter in direct form.

    Usage:
        Q = FIX_FIR_DF(q_mul, q_acc)   # Instantiate fixpoint filter object
        yq = Q.fxp_filt_df(xq, bq)     # filter quantized input with quantized coeffs

    The fixpoint object has two different quantizers:
    - q_mul describes requantization after coefficient multiplication
    - q_acc describes requantization after each summation in the accumulator
      (resp. in the common summation point)
    """
    def __init__(self, q_mul, q_acc):
        """
        Initialize the two internal quantizers from the quantization dicts
        q_mul (multiplier output) and q_acc (accumulator).
        """
        # test if all passed keys of quantizer object are known
        self.Q_mul = fx.Fixed(q_mul)
        self.Q_mul.resetN()  # reset overflow counter of Q_mul
        self.Q_acc = fx.Fixed(q_acc)
        self.Q_acc.resetN()  # reset overflow counter of Q_acc
        self.resetN()  # reset filter overflow-counter

    def fxp_filt_df(self, x, bq, verbose=True):
        """
        Calculate filter (direct form) response via difference equation with
        quantization.

        Parameters
        ----------
        x : scalar or array-like
            input value(s)
        bq : array-like
            quantized filter coefficients
        verbose : bool
            when True, print the number of multiplier / accumulator overflows

        Returns
        -------
        yq : ndarray
            The quantized filter output as an ndarray of np.float64.
        """
        # Initialize vectors (also speeds up calculation)
        accu_q = np.zeros(len(x))
        x_bq = np.zeros(len(bq))
        # NOTE(review): the last len(bq) output samples stay zero because the
        # slice x[k:k + len(bq)] would otherwise run past the end of x.
        for k in range(len(x) - len(bq)):
            # weighted state-vector x at time k, requantized after multiplication:
            x_bq = self.Q_mul.fixp(x[k:k + len(bq)] * bq)
            # sum up x_bq to get accu[k], requantized in the accumulator
            accu_q[k] = self.Q_acc.fixp(sum(x_bq))
        yq = accu_q  # scaling at the output of the accumulator
        # BUGFIX: was `Fixed.Q_mul.N_over`, which raises a NameError because
        # `Fixed` is not in scope -- the overflow counter lives on the
        # instance's own quantizer objects.
        if (self.Q_mul.N_over and verbose): print('Overflows in Multiplier: ',
                                                  self.Q_mul.N_over)
        if (self.Q_acc.N_over and verbose): print('Overflows in Accumulator: ',
                                                  self.Q_acc.N_over)
        self.N_over = self.Q_mul.N_over + self.Q_acc.N_over
        return yq
# nested loop would be much slower!
# for k in range(Nx - len(bq)):
# for i in len(bq):
# accu_q[k] = fixed(q_acc, (accu_q[k] + fixed(q_mul, x[k+i]*bq[i+1])))
q_bxy = {'WI':0, 'WF': 7, 'quant':'floor', 'ovfl': 'sat'} # quantization dict for x, q and coefficients
q_accu = {'WI':0, 'WF': 15, 'quant':'floor', 'ovfl': 'wrap'} # ... for accumulator
Q_X = fx.Fixed(q_bxy); Q_Y = fx.Fixed(q_bxy); Q_b = fx.Fixed(q_bxy); Q_accu = fx.Fixed(q_accu)
fil_q = FIX_FIR_DF(q_accu, q_accu)
x = np.zeros(1000); x[0] = 1
xq = Q_X.fixp(x); bq = Q_b.fixp(b)
yq = fil_q.fxp_filt_df(xq,bq)
fig, (ax1, ax2) = plt.subplots(2, **figsize); ax1.grid(True); ax2.grid(True)
ax1.set_title('Equiripple Lowpass (FIR) Filter')
ax1.stem(np.arange(numtaps), yq[:numtaps], use_line_collection=True)
ax1.set_xlabel(r'$n \rightarrow$'); ax1.set_ylabel(r'$b_n = h[n] \rightarrow$')
ax2.plot(f,np.abs(h), label="ideal system")
ax2.plot(f, np.abs(np.fft.rfft(yq, 2*N_FFT)[:-1]), label = "quantized system")
ax2.set_xlabel(r'$F \rightarrow$'); ax2.set_ylabel(r'$|H(F)| \rightarrow$')
ax2.legend()
fig.set_tight_layout(True)
```
## IIR Filters
Unlike FIR filters, IIR filters cannot be implemented with the same elegance and efficiency of array mathematics as each sample depends on the output sample as well. The following code cell designs an elliptic low pass filter and plots its magnitude frequency response.
```
b,a = sig.ellip(4,1,40, 2*0.1) # order, pass band ripple, stop band ripple, corner frequency w.r.t. f_S/2
w, h = sig.freqz(b,a, worN=2000)
fig, ax = plt.subplots(1, **figsize); ax.grid(True)
ax.set_title('Elliptic Lowpass (IIR) Filter')
ax.plot(w / (2*np.pi),20*np.log10(np.abs(h)))
ax.set_xlabel(r'$F \rightarrow$'); ax.set_ylabel(r'$|H(F)| \rightarrow$')
print("b=", b)
print("a=", a)
```
<figure>
<center>
<img src='img/iir_df1_df2.png' alt='Direct Form IIR Filter' width='60%'/>
<figcaption><b>Fig. x:</b> Direct Form IIR Filter Type 1 and 2</figcaption>
</center>
</figure>
The following two examples show how to implement the most simple recursive filters (no transversal path, i.e. "all-pole filters").
```
def IIR1(Q_ACCU, x, a):
    """First-order recursive filter: y[i+1] = Q< x[i] + a * y[i] >,
    where Q is the quantizer of the accumulator object Q_ACCU."""
    out = np.zeros(len(x))
    last = len(x) - 1
    for k in range(last):
        out[k + 1] = Q_ACCU.fixp(x[k] + a * out[k])
    return out
def IIR2(Q_ACCU, x, a):
    """Second-order recursive filter: y[i+2] = Q< x[i] + y[i+1] - a * y[i] >,
    where Q is the quantizer of the accumulator object Q_ACCU."""
    out = np.zeros(len(x))
    for k in range(len(x) - 2):
        out[k + 2] = Q_ACCU.fixp(x[k] + out[k + 1] - a * out[k])
    return out
alpha = 0.93 # coefficient
N_sim = 200 # number of simulation steps
x = np.zeros(N_sim); x[0] = 1.0 # x is dirac pulse with weight 1
q_x = {'WI':0,'WF':3,'quant':'round','ovfl':'sat'} # fixpoint quantization for stimulus
q_coeff = {'WI':0,'WF':3,'quant':'round','ovfl':'wrap'} # coefficient quantization dict
# quantizer settings for accumulator
#q_accu = {'WI':0,'WF':4,'quant':'fix','ovfl':'sat'} # saturation and round towards zero -> no limit cycles
q_accu = {'Q':0.8,'quant':'floor','ovfl':'wrap'} # große Grenzzyklen bei QI = 0
# kleine Grenzzyklen mit round / floor, abhängig von alpha:
# q_accu = {'WI':0,'WF':4,'quant':'floor','ovfl':'wrap'}
# Keine Quantisierung -> Werte für I, F beliebig
q_ideal = {'WI':0,'WF':0,'quant':'none','ovfl':'none'}
Q_coeff = fx.Fixed(q_coeff) # Fixpoint Object mit Parametern "q_coeff"
Q_ideal = fx.Fixed(q_ideal) # Fixpoint-Objekt ohne Quantisierung und Overflow
Q_accu = fx.Fixed(q_accu) # Fixpoint-Objekt mit Parametern "q_accu"
n = np.arange(N_sim)
t1 = time.perf_counter()
alpha_q = Q_ideal.fixp(alpha)
y = IIR1(Q_ideal, x, alpha_q) # ohne Quantisierung
#yq = IIR2(fx_IIR, x, alpha_q)
yq = IIR1(Q_accu, x, alpha_q)
```
***
### Copyright
(c) 2016 - 2020 Christian Münker
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources), feel free to use it for your own purposes. Please attribute the work as follows: *Christian Münker, dsp_migen documentation*.
| github_jupyter |
# Recurrent Neural Nets - Fake News
The RNN (LSTM) architecture that we are using is shown below, a many to one RNN.

<img src='https://media.giphy.com/media/l0Iyau7QcKtKUYIda/giphy.gif'>
We achieve 87% accuracy in a test set. However, the article in the second reference claims to have 93% accuracy. The main difference is that they seem to use a Bag of Words model, which loses the order of words when sending into the ML algorithm.
## References:
1. Data: https://github.com/GeorgeMcIntire/fake_real_news_dataset
2. Classification using Scikit Learn: https://blog.kjamistan.com/comparing-scikit-learn-text-classifiers-on-a-fake-news-dataset/
3. Glove vectors: https://nlp.stanford.edu/projects/glove/
```
!pip install tqdm
!conda install -y Pillow
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
from keras.models import Sequential
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization, LSTM, Embedding, Reshape
from keras.models import load_model, model_from_json
from sklearn.model_selection import train_test_split
import os
import urllib
from urllib.request import urlretrieve
from os import mkdir, makedirs, remove, listdir
from collections import Counter
from utilties import *
folder_path = 'data'
file = './data/fakenews.zip'
url = 'https://github.com/GeorgeMcIntire/fake_real_news_dataset/raw/master/fake_or_real_news.csv.zip'
downloadData(file, url)
#################################
# Download GLOVE vector dataset
#################################
file = './data/glove.6B.zip'
url = 'http://nlp.stanford.edu/data/glove.6B.zip'
downloadData(file, url)
with open('./data/glove.6B.50d.txt','rb') as f:
lines = f.readlines()
glove_weights = np.zeros((len(lines), 50))
words = []
for i, line in enumerate(lines):
word_weights = line.split()
words.append(word_weights[0])
weight = word_weights[1:]
glove_weights[i] = np.array([float(w) for w in weight])
word_vocab = [w.decode("utf-8") for w in words]
word2glove = dict(zip(word_vocab, glove_weights))
```
Preprocessing steps: lower case, remove urls, some punctuations etc.
```
from keras.engine.topology import Layer
import keras.backend as K
from keras import initializers
import numpy as np
class Embedding2(Layer):
    """Embedding layer whose weight matrix is split into a fixed
    (non-trainable) part and a trainable part.

    Rows 0..len(fixed_weights)-1 of the embedding matrix come verbatim from
    `fixed_weights` (e.g. pretrained GloVe vectors) and are frozen; the
    remaining `input_dim - len(fixed_weights)` rows are trainable and are
    initialized from a normal distribution matched to the per-dimension
    mean/std of the fixed weights.
    """

    def __init__(self, input_dim, output_dim, fixed_weights, embeddings_initializer='uniform',
                 input_length=None, **kwargs):
        # Embedding lookups index with integers, so force the input dtype.
        kwargs['dtype'] = 'int32'
        if 'input_shape' not in kwargs:
            if input_length:
                kwargs['input_shape'] = (input_length,)
            else:
                kwargs['input_shape'] = (None,)
        super(Embedding2, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        # NOTE(review): stored but never used in build(); the trainable part is
        # always normal-initialized below regardless of this argument.
        self.embeddings_initializer = embeddings_initializer
        self.fixed_weights = fixed_weights
        self.num_trainable = input_dim - len(fixed_weights)
        self.input_length = input_length
        # Initialize the trainable rows with the same per-dimension mean/std
        # as the fixed (pretrained) rows.
        w_mean = fixed_weights.mean(axis=0)
        w_std = fixed_weights.std(axis=0)
        self.variable_weights = w_mean + w_std*np.random.randn(self.num_trainable, output_dim)

    def build(self, input_shape, name='embeddings'):
        # Fixed rows become a non-trainable variable, the extra rows a
        # trainable one; the full embedding matrix is their concatenation
        # along axis 0, so row index == token id.
        fixed_weight = K.variable(self.fixed_weights, name=name+'_fixed')
        variable_weight = K.variable(self.variable_weights, name=name+'_var')
        self._trainable_weights.append(variable_weight)
        self._non_trainable_weights.append(fixed_weight)
        self.embeddings = K.concatenate([fixed_weight, variable_weight], axis=0)
        self.built = True

    def call(self, inputs):
        # Look up the embedding row for every integer token id.
        if K.dtype(inputs) != 'int32':
            inputs = K.cast(inputs, 'int32')
        out = K.gather(self.embeddings, inputs)
        return out

    def compute_output_shape(self, input_shape):
        # (batch, sequence_length, output_dim); the sequence length comes from
        # the input shape unless a fixed input_length was configured.
        if not self.input_length:
            input_length = input_shape[1]
        else:
            input_length = self.input_length
        return (input_shape[0], input_length, self.output_dim)
df = pd.read_csv('data/fake_or_real_news.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.title = df.title.str.lower()
df.text = df.text.str.lower()
df.title = df.title.str.replace(r'http[\w:/\.]+','<URL>') # remove urls
df.text = df.text.str.replace(r'http[\w:/\.]+','<URL>') # remove urls
df.title = df.title.str.replace(r'[^\.\w\s]','') #remove everything but characters and punctuation
df.text = df.text.str.replace(r'[^\.\w\s]','') #remove everything but characters and punctuation
df.title = df.title.str.replace(r'\.\.+','.') #replace multple periods with a single one
df.text = df.text.str.replace(r'\.\.+','.') #replace multple periods with a single one
df.title = df.title.str.replace(r'\.',' . ') #replace periods with a single one
df.text = df.text.str.replace(r'\.',' . ') #replace multple periods with a single one
df.title = df.title.str.replace(r'\s\s+',' ') #replace multple white space with a single one
df.text = df.text.str.replace(r'\s\s+',' ') #replace multple white space with a single one
df.title = df.title.str.strip()
df.text = df.text.str.strip()
print(df.shape)
df.head()
```
Get all the unique words. We will only consider words that have been used more than 5 times. Finally from this we create a dictionary mapping words to integers.
Once this is done we will create a list of reviews where the words are converted to ints.
```
all_text = ' '.join(df.text.values)
words = all_text.split()
u_words = Counter(words).most_common()
u_words_counter = u_words
u_words_frequent = [word[0] for word in u_words if word[1]>5] # we will only consider words that have been used more than 5 times
u_words_total = [k for k,v in u_words_counter]
word_vocab = dict(zip(word_vocab, range(len(word_vocab))))
word_in_glove = np.array([w in word_vocab for w in u_words_total])
words_in_glove = [w for w,is_true in zip(u_words_total,word_in_glove) if is_true]
words_not_in_glove = [w for w,is_true in zip(u_words_total,word_in_glove) if not is_true]
print('Fraction of unique words in glove vectors: ', sum(word_in_glove)/len(word_in_glove))
# # create the dictionary
word2num = dict(zip(words_in_glove,range(len(words_in_glove))))
len_glove_words = len(word2num)
freq_words_not_glove = [w for w in words_not_in_glove if w in u_words_frequent]
b = dict(zip(freq_words_not_glove,range(len(word2num), len(word2num)+len(freq_words_not_glove))))
word2num = dict(**word2num, **b)
word2num['<Other>'] = len(word2num)
num2word = dict(zip(word2num.values(), word2num.keys()))
int_text = [[word2num[word] if word in word2num else word2num['<Other>']
for word in content.split()] for content in df.text.values]
print('The number of unique words are: ', len(u_words))
print('The first review looks like this: ')
print(int_text[0][:20])
print('And once this is converted back to words, it looks like: ')
print(' '.join([num2word[i] for i in int_text[0][:20]]))
plt.hist([len(t) for t in int_text],50)
plt.show()
print('The number of articles greater than 500 in length is: ', np.sum(np.array([len(t)>500 for t in int_text])))
print('The number of articles less than 50 in length is: ', np.sum(np.array([len(t)<50 for t in int_text])))
```
You cannot pass differing lengths of sentences to the algorithm. Hence we shall prepad the sentence with `<PAD>`. Sequences less than 500 in length will be prepadded and sequences that are longer than 500 will be truncated. It is assumed that the sentiment of the review can be asserted from the first 500 words.
```
num2word[len(word2num)] = '<PAD>'
word2num['<PAD>'] = len(word2num)
for i, t in enumerate(int_text):
if len(t)<500:
int_text[i] = [word2num['<PAD>']]*(500-len(t)) + t
elif len(t)>500:
int_text[i] = t[:500]
else:
continue
x = np.array(int_text)
y = (df.label.values=='REAL').astype('int')
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
```
A real news article:
```
df[df.label=='REAL'].text.values[0]
```
A fake news article:
```
df[df.label=='FAKE'].text.values[0]
```
## Many to One LSTM
### Basic Method:
This method is no different to the method utilised in the sentiment analysis lesson.
```
model = Sequential()
model.add(Embedding(len(word2num), 50)) # , batch_size=batch_size
model.add(LSTM(64))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
batch_size = 128
epochs = 5
model.fit(X_train, y_train, batch_size=batch_size, epochs=1, validation_data=(X_test, y_test))
```
### Method 2: Fixed Embeddings
This is where we use the `Embedding2` class to which we give a set of weights which remain the same through training. Note especially the number of trainable parameters in the summary.
```
model = Sequential()
model.add(Embedding2(len(word2num), 50,
fixed_weights=np.array([word2glove[w] for w in words_in_glove]))) # , batch_size=batch_size
model.add(LSTM(64))
model.add(Dense(1, activation='sigmoid'))
# rmsprop = keras.optimizers.RMSprop(lr=1e-4)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
```
I may have cheated and run the following block 3 times. A good thing about Keras is that it remembers the last learning rate and continues from there.
```
batch_size = 128
model.fit(X_train, y_train, batch_size=batch_size, epochs=15, validation_data=(X_test, y_test))
sentence = "North korea is testing out missiles on americans living overseas .".lower()
sentence_num = [word2num[w] if w in word2num else word2num['<Other>'] for w in sentence.split()]
sentence_num = [word2num['<PAD>']]*(500-len(sentence_num)) + sentence_num
sentence_num = np.array(sentence_num)
model.predict(sentence_num[None,:])
' '.join([num2word[w] for w in sentence_num])
sentence = "The chemicals in the water is turning the freaking frogs gay says cnn . ".lower()
sentence_num = [word2num[w] if w in word2num else word2num['<Other>'] for w in sentence.split()]
sentence_num = [word2num['<PAD>']]*(500-len(sentence_num)) + sentence_num
sentence_num = np.array(sentence_num)
model.predict(sentence_num[None,:])
sentence = "President Trump is the greatest president of all time period .".lower()
sentence_num = [word2num[w] if w in word2num else word2num['<Other>'] for w in sentence.split()]
sentence_num = [word2num['<PAD>']]*(0) + sentence_num
sentence_num = np.array(sentence_num)
model.predict(sentence_num[None,:])
model.evaluate(X_test, y_test)
```
| github_jupyter |
# Leave-One-Patient-Out classification of individual volumes
Here, we train a classifier for each patient, based on the data of all the other patients except the current one (Leave One Out Cross-Validation). To this end, we treat each volume as an independent observation, so we have a very large sample of volumes which are used for training; and later, we do not classify the patient as a whole, but the classifier makes a decision for each of the held-out patient's 200 volumes. Therefore, at this stage, we have not made a decision on the patient level, but only at the volume-as-unit-of-observation level.
### import modules
```
import os
import pickle
import numpy as np
import pandas as pd
from sklearn import svm, preprocessing, metrics
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
sns.set_context('poster')
sns.set_context('poster')
# after converstion to .py, we can use __file__ to get the module folder
try:
thisDir = os.path.realpath(__file__)
# in notebook form, we take the current working directory (we need to be in 'notebooks/' for this!)
except:
thisDir = '.'
# convert relative path into absolute path, so this will work with notebooks and py modules
supDir = os.path.abspath(os.path.join(os.path.dirname(thisDir), '..'))
supDir
```
### get meta df
We need this e.g. to get information about conclusiveness
```
data_df = pd.read_csv(
'../data/interim/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_df.csv',
index_col=[0, 1],
header=0)
data_df.tail()
```
#### conclusiveness filters
```
is_conclusive = data_df.loc[:, 'pred'] != 'inconclusive'
is_conclusive.sum()
```
### get data
```
def make_group_df(data_df, metric='corr_df'):
    """Load the per-patient correlation data (one csv per patient, whose path
    is stored in column `metric` of data_df) and stack everything into one
    DataFrame whose two-level row index carries the patient information."""
    pieces = []
    for patient in data_df.index:
        # read this patient's csv
        csv_path = data_df.loc[patient, metric]
        patient_df = pd.read_csv(csv_path, index_col=[0], header=0)
        # tag the (single-row) frame with the patient's two index levels
        patient_df.index = [[patient[0]], [patient[1]]]
        pieces.append(patient_df)
    group_df = pd.concat([pd.DataFrame()] + pieces)
    # make sure the volume labels are integers, then sort rows and columns so
    # the 200 volumes are guaranteed to be in ascending temporal order
    group_df.columns = group_df.columns.astype(int)
    group_df = group_df.sort_index(axis=0).sort_index(axis=1)
    assert all(group_df.columns == range(200)), 'wrong order of volumes'
    return group_df
group_df = make_group_df(data_df)
group_df.tail()
```
#### filter data
```
# only conclusive cases
conclusive_df = group_df[is_conclusive]
# only inconclusive cases
inconclusive_df = group_df[is_conclusive == False]
# all cases unfiltered
withinconclusive_df = group_df.copy()
print(conclusive_df.shape, inconclusive_df.shape, withinconclusive_df.shape)
```
### get design
```
conds_file = os.path.join(supDir,'models','conds.p')
with open(conds_file, 'rb') as f:
conds = pickle.load(f)
print(conds)
```
### get colors
```
with open('../models/colors.p', 'rb') as f:
color_dict = pickle.load(f)
my_cols = {}
for i, j in zip(['red', 'blue', 'yellow'], ['left', 'right', 'bilateral']):
my_cols[j] = color_dict[i]
```
### invert the resting timepoints
```
inv_df = conclusive_df*conds
inv_df.tail()
```
### train the classifier
```
stack_df = pd.DataFrame(inv_df.stack())
stack_df.tail()
stack_df.shape
my_groups = ['left','bilateral','right']
dynamite_df = stack_df.copy()
dynamite_df.columns = ['correlation']
dynamite_df['group'] = dynamite_df.index.get_level_values(0)
sns.catplot(data=dynamite_df,y='group',x='correlation',kind='bar',orient='h',palette=my_cols,order=my_groups,aspect=1)
plt.axvline(0,color='k',linewidth=3)
plt.xlim(0.05,-0.05,-0.01)
sns.despine(left=True,trim=True)
plt.ylabel('')
plt.savefig('../reports/figures/10-dynamite-plot.png',dpi=300,bbox_inches='tight')
plt.show()
from scipy import stats
t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['left','correlation'])
print('\nt=%.2f,p=%.64f'%(t,p))
t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['right','correlation'])
print('\nt=%.2f,p=%.38f'%(t,p))
t,p = stats.ttest_ind(dynamite_df.loc['left','correlation'],dynamite_df.loc['right','correlation'])
print('\nt=%.2f,p=%.248f'%(t,p))
```
### as histogram
```
fig,ax = plt.subplots(1,1,figsize=(8,5))
for group in my_groups:
sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
plt.legend()
plt.xlim(0.4,-0.4,-0.2)
sns.despine()
plt.show()
```
### set up the classifier
```
clf = svm.SVC(kernel='linear',C=1.0,probability=False,class_weight='balanced')
def scale_features(X):
    """Standardize the feature matrix (zero mean, unit variance per column).

    Returns the transformed data together with the fitted scaler, so the very
    same transform can later be re-applied to held-out test data.
    """
    scaler = preprocessing.StandardScaler()
    scaler.fit(X)
    return scaler.transform(X), scaler
def encode_labels(y):
    """Map string class labels to integer codes.

    Returns the encoded labels plus the fitted encoder, so predictions can be
    translated back to the original label names later on.
    """
    labeler = preprocessing.LabelEncoder()
    labeler.fit(np.unique(y))
    return labeler.transform(y), labeler
def train_classifier(df):
    """Fit the module-level SVC on one fold of data.

    The features are the frame's values (z-scored before fitting); the labels
    are taken from the first index level and integer-encoded.  Returns the
    fitted classifier together with the scaler and labeler that are needed to
    process new data identically.
    """
    features, fitted_scaler = scale_features(df.values)
    labels, fitted_labeler = encode_labels(df.index.get_level_values(0))
    clf.fit(features, labels)
    return clf, fitted_scaler, fitted_labeler
example_clf, example_scaler, example_labeler = train_classifier(stack_df)
example_clf
example_scaler
example_labeler.classes_
def get_boundaries(clf,my_scaler):
    '''find the point where the classifier changes its prediction;
    this is an ugly brute-force approach and probably there is a much
    easier way to do this
    '''
    d = {}
    # evaluate the classifier on a dense 1-d grid of raw correlation values
    for i in np.linspace(-1,1,10000):
        # apply the training-time scaling before predicting
        this_val = my_scaler.transform(np.array([i]).reshape(1,-1))
        this_predict = clf.predict(this_val)
        d[i] = this_predict[-1]
    # one row per grid point, column 'pred' holds the predicted class id
    df = pd.DataFrame(d,index=['pred']).T
    # keep only rows whose prediction differs from the previous row; the very
    # first row always "differs" (its diff against shift(1) is NaN), so
    # index[1:] drops it and leaves just the true class-boundary positions
    return df[(df-df.shift(1))!=0].dropna().index[1:]
from datetime import datetime
```
### get class boundaries of all folds
```
import tqdm
def get_all_boundaries(stack_df):
    """Leave-one-patient-out estimation of the class boundaries.

    For every patient, the classifier is trained on all remaining patients
    and the resulting decision boundaries are stored under that patient's id.
    """
    boundaries_per_patient = {}
    patients = np.unique(stack_df.index.get_level_values(1))
    for patient in tqdm.tqdm(patients):
        # hold out the current patient, fit on everybody else
        fold_df = stack_df.drop(patient, level=1)
        fold_clf, fold_scaler, _fold_labeler = train_classifier(fold_df)
        # store this fold's classifier boundaries
        boundaries_per_patient[patient] = get_boundaries(fold_clf, fold_scaler)
    return boundaries_per_patient
```
Compute the boundaries and store them for later re-use:
```
all_boundaries = get_all_boundaries(stack_df)
bound_df = pd.DataFrame(all_boundaries).T
bound_df.tail()
bound_df.to_csv('../data/processed/csv/bound_df.csv')
```
To make things faster, we can re-load the computed boundaries here:
```
bound_df = pd.read_csv('../data/processed/csv/bound_df.csv',index_col=[0],header=0)
bound_df.tail()
```
rename so boundaries have meaningful descriptions:
```
bound_df = bound_df.rename(columns={'0':'B/R','1':'L/B'})
bound_df.tail()
bound_df.describe()
```
#### show the class boundaries overlaid on the data distribution
```
fig,ax = plt.subplots(1,1,figsize=(8,5))
for group in my_groups:
sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
for b in bound_df.values.flatten():
plt.axvline(b,alpha=0.1,color=color_dict['black'])
plt.legend()
plt.xlabel('correlation')
plt.ylabel('density')
plt.xlim(0.4,-0.4,-0.2)
plt.ylim(0,8)
plt.legend(loc=(0.65,0.65))
sns.despine(trim=True,offset=5)
plt.savefig('../reports/figures/10-distribution-plot.png',dpi=300,bbox_inches='tight')
plt.show()
```
#### make swarm/factorplot with boundary values
```
sns_df = pd.DataFrame(bound_df.stack())
sns_df.columns = ['correlation']
sns_df.loc[:,'boundary'] = sns_df.index.get_level_values(1)
sns_df.loc[:,'dummy'] = 0
sns_df.tail()
fig,ax = plt.subplots(1,1,figsize=(4,5))
sns.swarmplot(data=sns_df,
x='correlation',
y='dummy',
hue='boundary',
orient='h',
palette={'L/B':my_cols['left'],'B/R':my_cols['right']},
size=4,
alpha=0.9,
ax=ax
)
plt.xlim(0.04,-0.02,-0.02)
ax.set_ylabel('')
ax.set_yticks([])
sns.despine(left=True,trim=True)
plt.savefig('../reports/figures/10-boundary-swarm-plot.png',dpi=300,bbox_inches='tight')
plt.show()
```
### combine above into one plot
```
sns.set_style('dark')
fig = plt.figure(figsize=(16,6))
ax1 = fig.add_axes([0.36, .999, 1, .7], xticklabels=[], yticklabels=[])
ax1.imshow(Image.open('../reports/figures/10-dynamite-plot.png'))
ax2 = fig.add_axes([0, 1, 1, 0.8], xticklabels=[], yticklabels=[])
ax2.imshow(Image.open('../reports/figures/10-distribution-plot.png'))
ax3 = fig.add_axes([0.65, 1, 1, 0.8], xticklabels=[], yticklabels=[])
ax3.imshow(Image.open('../reports/figures/10-boundary-swarm-plot.png'))
plt.text(0,1, 'A',transform=ax2.transAxes, fontsize=32)
plt.text(1.04,1, 'B',transform=ax2.transAxes, fontsize=32)
plt.text(1.63,1, 'C',transform=ax2.transAxes, fontsize=32)
plt.savefig('../reports/figures/10-training-overview.png',dpi=300,bbox_inches='tight')
plt.show()
```
### make predictions for all patients (conc and inconc)
#### invert
```
all_inv_df = group_df*conds
all_inv_df.tail()
def make_preds(this_df, clf, my_scaler, my_labeler):
    """Classify every volume of one held-out patient and count the predicted
    labels.

    The scaler and labeler stored at training time are re-used here so the
    held-out data is processed exactly like the training data was.  Returns a
    one-row DataFrame (indexed like `this_df`) with one column per predicted
    class holding the number of volumes assigned to it.
    """
    volume_features = my_scaler.transform(this_df.T)
    class_ids = clf.predict(volume_features)
    class_names = my_labeler.inverse_transform(class_ids)
    # tally how many volumes landed in each class
    label_counts = pd.Series(class_names).value_counts()
    counts_df = pd.DataFrame(label_counts).T
    counts_df.index = pd.MultiIndex.from_tuples(this_df.index)
    return counts_df
```
Example:
```
make_preds(all_inv_df.iloc[[-1]],example_clf, example_scaler, example_labeler)
import warnings
# this is necessary to get rid of https://github.com/scikit-learn/scikit-learn/issues/10449
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
for p in tqdm.tqdm(all_inv_df.index):
# get data in leave-one-out fashion
this_df = all_inv_df.loc[[p],:]
other_df = stack_df.drop(p[-1],level=1)
# train on this fold's data
clf,my_scaler,my_labeler = train_classifier(other_df)
# make predictions
p_df = make_preds(this_df,clf,my_scaler,my_labeler)
out_name = '../data/processed/csv/%s_counts_df.csv' % p[-1]
p_df.to_csv(out_name)
data_df.loc[p,'counts_df'] = out_name
data_df.to_csv('../data/processed/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_counts_df.csv')
```
### train classifier once on all data and store
We store a classifer trained on all data as a pickle file so we can re-use it in the future on new data
```
clf,my_scaler,my_labeler = train_classifier(stack_df)
d = {'clf':clf,'scaler':my_scaler,'labeler':my_labeler}
with open('../models/volume_clf.p','wb') as f:
pickle.dump(d,f)
```
#### toolbox model
The toolbox assumes that a dataset used as input is a new dataset and was not part of this study
```
#clf_file = os.path.join(supDir,'models','volume_clf.p')
#with open(clf_file,'rb') as f:
# clf_dict = pickle.load(f)
#
#clf = clf_dict['clf']
#my_scaler = clf_dict['scaler']
#my_labeler = clf_dict['labeler']
#def make_p(pFolder,pName,clf=clf,my_scaler=my_scaler,my_labeler=my_labeler):
#
# filename = os.path.join(pFolder, ''.join([ pName, '_corr_df.csv']))
# this_df = pd.read_csv(filename, index_col=[0], header=0)
# this_df.index = [['correlations'],[pName]]
# inv_df = this_df*conds
# counts_df = make_preds(inv_df,clf,my_scaler,my_labeler)
#
# out_name = os.path.join(pFolder, ''.join([ pName, '_counts_df.csv']))
# counts_df.to_csv(out_name)
#
# return out_name
```
### summary
For each patient, a classfier has been developed based on all the other patient (Leave-One-Out) and applied to the 200 volumes of that patient. There are now 200 decisions for each patient, as many as there are volumes. These data are stored in csv files which we can now access to make a prediction on the level of the patient.
**************
< [Previous](09-mw-correlations-with-template.ipynb) | [Contents](00-mw-overview-notebook.ipynb) | [Next >](11-mw-logistic-regression.ipynb)
| github_jupyter |
```
import numpy as np
import tensorflow as tf
assert tf.__version__.startswith('2')
from tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader
from tensorflow_examples.lite.model_maker.core.task import image_classifier
from tensorflow_examples.lite.model_maker.core.task.model_spec import efficientnet_lite2_spec, efficientnet_lite4_spec
from tensorflow_examples.lite.model_maker.core.task.model_spec import ImageModelSpec
import matplotlib.pyplot as plt
# from matplotlib.pyplot import specgram
# import librosa
# import librosa.display
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
import pandas as pd
tf.config.list_physical_devices('GPU')
def train_valid_fold_images(fold):
    """Return (train, valid) image loaders for one cross-validation fold.

    Parameters
    ----------
    fold : int or str
        fold identifier, used as the sub-folder name below the imagenet-style
        data root.
    """
    fold = str(fold)
    base_path = "../downsampled/imagenet_structure/"
    # BUGFIX: the train path was built from the undefined name `folder`
    # (NameError at call time); both paths must be built from `fold`.
    return (ImageClassifierDataLoader.from_folder(base_path + fold + "/train"),
            ImageClassifierDataLoader.from_folder(base_path + fold + "/valid"))
train_data, valid_data = train_valid_fold_images('1')
# Customize the pre-trained TensorFlow model
model = image_classifier.create(train_data, model_spec=efficientnet_lite2_spec)
# Evaluate the model
loss, accuracy = model.evaluate(valid_data)
```
# Lite4
```
# Customize the pre-trained TensorFlow model
model_lite4 = image_classifier.create(train_data,
model_spec=efficientnet_lite4_spec, warmup_steps = 100)
loss, accuracy = model_lite4.evaluate(valid_data)
model_lite4.export('UrbanEfficientNetLite4_out1_e5_noshuffle.tflite', 'urban_label_lite4.txt')
model_lite4 = image_classifier.create(train_data,
model_spec=efficientnet_lite4_spec,
shuffle = True,
batch_size = 24,
warmup_steps = 100,
epochs = 8)
loss, accuracy = model_lite4.evaluate(valid_data)
model_path_prefix = './models/UrbanEfficientNetLite4_val1_e8_shuffle'
model_lite4.export(model_path_prefix+'.tflite', model_path_prefix+'_labels.txt')
ds_test = model_lite4._gen_dataset(valid_data, 24, is_training=False)
ds_test
model_lite4.model.predict_classes
import inspect
print(inspect.getsource(model_lite4.model.predict_classes))
valid_predicts = model_lite4.predict_top_k(valid_data)
# BUGFIX: `predicts` was undefined (NameError); the number of ground-truth
# labels taken from the dataset must match the predictions just computed.
valid_label = [valid_data.index_to_label[label.numpy()]
               for i, (image, label) in enumerate(valid_data.dataset.take(len(valid_predicts)))]
# predict_top_k returns a ranked list per sample; keep only the top-1 label
valid_predict_label = [i[0][0] for i in valid_predicts]
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
# BUGFIX: `valid_true_id` / `valid_predict_id` were never defined; report on
# the label lists built above (same as the later report for the e20 model).
print(classification_report(y_true = valid_label, y_pred = valid_predict_label, labels = valid_data.index_to_label))
import pandas as pd
conf = confusion_matrix(y_true = valid_label, y_pred = valid_predict_label, labels = valid_data.index_to_label)
conf = pd.DataFrame(conf, columns = valid_data.index_to_label)
conf['true_row_label'] = valid_data.index_to_label
conf.set_index('true_row_label', drop = True, inplace = True)
conf_perc = round(conf.div(conf.sum(axis=1), axis=0),2)
conf_perc
def predict_class(data):
    """Return the top-1 predicted label for every sample in `data`,
    using the module-level model_lite4."""
    ranked = model_lite4.predict_top_k(data)
    return [sample[0][0] for sample in ranked]
#e12
print(classification_report(y_true = valid_label, y_pred = valid_predict_label))
model_lite4 = image_classifier.create(train_data,
model_spec=efficientnet_lite4_spec,
shuffle = True,
batch_size = 24,
warmup_steps = 100,
epochs = 20)
loss, accuracy = model_lite4.evaluate(valid_data)
model_path_prefix = './models/UrbanEfficientNetLite4_val1_e20_shuffle'
model_lite4.export(model_path_prefix+'.tflite', model_path_prefix+'_labels.txt')
valid_predicts = model_lite4.predict_top_k(valid_data)
valid_label = [valid_data.index_to_label[label.numpy()] for i, (image, label) in enumerate(valid_data.dataset.take(len(valid_predicts)))]
valid_predict_label = [i[0][0] for i in valid_predicts]
#e20
print(classification_report(y_true = valid_label, y_pred = valid_predict_label))
conf = confusion_matrix(y_true = valid_label, y_pred = valid_predict_label, labels = valid_data.index_to_label)
conf = pd.DataFrame(conf, columns = valid_data.index_to_label)
conf['true_row_label'] = valid_data.index_to_label
conf.set_index('true_row_label', drop = True, inplace = True)
conf_perc = round(conf.div(conf.sum(axis=1), axis=0),2)
conf_perc
```
# Multiple Models
```
def train_valid_folder_images(folder):
    """Load the train/ and valid/ image folders for one submodel.

    Returns a (train_loader, valid_loader) pair of ImageClassifierDataLoader.
    """
    root = "../downsampled/imagenet_structure/" + str(folder)
    train_loader = ImageClassifierDataLoader.from_folder(root + "/train")
    valid_loader = ImageClassifierDataLoader.from_folder(root + "/valid")
    return train_loader, valid_loader
def create_fit_submodel(image_folder_substring, epochs = 10, warmup_steps = 100, batch_size = 24):
    """Train an EfficientNet-Lite4 classifier on one submodel's image folder.

    Args:
        image_folder_substring: subfolder name under the imagenet-structure root.
        epochs, warmup_steps, batch_size: training hyper-parameters.

    Returns:
        The fitted image_classifier model.
    """
    train_data, valid_data = train_valid_folder_images(image_folder_substring)
    model = image_classifier.create(train_data,
                                    model_spec=efficientnet_lite4_spec,
                                    shuffle=True,
                                    epochs=epochs,
                                    batch_size=batch_size,
                                    warmup_steps=warmup_steps,
                                    validation_data=valid_data)
    return model
import os
import glob
import shutil
from pathlib import Path

# Root of the per-submodel ensemble image folders.
data_path = Path('../downsampled/imagenet_structure/ensemble/')
# Submodel name -> classes that get collapsed into a single 'motors' class.
submodels = {'motors-other': ['air_conditioner', 'engine_idling','drilling', 'jackhammer'],}
labels = ['air_conditioner','car_horn','children_playing',
          'dog_bark','drilling','engine_idling','gun_shot','jackhammer','siren','street_music']
# Classes NOT merged by the submodel.
# BUG FIX: original used the undefined key 'engine-air-other' and ran before
# `labels`/`submodels` were defined; the only defined submodel is 'motors-other'.
np.setdiff1d(labels, submodels['motors-other'], True).tolist()
# BUG FIX: the original evaluated "/path"+os.listdir(), which raises
# TypeError (str + list); prefix each directory entry instead.
["/path" + name for name in os.listdir()]
def move_submodel_files(submodel_folder_name, submodel_class_list):
    """Merge the listed classes into a single 'motors' class folder.

    For both the train and valid splits, moves every .png of each class in
    submodel_class_list into a 'motors' subfolder and then removes the
    emptied class folder. Relies on the module-level `data_path` root.
    NOTE(review): the target folder name 'motors' is hard-coded regardless
    of the submodel key — confirm this is intended for non-motor submodels.
    """
    for d in ['train', 'valid']:
        # Create the merged target folder once per split.
        if not os.path.exists(data_path/submodel_folder_name/d/'motors'):
            os.mkdir(data_path/submodel_folder_name/d/'motors')
        for c in submodel_class_list: #np.setdiff1d(labels,submodel_class_list,True).tolist():
            png_files = list(Path(data_path/submodel_folder_name/d/c).glob('*.png'))
            for f in png_files:
                shutil.move(str(f), str(data_path/submodel_folder_name/d/'motors'))
            # os.rmdir only succeeds if the class folder is now empty.
            os.rmdir(data_path/submodel_folder_name/d/c)
# Collapse the configured classes into one 'motors' folder per submodel.
for submodel_name, class_list in submodels.items():
    print(submodel_name)
    move_submodel_files(submodel_name, class_list)
```
## Drill-Jackhammer
```
# BUG FIX: the original assigned to `model_drill_jackhammer-other`, which is
# not a valid identifier (SyntaxError), then used `model_drill_jackhammer`
# and a `valid_data` that was loaded for a *different* folder.
model_drill_jackhammer = create_fit_submodel('ensemble/drilling-jackhammer-other', epochs = 20)
# Reload the validation split that matches this submodel's folder.
_, valid_data = train_valid_folder_images('ensemble/drilling-jackhammer-other')
model_drill_jackhammer.model.save('models/ensemble/drill_jackhammer_e15')
loss, accuracy = model_drill_jackhammer.evaluate(valid_data)
model_path_prefix = './models/UrbanDrillJackhammerEfficientNet'
model_drill_jackhammer.export(model_path_prefix+'.tflite', model_path_prefix+'_labels.txt')
# Top-1 validation predictions vs. true labels.
valid_predicts = model_drill_jackhammer.predict_top_k(valid_data)
valid_label = [valid_data.index_to_label[label.numpy()] for i, (image, label) in enumerate(valid_data.dataset.take(len(valid_predicts)))]
valid_predict_label = [i[0][0] for i in valid_predicts]
#e20
print(classification_report(y_true = valid_label, y_pred = valid_predict_label))
```
## Engine-Air
```
# Train the engine/air submodel on its dedicated folder split.
train_data, valid_data = train_valid_folder_images('ensemble/engine-air')
model_engine_air = image_classifier.create(train_data,
                        model_spec=efficientnet_lite4_spec,
                        shuffle = True,
                        batch_size = 24,
                        warmup_steps = 100,
                        epochs = 10)
loss, accuracy = model_engine_air.evaluate(valid_data)
# Export for TFLite deployment.
model_path_prefix = './models/UrbanEngineAirEfficientNet'
model_engine_air.export(model_path_prefix+'.tflite', model_path_prefix+'_labels.txt')
# Top-1 validation predictions vs. true labels.
valid_predicts = model_engine_air.predict_top_k(valid_data)
valid_label = [valid_data.index_to_label[label.numpy()] for i, (image, label) in enumerate(valid_data.dataset.take(len(valid_predicts)))]
valid_predict_label = [i[0][0] for i in valid_predicts]
#e20
print(classification_report(y_true = valid_label, y_pred = valid_predict_label))
```
## Other Classes
```
# Train the catch-all 'other' submodel on its dedicated folder split.
train_data, valid_data = train_valid_folder_images('ensemble/other')
model_other = image_classifier.create(train_data,
                        model_spec=efficientnet_lite4_spec,
                        shuffle = True,
                        batch_size = 24,
                        warmup_steps = 100,
                        epochs = 10)
loss, accuracy = model_other.evaluate(valid_data)
# Export for TFLite deployment.
model_path_prefix = './models/UrbanOtherEfficientNet'
model_other.export(model_path_prefix+'.tflite', model_path_prefix+'_labels.txt')
# Top-1 validation predictions vs. true labels.
valid_predicts = model_other.predict_top_k(valid_data)
valid_label = [valid_data.index_to_label[label.numpy()] for i, (image, label) in enumerate(valid_data.dataset.take(len(valid_predicts)))]
valid_predict_label = [i[0][0] for i in valid_predicts]
#e20
print(classification_report(y_true = valid_label, y_pred = valid_predict_label))
```
# Import Tflite models & Ensemble
```
from PIL import Image
class urban_ensemble():
    """Holds per-submodel TFLite model/label paths for ensemble inference."""

    def __init__(self, tflite_path_dict):
        # BUG FIX: original __init__ and _load_tflite_model were missing `self`.
        self.path_dict = tflite_path_dict

    def _load_tflite_model(self, model_path, label_path):
        """Load one TFLite model and its labels.

        Returns (interpreter, label_names, input_index, output_tensor_fn).
        """
        # BUG FIX: original passed the literal strings 'model_path' and
        # 'label_path' to GFile instead of the arguments, and returned nothing.
        with tf.io.gfile.GFile(model_path, 'rb') as f:
            model_content = f.read()
        with tf.io.gfile.GFile(label_path, 'r') as f:
            label_names = f.read().split('\n')
        interpreter = tf.lite.Interpreter(model_content = model_content)
        input_index = interpreter.get_input_details()[0]['index']
        output = interpreter.tensor(interpreter.get_output_details()[0]["index"])
        return interpreter, label_names, input_index, output
# Manual smoke test: load one spectrogram image and scale pixels to [0, 1].
imtest = Image.open('7061-6-0-0.png')
imtest = np.asarray(imtest)/255
# BUG FIX: removed a dangling `tf.lite.` fragment (SyntaxError in original).
def load_labels(path):
    """Read a labels file and map each line index to its stripped label."""
    with open(path, 'r') as f:
        labels = {}
        for index, name in enumerate(f):
            labels[index] = name.strip()
        return labels
def set_input_tensor(interpreter, image):
    """Copy `image` into the interpreter's single input tensor, in place."""
    input_details = interpreter.get_input_details()[0]
    buffer = interpreter.tensor(input_details['index'])()[0]
    buffer[:, :] = image
def classify_image(interpreter, image_array, top_k=1):
    """Returns a sorted array of (label_index, score) classification results.

    Args:
        interpreter: an allocated tf.lite.Interpreter.
        image_array: pre-processed input matching the model's input shape.
        top_k: number of top results to return.
    """
    # BUG FIX: original referenced undefined `input_details`/`input_array`;
    # write the provided image_array into the interpreter's input tensor.
    input_details = interpreter.get_input_details()[0]
    interpreter.set_tensor(input_details['index'], image_array)
    interpreter.invoke()
    output_details = interpreter.get_output_details()[0]
    output = np.squeeze(interpreter.get_tensor(output_details['index']))
    # If the model is quantized (uint8 data), then dequantize the results
    if output_details['dtype'] == np.uint8:
        scale, zero_point = output_details['quantization']
        output = scale * (output - zero_point)
    # argpartition selects the top_k indices but does NOT order them;
    # sort the selected entries by descending score (as the docstring promises).
    top = np.argpartition(-output, top_k)[:top_k]
    top = top[np.argsort(-output[top])]
    return [(i, output[i]) for i in top]
##################################################
class UrbanInterpreter():
    """Wraps TFLite model/label files and builds allocated interpreters."""

    def __init__(self, model_files_dict):
        # BUG FIX: original lacked `self` and assigned from a mismatched name
        # (`model_dict` vs `model_files_dict`).
        self.model_files_dict = model_files_dict

    @staticmethod
    def _read_tflite_model(model_path):
        """Return the raw bytes of a .tflite file."""
        with tf.io.gfile.GFile(model_path, 'rb') as f:
            return f.read()

    @staticmethod
    def _read_tflite_labels(label_path):
        # BUG FIX: original def line was missing its trailing colon.
        with tf.io.gfile.GFile(label_path, 'r') as f:
            return f.read().split('\n')

    def _initialize_interpreter(self, model_files):
        """Build and allocate an interpreter from one model-files mapping."""
        # BUG FIX: original called model_files(...) as a function; it is a
        # dict keyed by 'tflite_file' and 'labels'.
        model_content = self._read_tflite_model(model_files['tflite_file'])
        # NOTE(review): label_names was read and discarded in the original;
        # kept for parity — confirm whether it should be returned too.
        label_names = self._read_tflite_labels(model_files['labels'])
        interpreter = tf.lite.Interpreter(model_content= model_content)
        interpreter.allocate_tensors()
        return interpreter
def urban_ensemble_predict(model_dict, image_path):
    """Run one TFLite model on the image at `image_path`.

    Args:
        model_dict: mapping with 'tflite_file' and 'labels' paths
            (assumed keys — TODO confirm against callers).
        image_path: path to the input image.

    Returns:
        (label_id, prob) for the top prediction.
    """
    # BUG FIX: the original loaded labels from an undefined name, contained an
    # orphan `finally:` block referencing an undefined `camera`, and timed with
    # an unimported `time` module; all removed.
    labels = load_labels(model_dict['labels'])
    interpreter = tf.lite.Interpreter(model_path=model_dict['tflite_file'])
    interpreter.allocate_tensors()
    _, height, width, _ = interpreter.get_input_details()[0]['shape']
    image = np.asarray(Image.open(image_path).resize((width, height)))/255
    results = classify_image(interpreter, image)
    label_id, prob = results[0]
    return label_id, prob
# Read TensorFlow Lite model from TensorFlow Lite file.
with tf.io.gfile.GFile('flower_classifier.tflite', 'rb') as f:
    model_content = f.read()
# Read label names from label file.
with tf.io.gfile.GFile('flower_labels.txt', 'r') as f:
    label_names = f.read().split('\n')
# Initialize the TensorFlow Lite interpreter.
interpreter = tf.lite.Interpreter(model_content=model_content)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]['index']
# `output` is a callable returning a view of the output tensor.
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])
# Run predictions on each test image data and calculate accuracy.
accurate_count = 0
for i, (image, label) in enumerate(test_data.dataset):
    # Pre-processing should remain the same. Currently, just normalize each pixel value and resize image according to the model's specification.
    image, _ = model.preprocess(image, label)
    # Add batch dimension and convert to float32 to match with the model's input
    # data format.
    image = tf.expand_dims(image, 0).numpy()
    # Run inference.
    interpreter.set_tensor(input_index, image)
    interpreter.invoke()
    # Post-processing: remove batch dimension and find the label with highest
    # probability.
    predict_label = np.argmax(output()[0])
    # Get label name with label index.
    predict_label_name = label_names[predict_label]
    accurate_count += (predict_label == label.numpy())
# Fraction of test images whose top-1 index matches the true label.
accuracy = accurate_count * 1.0 / test_data.size
print('TensorFlow Lite model accuracy = %.4f' % accuracy)
```
| github_jupyter |
# BOW Featurization and modeling
```
import pandas as pd
import numpy as np
import re
from tqdm import tqdm
import warnings
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from joblib import dump
from sklearn.model_selection import train_test_split
# Silence library warnings for cleaner notebook output.
warnings.filterwarnings("ignore")
# AFFR = preprocessed review dataset, 100k rows
# (presumably Amazon Fine Food Reviews — confirm).
data = pd.read_csv("AFFR_preprocessed_100k.csv")
data.tail(3)
def text_splitter(text):
    """Tokenize on whitespace; used as the CountVectorizer tokenizer."""
    tokens = text.split()
    return tokens
# max_features = 20000 means we want only the most useful (most occurring) 20000 features, not all.
# ngram_range=(1,3): uni/bi/tri-grams; min_df/max_df drop very rare and very common terms.
vectorizer = CountVectorizer(tokenizer = text_splitter,ngram_range=(1, 3),max_features=20000,min_df=5, max_df=0.7)
review_vector = vectorizer.fit_transform(data['Reviews'].values.astype(str))
# Persist the fitted vectorizer for reuse at inference time.
dump(vectorizer,"AFFR_vectorizer.pkl")
review_vector.shape
# Getting labels separate
y_label = data["rating"]
# 80/20 train/test split of the sparse BOW matrix.
x_train, x_test, y_train, y_test = train_test_split(review_vector, y_label, test_size = 0.20)
```
### SVM Classifier with RBF kernel
```
%%time
# RBF-kernel SVM on the BOW features; fit, report metrics, and persist.
svmclassifier = SVC(kernel='rbf',verbose=True,gamma="auto")
svmclassifier.fit(x_train, y_train)
y_pred = svmclassifier.predict(x_test)
print("Confusion matrix: \n",confusion_matrix(y_test,y_pred))
print("Classification report: \n",classification_report(y_test,y_pred))
print("Accuracy score is: ",accuracy_score(y_test,y_pred))
print("Model Saving ...")
dump(svmclassifier,"AFFR_SVM_model.pkl")
```
### Naive Bayes
```
%%time
# Multinomial NB works directly on sparse count features
# (GaussianNB would require a dense matrix).
NB_classifier = MultinomialNB()
NB_classifier.fit(x_train, y_train)
y_pred = NB_classifier.predict(x_test)
print("Confusion matrix: \n",confusion_matrix(y_test,y_pred))
print("Classification report: \n",classification_report(y_test,y_pred))
print("Accuracy score is: ",accuracy_score(y_test,y_pred))
print("Model Saving ...")
dump(NB_classifier,"AFFR_NB_model.pkl")
%%time
# Logistic regression baseline, using all cores.
LR_classifier = LogisticRegression(n_jobs=-1)
LR_classifier.fit(x_train, y_train)
y_pred = LR_classifier.predict(x_test)
print("Confusion matrix: \n",confusion_matrix(y_test,y_pred))
print("Classification report: \n",classification_report(y_test,y_pred))
print("Accuracy score is: ",accuracy_score(y_test,y_pred))
print("Model Saving ...")
dump(LR_classifier,"AFFR_LR_model.pkl")
%%time
# k-nearest neighbours baseline.
KNN_classifier = KNeighborsClassifier(n_jobs=-1)
KNN_classifier.fit(x_train, y_train)
y_pred = KNN_classifier.predict(x_test)
print("Confusion matrix: \n",confusion_matrix(y_test,y_pred))
print("Classification report: \n",classification_report(y_test,y_pred))
print("Accuracy score is: ",accuracy_score(y_test,y_pred))
print("Model Saving ...")
dump(KNN_classifier,"AFFR_KNN_model.pkl")
%%time
# Random forest baseline.
RF_classifier = RandomForestClassifier(n_jobs=-1)
RF_classifier.fit(x_train, y_train)
y_pred = RF_classifier.predict(x_test)
print("Confusion matrix: \n",confusion_matrix(y_test,y_pred))
print("Classification report: \n",classification_report(y_test,y_pred))
print("Accuracy score is: ",accuracy_score(y_test,y_pred))
print("Model Saving ...")
dump(RF_classifier,"AFFR_RF_model.pkl")
%%time
# Single decision tree baseline.
DT_classifier = DecisionTreeClassifier()
DT_classifier.fit(x_train, y_train)
y_pred = DT_classifier.predict(x_test)
print("Confusion matrix: \n",confusion_matrix(y_test,y_pred))
print("Classification report: \n",classification_report(y_test,y_pred))
print("Accuracy score is: ",accuracy_score(y_test,y_pred))
print("Model Saving ...")
dump(DT_classifier,"AFFR_DT_model.pkl")
```
| github_jupyter |
# DataFrames
We can think of a DataFrame as a bunch of Series objects put together to share the same index. Let's use pandas to explore this topic!
```
import pandas as pd
import numpy as np
from numpy.random import randn
# Seed the RNG so the example DataFrame is reproducible.
np.random.seed(101)
# 5x4 standard-normal DataFrame with labelled rows (A-E) and columns (W-Z).
df = pd.DataFrame(randn(5,4),index='A B C D E'.split(),columns='W X Y Z'.split())
df
```
## Selection and Indexing
Let's learn the various methods to grab data from a DataFrame
```
# Select a single column (returns a Series).
df['W']
# Pass a list of column names (returns a DataFrame)
df[['W','Z']]
# SQL Syntax (NOT RECOMMENDED! — breaks for names that clash with methods)
df.W
```
DataFrame Columns are just Series
```
# Each DataFrame column is itself a pandas Series.
type(df['W'])
```
**Creating a new column:**
```
# New column from element-wise arithmetic on existing columns.
df['new'] = df['W'] + df['Y']
df
```
**Removing Columns**
```
# Returns a copy without 'new'; axis=1 means drop a column.
df.drop('new',axis=1)
# Not inplace unless specified!
df
df.drop('new',axis=1,inplace=True)
df
```
Can also drop rows this way:
```
# Drop a row by label (axis=0); again returns a copy, not in-place.
df.drop('E',axis=0)
```
**Selecting Rows**
```
# Label-based row selection; the row comes back as a Series.
df.loc['A']
```
Or select based off of position instead of label
```
# Position-based row selection (third row).
df.iloc[2]
```
**Selecting subset of rows and columns**
```
# Scalar at row 'B', column 'Y'; then a 2x2 sub-frame by label lists.
df.loc['B','Y']
df.loc[['A','B'],['W','Y']]
```
### Conditional Selection
An important feature of pandas is conditional selection using bracket notation, very similar to numpy:
```
df
# Boolean mask DataFrame; filtering with it yields NaN where False.
df>0
df[df>0]
# Filter rows where column W is positive, then select columns from the result.
df[df['W']>0]
df[df['W']>0]['Y']
df[df['W']>0][['Y','X']]
```
For two conditions you can use | and & with parenthesis:
```
# Combine conditions with & / | and parentheses (not Python's `and`/`or`).
df[(df['W']>0) & (df['Y'] > 1)]
```
## More Index Details
Let's discuss some more features of indexing, including resetting the index or setting it something else. We'll also talk about index hierarchy!
```
df
# Reset to default 0,1...n index (old index becomes a column; not in-place)
df.reset_index()
newind = 'CA NY WY OR CO'.split()
df['States'] = newind
df
# Returns a re-indexed copy; df itself is unchanged without inplace=True.
df.set_index('States')
df
df.set_index('States',inplace=True)
df
```
## Multi-Index and Index Hierarchy
Let us go over how to work with Multi-Index, first we'll create a quick example of what a Multi-Indexed DataFrame would look like:
```
# Index Levels
outside = ['G1','G1','G1','G2','G2','G2']
inside = [1,2,3,1,2,3]
# Pair outer/inner levels and build a MultiIndex from the tuples.
hier_index = list(zip(outside,inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
df = pd.DataFrame(np.random.randn(6,2),index=hier_index,columns=['A','B'])
df
```
Now let's show how to index this! For index hierarchy we use df.loc[], if this was on the columns axis, you would just use normal bracket notation df[]. Calling one level of the index returns the sub-dataframe:
```
# Index the outer level first, then the inner level.
df.loc['G1']
df.loc['G1'].loc[1]
df.index.names
# Name the index levels for readable cross-sections.
df.index.names = ['Group','Num']
df
df.xs('G1')
df.xs(['G1',1])
# xs can select directly on an inner level by name.
df.xs(1,level='Num')
```
| github_jupyter |
# Problem set 5: Writing your own algorithms
[<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/exercises-2020/master?urlpath=lab/tree/PS5/problem_set_5.ipynb)
This problem set has no tasks, but only problems of increasing complexity. See how far you can get :)
```
import math
```
# Factorial
Remember that the factorial of $n$ is
$$
n\cdot(n-1)\cdot(n-2)\cdot...\cdot 1
$$
**Problem:** Correct the following function so that it returns the factorial of n using *functional recursion*.
```
def factorial(n):
    # Exercise stub: the base case is given; the recursive case is incomplete
    # on purpose (it currently returns just n).
    if n == 1:
        return 1
    else:
        return n # + missing code
print(factorial(5))
```
**Answer:**
```
def factorial(n):
    """Return n! via functional recursion (n >= 1)."""
    return 1 if n == 1 else n * factorial(n - 1)

# sanity-check against the standard library implementation
for n in [1,2,3,4,5]:
    y = factorial(n)
    print(f'the factorial of {n} is {y}')
    assert(y == math.factorial(n))
```
# Descending bubble sort
**Problem:** Sort a list of numbers in-place descending (from high to low).
**Inputs:** List of numbers.
**Outputs:** None.
**Algorithm:**
```
# Exercise: sort this list in place, descending.
L = [54, 26, 93, 17, 77, 31, 44, 55, 20] # test list
# write your code here (hint: use the bubble_sort() algorithm from the lectures)
```
**Answer:**
```
def swap(L,i,j):
    """Exchange the elements at positions i and j of L, in place."""
    L[i], L[j] = L[j], L[i]

def bubble_sort(L):
    """Sort L in place in DESCENDING order with bubble sort."""
    # after each pass, the smallest remaining element has bubbled to the end
    for limit in range(len(L) - 1, 0, -1):
        for j in range(limit):
            if L[j] < L[j + 1]:
                swap(L, j, j + 1)
# Run the in-place descending sort on the test list and show the result.
bubble_sort(L)
print('sorted',L)
```
# Linear search for index
**Problem:** Consider a number `x` and a sorted list of numbers `L`. Assume `L[0] <= x < L[-1]`. Find the index `i` such that `L[i] <= x < L[i+1]` using a linear search.
**Inputs:** A sorted list of numbers `L` and a number `x`.
**Outputs:** Integer.
```
# Sorted test list; the exercise assumes L[0] <= x < L[-1].
L = [0, 1, 2, 8, 13, 17, 19, 32, 42] # test list
# write your code here (hint: use the linear_search() algorithm from the lecture)
```
**Answer:**
```
def linear_search(L,x):
    """Return index i such that L[i] <= x < L[i+1] for a sorted list L.

    Assumes L[0] <= x < L[-1]; scans left to right (linear search).
    """
    last = len(L) - 1
    for i in range(last):
        if L[i] <= x < L[i + 1]:
            return i
    # mirrors the original: if no bracket matched, the final index is returned
    return last
# test: each x must land in the half-open bracket [L[i], L[i+1])
for x in [3,7,13,18,32]:
    i = linear_search(L,x)
    print(f'{x} gives the index {i}')
    assert(x >= L[i] and x < L[i+1]),(x,i,L[i])
```
# Bisection
**Problem:** Find an (approximate) solution to $f(x) = 0$ in the interval $[a,b]$ where $f(a)f(b) < 0$ (i.e. one is positive and the other is negative).
> If $f$ is a *continuous* function then the intermediate value theorem ensures that a solution exists.
**Inputs:** Function $f$, float interval $[a,b]$, float tolerance $\epsilon > 0$.
**Outputs:** Float.
**Algorithm:** `bisection()`
1. Set $a_0 = a$ and $b_0 = b$.
2. Compute $f(m_0)$ where $m_0 = (a_0 + b_0)/2$ is the midpoint
3. Determine the next sub-interval $[a_1,b_1]$:
i. If $f(a_0)f(m_0) < 0$ then $a_1 = a_0$ and $b_1 = m_0$
ii. If $f(m_0)f(b_0) < 0$ then $a_1 = m_0$ and $b_1 = b_0$
4. Repeat step 2 and step 3 until $|f(m_n)| < \epsilon$
```
f = lambda x: (2.1*x-1.7)*(x-3.3) # test function; roots at x = 1.7/2.1 and x = 3.3
def bisection(f,a,b,tau):
    # Exercise stub: implicitly returns None until implemented.
    pass
    # write your code here
result = bisection(f,0,1,1e-8)
print(result)  # prints None for the unimplemented stub
```
**Answer:**
```
def bisection(f,a,b,tol=1e-8):
    """ bisection

    Solve equation f(x) = 0 for a <= x <= b.

    Args:

        f (function): continuous function
        a (float): left bound
        b (float): right bound
        tol (float): tolerance on |f(midpoint)|

    Returns:

        float or None: approximate root, or None when no sign change exists

    """
    # the method requires a sign change over [a, b]
    if f(a)*f(b) >= 0:
        print("bisection method fails.")
        return None

    # step 1: initialize
    a_n = a
    b_n = b

    # step 2-4: halve the bracketing interval until |f(m_n)| < tol
    while True:
        # step 2: midpoint and associated value
        m_n = (a_n+b_n)/2
        f_m_n = f(m_n)

        # step 3: determine sub-interval
        if abs(f_m_n) < tol:
            return m_n
        elif f(a_n)*f_m_n < 0:
            # root lies in the left half (removed the original's no-op
            # self-assignments a_n = a_n / b_n = b_n)
            b_n = m_n
        elif f(b_n)*f_m_n < 0:
            # root lies in the right half
            a_n = m_n
        else:
            print("bisection method fails.")
            return None
    # NOTE: the original ended with an unreachable `return (a_n + b_n)/2`
    # after the infinite loop; it has been removed.
# Solve on [0, 1]: this interval brackets the root at x = 1.7/2.1.
result = bisection(f,0,1,1e-8)
print(f'result is {result:.3f} with f({result:.3f}) = {f(result):.16f}')
```
# Find prime numbers (hard)
**Goal:** Implement a function in Python for the sieve of Eratosthenes.
The **sieve of Eratosthenes** is a simple algorithm for finding all prime numbers up to a specified integer. It was created by the ancient Greek mathematician Eratosthenes. The algorithm finds all the prime numbers less than or equal to a given integer $n$.
**Algorithm:** `sieve_()`
1. Create a list of integers from $2$ to $n$: $2, 3, 4, ..., n$ (all potentially primes)
`primes = list(range(2,n+1))`
2. Start with a counter $i$ set to $2$, i.e. the first prime number
3. Starting from $i+i$, count up by $i$ and remove those numbers from the list, i.e. $2i$, $3i$, $4i$ etc.
`primes.remove(i)`
4. Find the first number of the list following $i$. This is the next prime number.
5. Set $i$ to the number found in the previous step.
6. Repeat steps 3-5 until $i$ is greater than $\sqrt {n}$.
7. All the numbers, which are still in the list, are prime numbers.
**A more detailed explanation:** See this [video](https://www.youtube.com/watch?v=klcIklsWzrY&feature=youtu.be)
```
def sieve(n):
    # Exercise stub: implicitly returns None until implemented.
    pass # write your code here
print(sieve(100))
```
**Answer:**
```
def sieve(n):
    """ sieve of Eratosthenes

    Return all primes between 2 and n.

    Args:

        n (integer): maximum number to consider

    Returns:

        list: primes in ascending order (empty for n < 2)

    """
    # guard: the original crashed with IndexError for n < 2
    if n < 2:
        return []

    # Boolean mask instead of repeated list.remove() — the original's
    # `if i in primes: primes.remove(i)` made each removal O(n).
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False

    i = 2
    while i * i <= n:  # only sieve up to sqrt(n)
        if is_prime[i]:
            # cross off every multiple of i starting at i*i
            for multiple in range(i * i, n + 1, i):
                is_prime[multiple] = False
        i += 1

    return [k for k in range(2, n + 1) if is_prime[k]]
# Expect the 25 primes up to 100.
print('primes from 2 to 100:',sieve(100))
```
# More Problems
See [Project Euler](https://projecteuler.net/about).
| github_jupyter |
# Segundo parcial tema A
__U.N.L.Z. - Facultad de Ingeniería__
__Electrotecnia__
__Alumno:__ Daniel Antonio Lorenzo
<mark><strong>(Resolución en python3)</strong></mark>
<a href="https://colab.research.google.com/github/daniel-lorenzo/Electrotecnia/blob/master/Ejercitacion/2do_parc_tema_A.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
## Pregunta 1
<img src="img/2doparc_A-01.png">
### Resoluciรณn
__Datos:__
$\left\{
\begin{array}{l}
S_N = 11000 \, \mathrm{VA} \\
U_{N1} = 400 \, \mathrm{V} \\
U_{N2} = 200 \, \mathrm{V}
\end{array}
\right. \qquad \qquad$
__Ensayo de cortocircuito:__
$\left\{
\begin{array}{l}
P_{cc} = 300 \, \mathrm{W} \\
U_{cc} = 20 \, \mathrm{V} \\
I_{cc} = 27,5 \, \mathrm{A}
\end{array}
\right.$
__Corrientes nominales:__
$$ I_{N1} = \frac{S_N}{U_{N1}} \qquad ; \qquad I_{N2} = \frac{S_N}{U_{N2}} $$
__Relación de transformación:__
$$ \mathrm{a} = \frac{U_{N1}}{U_{N2}} $$
__Ensayo de corto circuito:__
$$ Z_{eq1} = \frac{U_{cc}}{I_{cc}} $$
$$ P_{cc} = I_{cc} U_{cc} \cos \varphi_{cc} \quad \rightarrow \quad \varphi_{cc} = \arccos \left( \frac{P_{cc}}{I_{cc} U_{cc}} \right) $$
__Parรกmetros longitudinales:__
$$\begin{array}{|l|l|}
\hline
\mbox{Primario} & \mbox{Secundario} \\
\hline
R_{eq1} = Z_{eq1} \cos \varphi_{cc} & R_{eq2} = R_{eq1}/\mathrm{a}^2 \\
X_{eq1} = Z_{eq1} \sin \varphi_{cc} & X_{eq2} = X_{eq1}/\mathrm{a}^2 \\
\hline
\end{array}$$
```
import numpy as np # import the numpy library
# Transformer rating data:
SN = 11000 # [VA] rated apparent power
UN1 = 400 # [V] rated primary voltage
UN2 = 200 # [V] rated secondary voltage
Pcc = 300 # [W] short-circuit test power
Ucc = 20 # [V] short-circuit test voltage
Icc = 27.5 # [A] short-circuit test current
# Calculations:
IN1 = SN/UN1 # [A] rated current, side 1
IN2 = SN/UN2 # [A] rated current, side 2
a = UN1/UN2 # transformation (turns) ratio
# Short-circuit test: equivalent impedance and power-factor angle
Zeq1 = Ucc/Icc
phi_cc = np.arccos(Pcc/(Icc*Ucc))
# Series (longitudinal) parameters
# primary side              # referred to secondary (divide by a^2)
Req1 = Zeq1*np.cos(phi_cc) ; Req2 = Req1/a**2
Xeq1 = Zeq1*np.sin(phi_cc) ; Xeq2 = Xeq1/a**2
print('Resultado:')
print('Req2 = %.3f Ohm ; Xeq2 = %.3f Ohm'%(Req2,Xeq2))
```
## Pregunta 2
<img src="img/2doparc_A-02.png">
### Resoluciรณn
__Ensayo de vacรญo:__
$\left\{
\begin{array}{l}
P_0 = 110 \, \mathrm{W} \\
U_0 = 200 \, \mathrm{V} \\
I_0 = 2 \, \mathrm{A}
\end{array}
\right.$
__Cรกlculo de $\varphi_0$__
$$ P_0 = I_0 U_0 \cos \varphi_0 \qquad \rightarrow \qquad \varphi_0 = \arccos \left( \frac{P_0}{I_0 U_0} \right) $$
$\begin{array}{l}
I_p = I_0 \cos \varphi_0 \\
I_m = I_0 \sin \varphi_0
\end{array}$
$$\begin{array}{|l|l|}
\hline
\mbox{Secundario (BT)} & \mbox{Primario (AT)} \\
\hline
R_{p2} = U_0 / I_p & R_{p1} = R_{p2} \cdot \mathrm{a}^2 \\
X_{m2} = U_0 / I_m & X_{eq2} = X_{m1} \cdot \mathrm{a}^2 \\
\hline
\end{array}$$
```
# No-load (open-circuit) test data
Po = 110 # [W] no-load power
Uo = 200 # [V] no-load voltage
Io = 2 # [A] no-load current
# No-load test: power-factor angle and current components
phi_o = np.arccos(Po/(Io*Uo))
Ip = Io*np.cos(phi_o)  # active (core-loss) component of Io
Im = Io*np.sin(phi_o)  # reactive (magnetizing) component of Io
# secondary (LV) side   # referred to primary (HV): multiply by a^2
Rp2 = Uo/Ip ; Rp1 = Rp2*a**2
Xm2 = Uo/Im ; Xm1 = Xm2*a**2
print('Resultado:')
print('Rp2 = %.2f Ohm ; Xm2 = %.2f Ohm'%(Rp2,Xm2))
```
## Pregunta 3
<img src="img/2doparc_A-03.png">
### Resoluciรณn
__Datos:__
$\left\{
\begin{array}{l}
\mathrm{fp_{reg}} = 0,7 \\
\mathrm{fp_{rend} = 0,7}
\end{array}
\right.$
__Regulaciรณn:__
$$\begin{array}{lcl}
\mathrm{fp_{reg}} = \cos \varphi_\mathrm{reg} & \quad \Rightarrow \quad & \varphi_\mathrm{reg} = \arccos \mathrm{fp_{reg}} \\
I_\mathrm{2reg} = \mathrm{fp_{reg}} I_{N2} & \quad \wedge \quad & U_{20} = U_0
\end{array}$$
__Tensiรณn de salida o aplicada a la carga:__
$$ U_\mathrm{2reg,ind} = U_{20} - I_\mathrm{2reg} (R_{eq2} \cos \varphi_\mathrm{reg} + X_{eq2} \sin \varphi_\mathrm{reg}) $$
__Regulaciรณn:__
$$ \mathrm{reg_{ind}} = \mathrm{ \frac{U_{20} - U_{2reg,ind}}{U_{20}} \times 100} $$
```
# Given power factors:
fp_reg = 0.7
fp_rend = 0.7
# Regulation: load angle and load current (U20 = no-load secondary voltage)
phi_reg = np.arccos(fp_reg)
I2_reg = fp_reg*IN2 ; U20 = Uo
# Output voltage applied to the load (inductive case):
U2reg_ind = U20 - I2_reg*(Req2*np.cos(phi_reg) + Xeq2*np.sin(phi_reg))
# Voltage regulation in percent
reg_ind = (U20 - U2reg_ind)/U20*100
print('Resultado:')
print('reg_ind = %.2f'%reg_ind)
```
## Pregunta 4
<img src="img/2doparc_A-04.png">
### Resoluciรณn
__Datos:__
$\left\{
\begin{array}{l}
\mathrm{fp_{reg}} = 0,7 \\
\mathrm{fp_{rend} = 0,8}
\end{array}
\right.$
__Regulaciรณn:__
$$\begin{array}{lcl}
\mathrm{fp_{reg}} = \cos \varphi_\mathrm{reg} & \quad \Rightarrow \quad & \varphi_\mathrm{reg} = \arccos \mathrm{fp_{reg}} \\
I_\mathrm{2reg} = \mathrm{fp_{reg}} I_{N2} & \quad \wedge \quad & U_{20} = U_0
\end{array}$$
__Tensiรณn de salida o aplicada a la carga:__
$$ U_\mathrm{2reg,ind} = U_{20} - I_\mathrm{2reg} (R_{eq2} \cos \varphi_\mathrm{reg} + X_{eq2} \sin \varphi_\mathrm{reg}) $$
__Regulaciรณn:__
$$ \mathrm{reg_{ind}} = \mathrm{ \frac{U_{20} - U_{2reg,ind}}{U_{20}} \times 100} $$
__Cรกlculos de rendimiento__
$$ \mathrm{fp_{rend}} = \cos \mathrm{\varphi_{rend}} \qquad \rightarrow \qquad \mathrm{\varphi_{rend} = \arccos fp_{rend}} $$
$I_\mathrm{2rend} = I_{N2}$
$P_{cu} = I_\mathrm{2rend}^2 R_{eq2}$
__Rendimiento para el caso inductivo__
$$ U_\mathrm{2rend,ind} = U_{20} - I_\mathrm{2rend} (R_{eq2} \cos \varphi_\mathrm{rend} + X_{eq2} \sin \varphi_\mathrm{rend}) $$
$$ \eta_\mathrm{ind} = \frac{U_\mathrm{2rend,ind} I_\mathrm{2rend} \cos \varphi_\mathrm{rend}}{U_\mathrm{2rend,ind} I_\mathrm{2rend} \cos \varphi_\mathrm{rend} + P_{cu} + P_0} $$
```
# Given power factors:
fp_reg = 0.7   # for the regulation calculation
fp_rend = 0.8  # for the efficiency calculation
# Regulation: load angle and load current (U20 = no-load secondary voltage)
phi_reg = np.arccos(fp_reg)
I2_reg = fp_reg*IN2 ; U20 = Uo
# Output voltage applied to the load (inductive case):
U2reg_ind = U20 - I2_reg*(Req2*np.cos(phi_reg) + Xeq2*np.sin(phi_reg))
# Voltage regulation in percent
reg_ind = (U20 - U2reg_ind)/U20*100
# Efficiency calculation at rated current
phi_rend = np.arccos(fp_rend)
I2_rend = IN2
Pcu = I2_rend**2*Req2  # copper losses
# Efficiency for the inductive case:
U2rend_ind = U20 - I2_rend*(Req2*np.cos(phi_rend) + Xeq2*np.sin(phi_rend))
n_ind = (U2rend_ind*I2_rend*np.cos(phi_rend))/(U2rend_ind*I2_rend*np.cos(phi_rend) + Pcu + Po)
print('Resultado:')
print('n_ind = %.2f'%n_ind)
```
## Pregunta 5
<img src="img/2doparc_A-05.png">
### Resoluciรณn
__Dato:__ $\quad \rightarrow \quad \mathrm{fp_{max} = 1}$
$$ \mathrm{fp_{max} = \cos \varphi_{max}} \qquad \rightarrow \qquad \varphi_\mathrm{max} = \arccos \mathrm{fp_{max}} $$
$$ I_\mathrm{2max} = \sqrt{ \frac{P_0}{R_{eq2}} } $$
$P_\mathrm{cu,max} = I_\mathrm{2max}^2 R_{eq2}$
$$ U_\mathrm{2,max} = U_{N2} - I_\mathrm{2max} (R_{eq2} \cos \varphi_\mathrm{max} + X_\mathrm{eq2} \underbrace{ \sin \varphi_\mathrm{max} }_{\rightarrow \, 0}) $$
__Rendimiento mรกximo:__
$$ \eta_\mathrm{max} = \frac{U_\mathrm{2max} I_\mathrm{2max} \mathrm{fp_{max}}}{U_\mathrm{2max} I_\mathrm{2max} \mathrm{fp_{max}} + P_\mathrm{cu,max} + P_0} $$
```
# Given:
fp_max = 1 # power factor at maximum efficiency (purely resistive load)
phi_max = np.arccos(fp_max)
# Max efficiency occurs when copper losses equal the iron losses Po
I2_max = np.sqrt(Po/Req2)
Pcu_max = I2_max**2*Req2
U2_max = UN2 - I2_max*(Req2*np.cos(phi_max) + Xeq2*np.sin(phi_max))
# Maximum efficiency (typo in the printed string kept: it is runtime output)
n_max = (U2_max*I2_max*fp_max)/(U2_max*I2_max*fp_max + Pcu_max + Po)
print('Resulatado:')
print('n_max = %.3f'%n_max)
```
## Pregunta 6
<img src="img/2doparc_A-06.png">
## Pregunta 7
<img src="img/2doparc_A-07.png">
$$ S = \sqrt{3} \cdot U_\mathrm{Linea} I_\mathrm{Linea} $$
## Pregunta 8
<img src="img/2doparc_A-08.png">
## Pregunta 9
<img src="img/2doparc_A-09.png">
## Pregunta 10
<img src="img/2doparc_A-10.png">
### Resoluciรณn
$$ S_t^2 = (P_r + P_s + P_t)^2 + (Q_r + Q_s + Q_t)^2 $$
$$ 100^2 = (11 + 11 + P_t)^2 + (11 + 11 + 11)^2 $$
$$ 100^2 = (22 + P_t)^2 + (33)^2 $$
$$ P_t = \sqrt{(100)^2 - (33)^2} - 22 $$
$$ P_t = 72,398 \, \mathrm{kW} $$
```
# Solve S^2 = (Pr+Ps+Pt)^2 + (Qr+Qs+Qt)^2 for Pt, with S=100, Pr=Ps=11, Q=3*11
Pt = np.sqrt(100**2 - 33**2) - 22
print('Resultado:')
print('Pt = %.3f kW'%(Pt))
```
----------
<a href="https://colab.research.google.com/github/daniel-lorenzo/Electrotecnia/blob/master/Ejercitacion/2do_parc_tema_A.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
| github_jupyter |
# Imports
```
from pathlib import Path
import mlflow
import mlflow.spark
import pandas as pd
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler
from pyspark.ml.regression import LinearRegression, RandomForestRegressor
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.sql import SparkSession
# Create (or reuse) the SparkSession that backs all DataFrame work below.
spark = SparkSession.builder.appName('ML in Spark').getOrCreate()
spark  # bare expression: the notebook displays the session summary
DATA_PATH = Path('data')
# IPython shell magic: list the contents of the data directory.
!ls {DATA_PATH}
```
# Data
```
# Load the cleaned SF Airbnb listings dataset from Parquet.
df = (spark
      .read
      .parquet((DATA_PATH / 'sf-airbnb-clean.parquet').as_posix()))
df.count()
df.columns
# Peek at a few representative columns; `price` is the regression target.
df.select("neighbourhood_cleansed", "room_type", "bedrooms", "bathrooms",
          "number_of_reviews", "price").show(5)
# 80/20 train/test split; fixed seed for reproducible rows in each side.
train_df, test_df = df.randomSplit([.8, .2], seed=42)
train_df.count(), test_df.count()
train_df.dtypes
```
# Linear Regression
```
# Split columns by Spark dtype: string columns are treated as categorical,
# everything else (except the label `price`) as numeric.
cat_cols = [field for (field, dtype) in train_df.dtypes if dtype == 'string']
num_cols = [field for (field, dtype) in train_df.dtypes if dtype != 'string' and field != 'price']
print(f'Categorical fields:\n{cat_cols}\n')
print(f'Numerical fields:\n{num_cols}')
indexed_cols = [col + '_indexed' for col in cat_cols]
encoded_cols = [col + '_encoded' for col in cat_cols]
# Map each categorical string to a numeric index; rows with labels unseen
# during fit are dropped ('skip') at transform time.
indexer = (StringIndexer()
           .setInputCols(cat_cols)
           .setOutputCols(indexed_cols)
           .setHandleInvalid('skip'))
# One-hot encode the indexed categories for the linear model.
ohe = (OneHotEncoder()
       .setInputCols(indexed_cols)
       .setOutputCols(encoded_cols))
# NOTE(review): only the encoded categorical columns go into `features` —
# the numeric columns in `num_cols` are computed but never used; confirm
# whether that is intentional.
vector_assembler = (VectorAssembler()
                    .setInputCols(encoded_cols)
                    .setOutputCol('features'))
lr = LinearRegression(featuresCol='features', labelCol='price')
# Full pipeline: index -> one-hot -> assemble -> linear regression.
lr_pipeline = Pipeline(stages=[indexer, ohe, vector_assembler, lr])
lr_pipeline_model = lr_pipeline.fit(train_df)
pred_df = lr_pipeline_model.transform(test_df)
pred_df.select('features', 'price', 'prediction').show(5)
# Evaluate on the held-out set: RMSE first, then R^2 on the same predictions.
reg_eval = RegressionEvaluator(predictionCol='prediction', labelCol='price', metricName='rmse')
reg_eval.evaluate(pred_df)
reg_eval.setMetricName('r2').evaluate(pred_df)
# Persist the fitted pipeline, reload it, and verify it round-trips.
lr_pipeline_model.write().overwrite().save('linear_reg_pip')
lr_pipeline_model = PipelineModel.load('linear_reg_pip')
reg_eval.setMetricName('r2').evaluate(lr_pipeline_model.transform(test_df))
```
# RandomForest
```
# Tree models can consume the raw category indices directly, so no
# one-hot encoding stage is needed here.
indexed_cols = [col + '_indexed' for col in cat_cols]
indexer = (StringIndexer()
           .setInputCols(cat_cols)
           .setOutputCols(indexed_cols)
           .setHandleInvalid('skip'))
vector_assembler = (VectorAssembler()
                    .setInputCols(indexed_cols)
                    .setOutputCol('features'))
# maxBins=40 — presumably raised to cover the highest-cardinality
# categorical column in this dataset; confirm against the data.
rf = RandomForestRegressor(featuresCol='features', labelCol='price', maxBins=40, seed=42)
rf_pipeline = Pipeline(stages=[indexer, vector_assembler, rf])
rf_pipeline_model = rf_pipeline.fit(train_df)
pred_df = rf_pipeline_model.transform(test_df)
pred_df.select('features', 'price', 'prediction').show(5)
reg_eval = RegressionEvaluator(predictionCol='prediction', labelCol='price', metricName='rmse')
reg_eval.evaluate(pred_df)
reg_eval.setMetricName('r2').evaluate(pred_df)
# Rank input columns by the forest's feature importances
# (stage -2 is the assembler, stage -1 the fitted forest).
(pd.DataFrame(
    list(
        zip(rf_pipeline_model.stages[-2].getInputCols(), rf_pipeline_model.stages[-1].featureImportances))
    , columns=['feature', 'importance'])
 .sort_values(by='importance', ascending=False))
```
# Hyperparameter Tuning
```
# Rebuild the RF pipeline pieces so this section is self-contained.
indexed_cols = [col + '_indexed' for col in cat_cols]
indexer = (StringIndexer()
           .setInputCols(cat_cols)
           .setOutputCols(indexed_cols)
           .setHandleInvalid('skip'))
vector_assembler = (VectorAssembler()
                    .setInputCols(indexed_cols)
                    .setOutputCol('features'))
rf = RandomForestRegressor(featuresCol='features', labelCol='price', maxBins=40, seed=42)
rf_pipeline = Pipeline(stages=[indexer, vector_assembler, rf])
evaluator = RegressionEvaluator(labelCol='price', predictionCol='prediction', metricName='rmse')
# 3 depths x 2 forest sizes = 6 candidate models per fold.
param_grid = (ParamGridBuilder()
              .addGrid(rf.maxDepth, [2, 4, 6])
              .addGrid(rf.numTrees, [10, 100])
              .build())
# Variant 1: sequential cross-validation over the whole pipeline.
cv = (CrossValidator()
      .setEstimator(rf_pipeline)
      .setEvaluator(evaluator)
      .setEstimatorParamMaps(param_grid)
      .setNumFolds(3)
      .setSeed(42))
%time cv.fit(train_df)
# Variant 2: same search, fitting up to 4 candidate models in parallel.
cv = (CrossValidator()
      .setEstimator(rf_pipeline)
      .setEvaluator(evaluator)
      .setEstimatorParamMaps(param_grid)
      .setNumFolds(3)
      .setParallelism(4)
      .setSeed(42))
%time cv.fit(train_df)
# Variant 3: put the CrossValidator INSIDE the pipeline so the indexing and
# assembling stages are fit once, not once per fold/candidate.
cv = (CrossValidator()
      .setEstimator(rf)
      .setEvaluator(evaluator)
      .setEstimatorParamMaps(param_grid)
      .setParallelism(4)
      .setNumFolds(3)
      .setSeed(42))
rf_pipeline = Pipeline(stages=[indexer, vector_assembler, cv])
%time rf_pipeline.fit(train_df)
```
| github_jupyter |
# Jupyter Notebook Fundamentals
A **notebook** is a collection **cells**. These cells are run to execute code, render formatted text or display graphical visualizations.
## Understanding Code Cells and Markdown Cells
The following cell (with the gray text area) is a code cell.
```
# This is a code cell
# By default, a new cell added in a notebook is a code cell
# The value of the last expression in a cell is shown as the cell's output.
1 + 1
```
This notebook is written in Python. Because of this, you need to select the appropriate **Kernel** that you use to run the cells of this notebook.
To select your Kernel:
1. In the notebook toolbar, select the **Kernel** dropdown.
2. From the drop-down, select **Python 3**.

The code cell above has not run yet, so the expression `1 + 1` has not been evaluated. To run the code cell, select the cell by placing your cursor within the cell text area and do any of the following:
- Press `F5` to run the cell
- Use the cell Run icon to the left of the cell

The following cell is another example of a code cell. Run it to see its output.
```
# This is also a code cell
# print() writes directly to the cell's output area.
print("Welcome to your SQL Server 2019 Big Data cluster!")
```
The following cell, which displays its output as formatted text is a text cell that supports [markdown](https://en.wikipedia.org/wiki/Markdown) format.
This is a *text* cell.
To create a text cell, select the cell command menu on the upper-right (...). In the context menu, select **Insert Text Before** to add a text cell above this one, or **Insert Text After** to add one after this cell.

Double click on the above cell and notice how the cell changes to an editable code cell.
A preview of the markdown is displayed below the cell. To finish editing, simply click somewhere outside of the cell or press `Esc`.
### Understanding cell output
By default, a notebook cell will output the value of evaluating the last line the cell.
Run the following cell. Observe that the entire cell is echoed in the output because the cell contains only one line.
```
# A bare expression on the cell's last line is echoed as the output.
"Hello SQL world!"
```
Next, examine the following cell. What do you expect the output to be? Run the cell and confirm your understanding.
```
# Only the LAST line's value is echoed; the first string produces no output.
"Hello SQL world!"
"And, hello Jupyter notebook!"
```
If you want to ensure your output displays something, use the `print` method.
```
# Using print() guarantees both lines are displayed, not just the last.
print("Hello SQL world!")
print("And, hello Jupyter notebook!")
```
Not all code lines return a value to be output. Run the following cell to see one such an example.
```
# An assignment statement has no value, so this cell produces no output.
text_variable = "Hello, hello!"
```
## Running multiple notebook cells
It is not uncommon to need to run (or re-run) all notebook cells in top-to-bottom order.
To do this, select **Run Cells** in the toolbar above the notebook. This runs all cells starting from the first.
## Adding code cells
You can add new code cells in the same way you add text cells.
To do this, select the cell command menu on the upper-right (...). In the context menu, select **Insert Code Before** to add a code cell above this one, or **Insert Code After** to add one after this cell.

You can also use this command menu to delete a cell.
## Understanding notebook state
When you execute notebook cells, their execution is backed by a process running on a cluster or locally, depending on the Kernel you select. The state of your notebook, such as the values of variables, is maintained in the process. All variables default to a global scope (unless you author your code so it has nested scopes) and this global state can be a little confusing at first when you re-run cells.
Run the following two cells in order and take note of the value output for the variable `y`:
```
# y is computed from the value of x currently held in the kernel's state.
x = 10
y = x + 1
y
```
Next, run the following cell.
```
# Rebinding x does NOT recompute y; re-running the cell above will use this value.
x = 100
```
Now select the cell that has the lines `y = x + 1` and `y`. And re-run that cell. Did the value of `y` meet your expectation?
The value of `y` should now be `101`. This is because it is not the actual order of the cells that determines the value, but the order in which they are run and how that affects the underlying state itself. To understand this, realize that when the code `x = 100` was run, this changed the value of `x`, and then when you re-ran the cell containing `y = x + 1` this evaluation used the current value of x which is 100. This resulted in `y` having a value of `101` and not `11`.
### Clearing results
You can use the **Clear Results** toolbar item above the notebook to clear all displayed output from underneath code cells.
You typically do this when you want to cleanly re-run a notebook you have been working on and eliminate any accidental changes to the state that may have occurred while you were authoring the notebook.
| github_jupyter |
# Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](https://www.numpy.org) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
```
## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
```
# Loading the data (cat/non-cat) from data.h5 via the course-provided helper.
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
```
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
```
# Example of a picture
index = 90  # change this to view other images
plt.imshow(train_set_x_orig[index])
# `classes` holds byte strings (b'cat' / b'non-cat'), hence the .decode("utf-8").
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
```
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
```
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]  # number of training examples
m_test = test_set_x_orig.shape[0]    # number of test examples
num_px = train_set_x_orig.shape[1]   # images are square, so height == width
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
```
**Expected Output for m_train, m_test and num_px**:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```
```
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
# Flatten each (num_px, num_px, 3) image into a column: reshape(m, -1).T
# yields shape (num_px*num_px*3, m) with one image per column.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
```
**Expected Output**:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
```
# Scale pixel values from [0, 255] down to [0, 1].
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
```
<font color='blue'>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data
## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Apply the logistic function 1 / (1 + exp(-z)) element-wise.

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z), same shape as z
    """
    ### START CODE HERE ### (≈ 1 line of code)
    exp_neg_z = np.exp(-z)
    s = 1 / (1 + exp_neg_z)
    ### END CODE HERE ###
    return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
```
**Expected Output**:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
```
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
    """
    Create the zero-initialized parameters of the model.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    ### START CODE HERE ### (≈ 1 line of code)
    b = 0
    w = np.zeros(shape=(dim, 1))
    ### END CODE HERE ###

    # Sanity checks on the shapes/types of the freshly created parameters.
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
```
**Expected Output**:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
```
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
    """
    Compute the logistic-regression cost and its gradients for one pass.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    grads -- dictionary with "dw" and "db", gradients of the loss w.r.t. w and b
    cost -- negative log-likelihood cost for logistic regression
    """
    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    ### START CODE HERE ### (≈ 2 lines of code)
    Z = np.dot(w.T, X) + b
    A = sigmoid(Z)                                        # activations, shape (1, m)
    log_likelihood = Y*np.log(A) + (1-Y)*np.log(1-A)
    cost = -1 / m * np.sum(log_likelihood)                # mean cross-entropy
    ### END CODE HERE ###

    # BACKWARD PROPAGATION (TO FIND GRAD)
    ### START CODE HERE ### (≈ 2 lines of code)
    error = A - Y
    dw = 1 / m * np.dot(X, error.T)
    db = 1 / m * np.sum(error)
    ### END CODE HERE ###

    # Shape/type sanity checks before returning.
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    grads = {"dw": dw,
             "db": db}
    return grads, cost
w, b, X, Y = np.array([[1],[2]]), 2, np.array([[1,2],[3,4]]), np.array([[1,0]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99993216]
[ 1.99980262]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.499935230625 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 6.000064773192205</td>
</tr>
</table>
### d) Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
```
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    Optimize w and b by running gradient descent, using propagate() for
    the forward/backward pass at each step.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of the costs recorded every 100 iterations (for the learning curve)
    """
    costs = []
    for step in range(num_iterations):
        # Forward + backward pass for the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]

        # Gradient-descent update rule.
        w = w - dw * learning_rate
        b = b - db * learning_rate

        # Record (and optionally report) the cost every 100 iterations.
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(step, cost))

    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.1124579 ]
[ 0.23106775]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.55930492484 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.90158428]
[ 1.76250842]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.430462071679 </td>
</tr>
</table>
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
```
# GRADED FUNCTION: predict
def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1,m))
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probabilities of a cat being present in the picture
    ### START CODE HERE ### (≈ 1 line of code)
    A = sigmoid(b + np.dot(w.T, X))
    ### END CODE HERE ###

    for col in range(A.shape[1]):
        # Threshold the probability at 0.5 to get a hard 0/1 label.
        ### START CODE HERE ### (≈ 4 lines of code)
        Y_prediction[0, col] = 1 if A[0, col] > 0.5 else 0
        ### END CODE HERE ###

    assert(Y_prediction.shape == (1, m))
    return Y_prediction
print ("predictions = " + str(predict(w, b, X)))
```
**Expected Output**:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1.]]
</td>
</tr>
</table>
<font color='blue'>
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    ### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent (≈ 1 line of code).
    # BUG FIX: `print_cost` is now forwarded to optimize(); previously it was
    # hard-coded to False, so the parameter documented above had no effect.
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost = print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    ### END CODE HERE ###

    # Print train/test errors (accuracy = 100 - mean absolute prediction error in %)
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train" : Y_prediction_train,
         "w" : w,
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}
    return d
```
Run the following cell to train your model.
```
# NOTE(review): the expected-output table below (99.04% train / 70.0% test)
# corresponds to the standard assignment setting num_iterations = 2000 —
# confirm that 20000 iterations is intentional here.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 20000, learning_rate = 0.005, print_cost = True)
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test error is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
```
# Example of a picture that was wrongly classified.
index = 30
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
# BUG FIX: predictions are floats (0.0/1.0); indexing the `classes` array with
# a float raises IndexError on modern NumPy, so cast to int first.
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[int(d["Y_prediction_test"][0,index])].decode("utf-8") + "\" picture.")
```
Let's also plot the cost function and the gradients.
```
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])  # one cost was recorded every 100 iterations
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
#### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
```
# Train one model per learning rate and compare their learning curves.
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print ("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
    print ('\n' + "-------------------------------------------------------" + '\n')

# Overlay the recorded cost curves, labelled by their learning rate.
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))

plt.ylabel('cost')
plt.xlabel('iterations')

legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
```
**Interpretation**:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "mycat.jpg"   # change this to the name of your image file
## END CODE HERE ##

# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
# BUG FIX: scipy.ndimage.imread (removed in SciPy 1.2) and scipy.misc.imresize
# (removed in SciPy 1.3) no longer exist; use PIL, which this notebook already
# imports, to load and resize the image instead. PIL's resize takes
# (width, height); for a square num_px x num_px target this is equivalent.
# Assumes the input picture is RGB (3 channels) — TODO confirm for your file.
image = np.array(Image.open(fname))
my_image = np.array(Image.open(fname).resize((num_px, num_px))).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
<font color='blue'>
**What to remember from this assignment:**
1. Preprocessing the dataset is important.
2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
- Play with the learning rate and the number of iterations
- Try different initialization methods and compare the results
- Test other preprocessings (center the data, or divide each row by its standard deviation)
Bibliography:
- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
| github_jupyter |
```
import torch
import torch.nn as nn
from torchinfo import summary
def build_circle_segmenter():
    """Construct a small fully-convolutional encoder/decoder for circle segmentation.

    Input:  (N, 1, 128, 128) grayscale images.
    Output: (N, 1, 128, 128) maps, non-negative because of the final ReLU.
    """
    def conv3(cin, cout):
        # 3x3 convolution that preserves spatial size
        return nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=3, stride=1, padding=1)

    def upsample2(cin, cout):
        # transposed convolution that doubles spatial size
        return nn.ConvTranspose2d(in_channels=cin, out_channels=cout, kernel_size=2, stride=2, padding=0)

    def pool2():
        # halves spatial size
        return nn.MaxPool2d(kernel_size=2, stride=2)

    return nn.Sequential(
        # encoder: 128x128 -> 64x64 -> 32x32, widening 1 -> 8 -> 16 -> 32 channels
        conv3(1, 8), nn.ReLU(),
        conv3(8, 8), nn.ReLU(),
        pool2(),
        conv3(8, 16), nn.ReLU(),
        conv3(16, 16), nn.ReLU(),
        pool2(),
        conv3(16, 32), nn.ReLU(),
        conv3(32, 32), nn.ReLU(),
        # decoder: 32x32 -> 64x64 -> 128x128, narrowing back to 1 channel
        upsample2(32, 16), nn.ReLU(),
        conv3(16, 16), nn.ReLU(),
        upsample2(16, 8), nn.ReLU(),
        conv3(8, 1), nn.ReLU(),
    )
summary(build_circle_segmenter(), input_size=(2, 1, 128, 128))
import itertools
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageDraw
def get_empty_image(image_width, image_height):
    """Return a blank PIL image of the given size, filled with white (255).

    mode="F" gives 32-bit floating point pixels
    (mode="LA" would give 8-bit grayscale with an alpha channel).
    """
    return Image.new(mode="F", size=(image_width, image_height), color=255)
def draw_a_circle(target_image, circle_e1, circle_e2, circle_radius, outline_color=255):
    """Draw an unfilled circle outline centred at (circle_e1, circle_e2).

    NOTE(review): the bounding box spans e +/- radius/2, so circle_radius is
    effectively used as a diameter; this matches fill_a_circle, so the input
    and target circles still line up.
    """
    half = circle_radius / 2
    bounding_box = (
        circle_e1 - half,
        circle_e2 - half,
        circle_e1 + half,
        circle_e2 + half,
    )
    artist = ImageDraw.ImageDraw(target_image)
    artist.arc(bounding_box, start=0, end=360, width=1, fill=outline_color)
    return target_image
def fill_a_circle(target_image, circle_e1, circle_e2, circle_radius, circle_fill_color=255):
    """Draw a filled circle centred at (circle_e1, circle_e2) onto target_image.

    Uses the same e +/- radius/2 bounding box as draw_a_circle so filled and
    outlined circles coincide.
    """
    half = circle_radius / 2
    bounding_box = (
        circle_e1 - half,
        circle_e2 - half,
        circle_e1 + half,
        circle_e2 + half,
    )
    artist = ImageDraw.ImageDraw(target_image)
    # no outline= here: only the interior is painted
    artist.ellipse(bounding_box, width=1, fill=circle_fill_color)
    return target_image
def image_of_circles(circle_count):
    """Generate one random training pair of circle images.

    Returns (circle_radius_list, filled_image, outlined_image): the input
    image contains filled circles, the target image the same circles with a
    black outline drawn on top.
    """
    filled_image = get_empty_image(image_width=128, image_height=128)
    circle_radii = list()
    outlined_image = get_empty_image(image_width=128, image_height=128)
    parameters_per_circle = list()
    for _ in range(circle_count):
        # NOTE: the order of the np.random draws (radius, e1, e2, fill color)
        # defines the generated dataset — keep it stable.
        radius = np.random.randint(low=10, high=40)
        circle_radii.append(radius)
        parameters_per_circle.append(
            {
                "circle_radius": radius,
                "circle_e1": np.random.randint(low=20, high=80),
                "circle_e2": np.random.randint(low=20, high=80),
                "circle_fill_color": np.random.randint(low=100, high=200),
            }
        )
    for circle_parameters in parameters_per_circle:
        # input training image: filled circles only
        fill_a_circle(target_image=filled_image, **circle_parameters)
        # "target" training image: the same filled circles ...
        fill_a_circle(target_image=outlined_image, **circle_parameters)
    for circle_parameters in parameters_per_circle:
        # ... plus a black outline on each circle (fill color not needed here)
        circle_parameters.pop("circle_fill_color")
        draw_a_circle(
            target_image=outlined_image,
            outline_color=0,
            **circle_parameters,
        )
    return (circle_radii, filled_image, outlined_image)
import matplotlib.pyplot as plt

# generate 4 sets of input/output
circle_radiuses_list = list()
input_images = list()
output_images = list()
for _ in range(4):
    circle_count = np.random.randint(low=1, high=5)
    circle_radiuses, input_circles_image, output_circles_image = image_of_circles(circle_count=circle_count)
    circle_radiuses_list.append(circle_radiuses)
    input_images.append(input_circles_image)
    output_images.append(output_circles_image)

fig, axs = plt.subplots(nrows=2, ncols=2)
for i, (r, c) in enumerate(itertools.product(range(2), range(2))):
    # BUG FIX: the original printed the loop-leaked `circle_radiuses`
    # (always the last sample); print the radii of the image being shown.
    print('circle radiuses: {}'.format(circle_radiuses_list[i]))
    axs[r][c].imshow(input_images[i], origin="lower")

fig, axs = plt.subplots(nrows=2, ncols=2)
for i, (r, c) in enumerate(itertools.product(range(2), range(2))):
    print('circle radiuses: {}'.format(circle_radiuses_list[i]))
    axs[r][c].imshow(output_images[i], origin="lower")
# a class to interact with DataLoaders
class CircleImageDataset:
    """Map-style dataset of (target_image, input_image) pairs.

    Each item is a pair of float arrays shaped (1, H, W): the segmentation
    target (outlined circles) first, the network input (filled circles) second.
    """

    def __init__(self, circle_image_count):
        self.circle_image_list = list()
        for _ in range(circle_image_count):
            how_many_circles = np.random.randint(low=1, high=5)
            # the per-circle radii returned first are not needed for
            # segmentation training and are discarded here
            _, input_circles_image, target_circles_image = image_of_circles(
                circle_count=how_many_circles
            )
            # each PIL image becomes a 2D numpy array with an extra leading
            # 'channel' axis, the layout PyTorch convolutional layers expect
            pair = (
                np.expand_dims(np.array(target_circles_image), axis=0),
                np.expand_dims(np.array(input_circles_image), axis=0),
            )
            self.circle_image_list.append(pair)

    def __getitem__(self, index):
        # returns one (target_image, input_image) pair
        return self.circle_image_list[index]

    def __len__(self):
        return len(self.circle_image_list)
def test_circle_image_dataset():
    """Smoke-test dataset construction: length and per-sample shapes."""
    dataset = CircleImageDataset(100)
    print(f"len(circle_image_dataset): {len(dataset)}")
    target_image, input_image = dataset[99]
    print(f"target image.shape : {target_image.shape}")
    print(f"input image.shape : {input_image.shape}")

test_circle_image_dataset()
from torch.utils.data import DataLoader
def test_circle_image_dataloader():
    """Smoke-test batching: print batch shapes/dtypes and run a forward pass."""
    circle_image_dataloader = DataLoader(CircleImageDataset(circle_image_count=100), batch_size=10)
    for batch in circle_image_dataloader:
        # batch is a (target_images, input_images) pair of stacked tensors
        print(f"len(batch): {len(batch)}")
        print(f"len(batch[0]): {len(batch[0])}")
        print(f"batch[0].shape: {batch[0].shape}")
        print(f"len(batch[1]): {len(batch[1])}")
        print(f"batch[1].shape: {batch[1].shape}")
        target_circle_images, input_circle_images = batch
        # note correct_radii.shape does not match predicted_radii.shape
        print(f"target_circle_images.shape: {target_circle_images.shape}")
        print(f"target_circle_images.dtype: {target_circle_images.dtype}")
        print(f"input_circle_images.shape: {input_circle_images.shape}")
        # check an untrained model accepts the batch and keeps spatial shape
        test_circle_segmenter = build_circle_segmenter()
        predicted_circle_images = test_circle_segmenter(input_circle_images)
        print(f"predicted_circle_images.shape: {predicted_circle_images.shape}")
        print(f"predicted_circle_images.dtype: {predicted_circle_images.dtype}")
        break  # only the first batch is needed for this smoke test

test_circle_image_dataloader()
# 100,000, no shuffle, works, 20 epochs is ok but sometimes training does not progress
# 100,000 with shuffling has more stable training
# 10,000, no shuffle, works, 50 epochs is ok
train_circle_image_loader = DataLoader(
    CircleImageDataset(circle_image_count=10000),
    batch_size=100,
    shuffle=True
)
# held-out data for per-epoch evaluation (no shuffle needed)
test_circle_image_loader = DataLoader(
    CircleImageDataset(circle_image_count=1000),
    batch_size=100
)
#validate_circle_image_loader = DataLoader(CircleImageDataset(circle_image_count=1000), batch_size=100)
len(train_circle_image_loader.dataset)
def train(
    circle_segmenter_model,
    optimizer,
    loss_function,
    train_dataloader,
    test_dataloader,
    epoch_count
):
    """Train and evaluate the segmentation model for epoch_count epochs.

    Each dataloader yields (target_images, input_images) pairs. Prints the
    per-sample mean training and test loss after every epoch. The model is
    moved to GPU when available, CPU otherwise.
    """
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    circle_segmenter_model.to(device)

    for epoch_i in range(epoch_count):
        training_loss = 0.0
        circle_segmenter_model.train()
        for correct_segmented_circle_images, circle_images in train_dataloader:
            optimizer.zero_grad()
            # torch calls circle_images 'inputs'
            circle_images = circle_images.to(device)
            correct_segmented_circle_images = correct_segmented_circle_images.to(device)
            predicted_segmented_circle_images = circle_segmenter_model(circle_images)
            loss = loss_function(predicted_segmented_circle_images, correct_segmented_circle_images)
            loss.backward()
            optimizer.step()
            training_loss += loss.item()
        # BUG FIX: normalise by the dataloader passed as an argument, not the
        # global train_circle_image_loader the original referenced.
        training_loss /= len(train_dataloader.dataset)

        test_loss = 0.0
        circle_segmenter_model.eval()
        # evaluation needs no autograd graph; no_grad saves memory and time
        with torch.no_grad():
            for correct_segmented_circle_images, circle_images in test_dataloader:
                circle_images = circle_images.to(device)
                correct_segmented_circle_images = correct_segmented_circle_images.to(device)
                predicted_segmented_circle_images = circle_segmenter_model(circle_images)
                loss = loss_function(predicted_segmented_circle_images, correct_segmented_circle_images)
                test_loss += loss.item()
        test_loss /= len(test_dataloader.dataset)

        print(
            'Epoch: {}, Training Loss: {:.2f}, Test Loss: {:.2f}'.format(
                epoch_i, training_loss, test_loss
            )
        )
import torch.optim

# train from scratch with Adam (library default learning rate) and
# per-pixel MSE between predicted and target segmentation images
circle_segmenter = build_circle_segmenter()
train(
    circle_segmenter,
    torch.optim.Adam(circle_segmenter.parameters()),
    torch.nn.MSELoss(),
    train_circle_image_loader,
    test_circle_image_loader,
    epoch_count=100
)
# try out the circle segmenter
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
circle_segmenter.eval()
a_circle_image_dataloader = DataLoader(CircleImageDataset(10), batch_size=1)
for a_circle_target_image, a_circle_input_image in a_circle_image_dataloader:
    print(f"input shape : {a_circle_input_image.shape}")
    a_segmented_circle_tensor = circle_segmenter(a_circle_input_image.to(device))
    # detach from the graph and move to CPU before converting to numpy
    a_segmented_circle_image = a_segmented_circle_tensor.cpu().detach().numpy()
    print(f"output shape: {a_segmented_circle_image.shape}")
    # one figure per sample: input on the left, model output on the right
    fig, axs = plt.subplots(nrows=1, ncols=2)
    #for i, (r, c) in enumerate(itertools.product(range(1), range(2))):
    #print('circle radiuses: {}'.format(circle_radiuses))
    axs[0].imshow(a_circle_input_image[0, 0, :, :], origin="lower")
    axs[1].imshow(a_segmented_circle_image[0, 0, :, :], origin="lower")
    #print(np.array(im))
```
| github_jupyter |
# Consulting Project
## Recommender Systems
Your final result should be in the form of a function that can take in a Spark DataFrame of a single customer's ratings for various meals and output their top 3 suggested meals. For example:
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn')
%matplotlib inline

df = pd.read_csv('movielens_ratings.csv')
df.describe().transpose()
df.corr()
sns.heatmap(df.corr(), cmap='coolwarm')

import numpy as np

# keep only ids 0-31 so the movielens movie ids can stand in for meal ids;
# larger ids become NaN and are dropped before fitting ALS below
df['mealskew'] = df['movieId'].apply(lambda id: np.nan if id > 31 else id)
df.describe().transpose()

# map each fake meal id onto a human-readable meal name
mealmap = { 2. : "Chicken Curry",
            3. : "Spicy Chicken Nuggest",
            5. : "Hamburger",
            9. : "Taco Surprise",
            11. : "Meatloaf",
            12. : "Ceaser Salad",
            15. : "BBQ Ribs",
            17. : "Sushi Plate",
            19. : "Cheesesteak Sandwhich",
            21. : "Lasagna",
            23. : "Orange Chicken",
            26. : "Spicy Beef Plate",
            27. : "Salmon with Mashed Potatoes",
            28. : "Penne Tomatoe Pasta",
            29. : "Pork Sliders",
            30. : "Vietnamese Sandwich",
            31. : "Chicken Wrap",
            np.nan: "Cowboy Burger",
            4. : "Pretzels and Cheese Plate",
            6. : "Spicy Pork Sliders",
            13. : "Mandarin Chicken PLate",
            14. : "Kung Pao Chicken",
            16. : "Fried Rice Plate",
            8. : "Chicken Chow Mein",
            10. : "Roasted Eggplant ",
            18. : "Pepperoni Pizza",
            22. : "Pulled Pork Plate",
            0. : "Cheese Pizza",
            1. : "Burrito",
            7. : "Nachos",
            24. : "Chili",
            20. : "Southwest Salad",
            25.: "Roast Beef Sandwich"}
df['meal_name'] = df['mealskew'].map(mealmap)
df.head()
df.to_csv('Meal_Info.csv',index=False)

from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('recconsulting').getOrCreate()
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS

data = spark.read.csv('Meal_Info.csv',inferSchema=True,header=True)
data.printSchema()
data.describe().show()
data.show()

# ALS cannot handle null item ids, so drop any row with missing values
data = data.na.drop(how='any')
(training, test) = data.randomSplit([0.8, 0.2])

# Build the recommendation model using ALS on the training data
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="mealskew", ratingCol="rating")
model = als.fit(training)

# Evaluate the model by computing the RMSE on the test data
predictions = model.transform(test)
predictions.show()
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))

# Single_user: score every meal this user rated in the test split and rank
single_user = test.filter(test['userId'] ==19).select(['movieId', 'userId', 'mealskew'])
single_user.show()
recommendations = model.transform(single_user)
recommendations.orderBy('prediction', ascending=False).show()
```
| github_jupyter |
```
data_paths = {
'Micro': "../data/runs/2018-11-10-micro-rad-khype1e6/",
'NNOrig': "../data/runs/2018-11-09-model188-equilibriation-penalty",
'NN': "../data/runs/2018-11-10-model188-khyp1e6-rerun/"
}
ng_path = "../data/processed/training.nc"
from toolz import valmap
from src.data.sam import SAMRun
from uwnet.thermo import compute_q2
import xarray as xr
import holoviews as hv
hv.extension('bokeh')
%opts Image[width=600, height=400, colorbar=True](cmap='viridis')
%opts Curve[width=400]
def plot_xy(da, dynamic=True):
return hv.Dataset(da).to.image(["x", "y"], dynamic=dynamic)
data_paths = {
'Micro': "../data/runs/2018-11-10-micro-rad-khype1e6/",
'NNOrig': "../data/runs/2018-11-09-model188-equilibriation-penalty",
'NN': "../data/runs/2018-11-10-model188-khyp1e6-rerun/"
}
ng_path = "../data/processed/training.nc"
ngaqua = xr.open_dataset(ng_path).isel(step=0)
runs = valmap(SAMRun, data_paths)
ngaqua = xr.open_dataset(ng_path).isel(step=0)
```
# Net Precip
## Spinup
```
def get_first_steps(ds):
    """Return time steps 1-3 of ds with time rebased to seconds from step 1."""
    steps = ds.isel(time=slice(1, 4))
    # time is stored in days; convert the offset from the first kept step to seconds
    steps['time'] = (steps.time - steps.time[0]) * 86400
    steps.time.attrs['units'] = 's'
    return steps

ds = runs['NN'].data_3d
first_step = get_first_steps(ds)
first_step.FQTNN.mean(['x']).plot(col='time')
```
Here is a comparison of P-E over these first few time steps:
```
# layer-mass-weighted vertical integral of FQTNN, sign-flipped and scaled by
# 1000 — presumably giving net precipitation P-E (see the title below); verify units
net_precip_nn = -(ngaqua.layer_mass * ds.FQTNN).sum('z')/1000
# convert the time offset from days to hours for the legend
net_precip_nn['time'] = (net_precip_nn.time -net_precip_nn.time[0])*24
net_precip_nn[[1, 5, 6, 7]].mean('x').plot(hue='time')
plt.title("Zonal mean P-E (time in hours)");
```
## Comparison to NG-Aqua
Let's open the NGAqua data and plot Q2
```
from uwnet.thermo import compute_q2

# Q2 over the first two time steps; time steps where Q2 is all-NaN are dropped
q2 = compute_q2(ngaqua.isel(time=slice(0, 2))).dropna('time')
```
Here is the actual mean Q2 in the first time step
```
q2.mean('x').plot()
```
Here is a comparison of the zonally averaged net precipitation.
```
# vertically integrate both Q2 (NG-Aqua truth) and FQTNN (NN prediction)
# with the same layer-mass weighting so they are directly comparable
q2_int = -(ngaqua.layer_mass * q2).sum('z')/1000
fqtnn_int = -(ngaqua.layer_mass * first_step.FQTNN).sum('z')/1000
q2_int.mean(['x']).plot(label='NGaqua')
fqtnn_int[0].mean('x').plot(label='NN-prediction')
plt.title("Zonal mean of Net Precip")
plt.legend()
```
There is some systematic difference, but overall it is too noisy to tell from the first time step
# Spin up of vertical velocity
```
# vertical velocity W at vertical index 8, every 3rd time step from step 4
plot_data = runs['NN'].data_3d.W[4::3, 8]
dmap = plot_xy(plot_data, dynamic=False)\
    .relabel(f"W at z={float(plot_data.z)}")
dmap
```
| github_jupyter |
# Deep Neural Network for Image Classification: Application
When you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course!
You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation.
**After this assignment you will be able to:**
- Build and apply a deep neural network to supervised learning.
Let's get started!
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
```
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *

%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

%load_ext autoreload
%autoreload 2

# fixed seed keeps the random calls consistent for grading
np.random.seed(1)
```
## 2 - Dataset
You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better!
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labelled as cat (1) or non-cat (0)
- a test set of m_test images labelled as cat and non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
Let's get more familiar with the dataset. Load the data by running the cell below.
```
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
```
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
```
# Example of a picture
index = 25
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")

# Explore your dataset
m_train = train_x_orig.shape[0]
# images are square, so one spatial dimension is enough
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]

print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
```
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.
<img src="images/imvectorkiank.png" style="width:450px;height:300px;">
<caption><center> <u>Figure 1</u>: Image to vector conversion. <br> </center></caption>
```
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T   # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T

# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255
test_x = test_x_flatten/255

print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
```
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector.
## 3 - Architecture of your model
Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.
You will build two different models:
- A 2-layer neural network
- An L-layer deep neural network
You will then compare the performance of these models, and also try out different values for $L$.
Let's look at the two architectures.
### 3.1 - 2-layer neural network
<img src="images/2layerNN_kiank.png" style="width:650px;height:400px;">
<caption><center> <u>Figure 2</u>: 2-layer neural network. <br> The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. </center></caption>
<u>Detailed Architecture of figure 2</u>:
- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$.
- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.
- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.
- You then repeat the same process.
- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias).
- Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat.
### 3.2 - L-layer deep neural network
It is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation:
<img src="images/LlayerNN_kiank.png" style="width:650px;height:400px;">
<caption><center> <u>Figure 3</u>: L-layer neural network. <br> The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***</center></caption>
<u>Detailed Architecture of figure 3</u>:
- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).
- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.
- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.
- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat.
### 3.3 - General methodology
As usual you will follow the Deep Learning methodology to build the model:
1. Initialize parameters / Define hyperparameters
2. Loop for num_iterations:
a. Forward propagation
b. Compute cost function
c. Backward propagation
d. Update parameters (using parameters, and grads from backprop)
3. Use trained parameters to predict labels
Let's now implement those two models!
## 4 - Two-layer neural network
**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters(n_x, n_h, n_y):
...
return parameters
def linear_activation_forward(A_prev, W, b, activation):
...
return A, cache
def compute_cost(AL, Y):
...
return cost
def linear_activation_backward(dA, cache, activation):
...
return dA_prev, dW, db
def update_parameters(parameters, grads, learning_rate):
...
return parameters
```
```
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288     # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)

# GRADED FUNCTION: two_layer_model

def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
    """
    Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (n_x, number of examples)
    Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
    layers_dims -- dimensions of the layers (n_x, n_h, n_y)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- If set to True, this will print the cost every 100 iterations

    Returns:
    parameters -- a dictionary containing W1, W2, b1, and b2
    """

    np.random.seed(1)  # fixed seed so costs match the expected output below
    grads = {}
    costs = []                              # to keep track of the cost
    m = X.shape[1]                          # number of examples
    (n_x, n_h, n_y) = layers_dims

    # Initialize parameters dictionary, by calling one of the functions you'd previously implemented
    ### START CODE HERE ### (≈ 1 line of code)
    parameters = initialize_parameters(n_x, n_h, n_y)
    ### END CODE HERE ###

    # Get W1, b1, W2 and b2 from the dictionary parameters.
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
        ### START CODE HERE ### (≈ 2 lines of code)
        A1, cache1 = linear_activation_forward(X, W1, b1, activation="relu")
        A2, cache2 = linear_activation_forward(A1, W2, b2, activation="sigmoid")
        ### END CODE HERE ###

        # Compute cost
        ### START CODE HERE ### (≈ 1 line of code)
        cost = compute_cost(A2, Y)
        ### END CODE HERE ###

        # Initializing backward propagation: derivative of the cross-entropy cost with respect to A2
        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))

        # Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
        ### START CODE HERE ### (≈ 2 lines of code)
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation="sigmoid")
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation="relu")
        ### END CODE HERE ###

        # Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2

        # Update parameters.
        ### START CODE HERE ### (approx. 1 line of code)
        parameters = update_parameters(parameters, grads, learning_rate)
        ### END CODE HERE ###

        # Retrieve W1, b1, W2, b2 from parameters
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]

        # Print and record the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (โฌ) on the upper bar of the notebook to stop the cell and try to find your error.
```
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
```
**Expected Output**:
<table>
<tr>
<td> **Cost after iteration 0**</td>
<td> 0.6930497356599888 </td>
</tr>
<tr>
<td> **Cost after iteration 100**</td>
<td> 0.6464320953428849 </td>
</tr>
<tr>
<td> **...**</td>
<td> ... </td>
</tr>
<tr>
<td> **Cost after iteration 2400**</td>
<td> 0.048554785628770226 </td>
</tr>
</table>
Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.
Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
```
predictions_train = predict(train_x, train_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Accuracy**</td>
<td> 1.0 </td>
</tr>
</table>
```
predictions_test = predict(test_x, test_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Accuracy**</td>
<td> 0.72 </td>
</tr>
</table>
**Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting.
Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.
## 5 - L-layer Neural Network
**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters_deep(layers_dims):
...
return parameters
def L_model_forward(X, parameters):
...
return AL, caches
def compute_cost(AL, Y):
...
return cost
def L_model_backward(AL, Y, caches):
...
return grads
def update_parameters(parameters, grads, learning_rate):
...
return parameters
```
```
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] #  4-layer model

# GRADED FUNCTION: L_layer_model

def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
    """
    Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    Arguments:
    X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    np.random.seed(1)  # fixed seed so costs match the expected output below
    costs = []                         # keep track of cost

    # Parameters initialization. (≈ 1 line of code)
    ### START CODE HERE ###
    parameters = initialize_parameters_deep(layers_dims)
    ### END CODE HERE ###

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        ### START CODE HERE ### (≈ 1 line of code)
        AL, caches = L_model_forward(X, parameters)
        ### END CODE HERE ###

        # Compute cost.
        ### START CODE HERE ### (≈ 1 line of code)
        cost = compute_cost(AL, Y)
        ### END CODE HERE ###

        # Backward propagation.
        ### START CODE HERE ### (≈ 1 line of code)
        grads = L_model_backward(AL, Y, caches)
        ### END CODE HERE ###

        # Update parameters.
        ### START CODE HERE ### (≈ 1 line of code)
        parameters = update_parameters(parameters, grads, learning_rate)
        ### END CODE HERE ###

        # Print and record the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
You will now train the model as a 4-layer neural network.
Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (โฌ) on the upper bar of the notebook to stop the cell and try to find your error.
```
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
```
**Expected Output**:
<table>
<tr>
<td> **Cost after iteration 0**</td>
<td> 0.771749 </td>
</tr>
<tr>
<td> **Cost after iteration 100**</td>
<td> 0.672053 </td>
</tr>
<tr>
<td> **...**</td>
<td> ... </td>
</tr>
<tr>
<td> **Cost after iteration 2400**</td>
<td> 0.092878 </td>
</tr>
</table>
```
pred_train = predict(train_x, train_y, parameters)
```
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.985645933014
</td>
</tr>
</table>
```
pred_test = predict(test_x, test_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Test Accuracy**</td>
<td> 0.8 </td>
</tr>
</table>
Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set.
This is good performance for this task. Nice job!
Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course).
## 6) Results Analysis
First, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
```
print_mislabeled_images(classes, test_x, test_y, pred_test)
```
**A few types of images the model tends to do poorly on include:**
- Cat body in an unusual position
- Cat appears against a background of a similar color
- Unusual cat color and species
- Camera Angle
- Brightness of the picture
- Scale variation (cat is very large or small in image)
## 7) Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
from PIL import Image

# Image.ANTIALIAS was deprecated and removed in Pillow 10;
# Image.LANCZOS is the same resampling filter under its current name.
fileImage = Image.open("test.png").convert("RGB").resize([num_px, num_px], Image.LANCZOS)
my_label_y = [1]  # the true class of your image (1 -> cat, 0 -> non-cat)
image = np.array(fileImage)
# Flatten to a (num_px*num_px*3, 1) column vector and scale pixels to [0, 1],
# matching the preprocessing applied to the training set.
my_image = image.reshape(num_px * num_px * 3, 1)
my_image = my_image / 255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
**References**:
- for auto-reloading external module: http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
```
import pickle  # For storing an array in a '.pickle' file for later use

# One filename for both save and load. The original saved to
# "trainingDataset.pickle" but loaded from "trainingDatasetL-LayerNN.pickle",
# so the round-trip could never work against the file just written.
pickle_filename = "trainingDataset.pickle"

# If you want to save multiple variables, put them into an array
my_content = [train_x_orig, train_y, test_x_orig, test_y, classes]

# Saving variables in an array.
# For compatibility we use open(filename, 'wb') for non-text files and
# open(filename, 'w') for text files.
with open(pickle_filename, 'wb') as fileToBeWritten:
    pickle.dump(my_content, fileToBeWritten)

# Loading variables (fixed typo: 'ttrain_x_orig' -> 'train_x_orig', which
# silently left the original train_x_orig unreplaced).
with open(pickle_filename, 'rb') as fileToBeRead:
    train_x_orig, train_y, test_x_orig, test_y, classes = pickle.load(fileToBeRead)
```
| github_jupyter |
**This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/data-leakage).**
---
Most people find target leakage very tricky until they've thought about it for a long time.
So, before trying to think about leakage in the housing price example, we'll go through a few examples in other applications. Things will feel more familiar once you come back to a question about house prices.
# Setup
The questions below will give you feedback on your answers. Run the following cell to set up the feedback system.
```
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex7 import *
print("Setup Complete")
```
# Step 1: The Data Science of Shoelaces
Nike has hired you as a data science consultant to help them save money on shoe materials. Your first assignment is to review a model one of their employees built to predict how many shoelaces they'll need each month. The features going into the machine learning model include:
- The current month (January, February, etc)
- Advertising expenditures in the previous month
- Various macroeconomic features (like the unemployment rate) as of the beginning of the current month
- The amount of leather they ended up using in the current month
The results show the model is almost perfectly accurate if you include the feature about how much leather they used. But it is only moderately accurate if you leave that feature out. You realize this is because the amount of leather they use is a perfect indicator of how many shoes they produce, which in turn tells you how many shoelaces they need.
Do you think the _leather used_ feature constitutes a source of data leakage? If your answer is "it depends," what does it depend on?
After you have thought about your answer, check it against the solution below.
```
# Check your answer (Run this code cell to receive credit!)
q_1.check()
```
# Step 2: Return of the Shoelaces
You have a new idea. You could use the amount of leather Nike ordered (rather than the amount they actually used) leading up to a given month as a predictor in your shoelace model.
Does this change your answer about whether there is a leakage problem? If you answer "it depends," what does it depend on?
```
# Check your answer (Run this code cell to receive credit!)
q_2.check()
```
# Step 3: Getting Rich With Cryptocurrencies?
You saved Nike so much money that they gave you a bonus. Congratulations.
Your friend, who is also a data scientist, says he has built a model that will let you turn your bonus into millions of dollars. Specifically, his model predicts the price of a new cryptocurrency (like Bitcoin, but a newer one) one day ahead of the moment of prediction. His plan is to purchase the cryptocurrency whenever the model says the price of the currency (in dollars) is about to go up.
The most important features in his model are:
- Current price of the currency
- Amount of the currency sold in the last 24 hours
- Change in the currency price in the last 24 hours
- Change in the currency price in the last 1 hour
- Number of new tweets in the last 24 hours that mention the currency
The value of the cryptocurrency in dollars has fluctuated up and down by over $\$$100 in the last year, and yet his model's average error is less than $\$$1. He says this is proof his model is accurate, and you should invest with him, buying the currency whenever the model says it is about to go up.
Is he right? If there is a problem with his model, what is it?
```
# Check your answer (Run this code cell to receive credit!)
q_3.check()
```
# Step 4: Preventing Infections
An agency that provides healthcare wants to predict which patients from a rare surgery are at risk of infection, so it can alert the nurses to be especially careful when following up with those patients.
You want to build a model. Each row in the modeling dataset will be a single patient who received the surgery, and the prediction target will be whether they got an infection.
Some surgeons may do the procedure in a manner that raises or lowers the risk of infection. But how can you best incorporate the surgeon information into the model?
You have a clever idea.
1. Take all surgeries by each surgeon and calculate the infection rate among those surgeons.
2. For each patient in the data, find out who the surgeon was and plug in that surgeon's average infection rate as a feature.
Does this pose any target leakage issues?
Does it pose any train-test contamination issues?
```
# Check your answer (Run this code cell to receive credit!)
q_4.check()
```
# Step 5: Housing Prices
You will build a model to predict housing prices. The model will be deployed on an ongoing basis, to predict the price of a new house when a description is added to a website. Here are four features that could be used as predictors.
1. Size of the house (in square meters)
2. Average sales price of homes in the same neighborhood
3. Latitude and longitude of the house
4. Whether the house has a basement
You have historic data to train and validate the model.
Which of the features is most likely to be a source of leakage?
```
# Fill in the line below with one of 1, 2, 3 or 4.
# Answer: feature 2 (average neighborhood sale price) — presumably because
# the average could be computed from sales that occur after, or include,
# the very house being predicted; verify against q_5.solution().
potential_leakage_feature = 2

# Check your answer
q_5.check()
#q_5.hint()
#q_5.solution()
```
# Conclusion
Leakage is a hard and subtle issue. You should be proud if you picked up on the issues in these examples.
Now you have the tools to make highly accurate models, and pick up on the most difficult practical problems that arise with applying these models to solve real problems.
There is still a lot of room to build knowledge and experience. Try out a [Competition](https://www.kaggle.com/competitions) or look through our [Datasets](https://kaggle.com/datasets) to practice your new skills.
Again, Congratulations!
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161289) to chat with other Learners.*
| github_jupyter |
# Signals and Sampling
In this notebook, we will be exploring how signals look, how they are processed and sampled. We will be using the healthy cough sound to explore these properties of signals.
This script is based on the Standford MIR project found [here](https://github.com/stevetjoa/stanford-mir).
Use [librosa.load](https://librosa.github.io/librosa/generated/librosa.core.load.html#librosa.core.load) to load an audio file into an audio array. Return both the audio array as well as the sample rate:
```
import librosa
fileName = '/home/shakes/Dev/workspace/trunk/courses/ELEC3004/Signal_sample/HealthyCoughs.wav'
x, sr = librosa.load(fileName)
```
If you receive an error with librosa.load, you may need to [install ffmpeg](https://librosa.github.io/librosa/install.html#ffmpeg).
Display the length of the audio array and sample rate:
```
# Show the number of audio samples and the sample rate.
# (Converted Python 2 print statements to print() calls — the originals are
# a SyntaxError under Python 3, which the rest of this notebook targets.)
print(x.shape)
print(sr)
```
### Visualizing Audio
In order to display plots inside the Jupyter notebook, run the following commands, preferably at the top of your notebook:
```
%matplotlib inline
import seaborn # optional
import matplotlib.pyplot as plt
import librosa.display
```
Plot the audio array using [librosa.display.waveplot](https://librosa.github.io/librosa/generated/librosa.display.waveplot.html#librosa.display.waveplot):
```
plt.figure(figsize=(12, 4))
librosa.display.waveplot(x, sr=sr)
```
Display a spectrogram using [librosa.display.specshow](https://librosa.github.io/librosa/generated/librosa.display.specshow.html):
```
X = librosa.stft(x)
Xdb = librosa.amplitude_to_db(X)
plt.figure(figsize=(12, 5))
librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='hz')
```
### Playing and Writing Audio
Using [IPython.display.Audio](http://ipython.org/ipython-doc/2/api/generated/IPython.lib.display.html#IPython.lib.display.Audio), you can play an audio file:
```
import IPython.display as ipd
ipd.Audio(fileName) # load a local WAV file
```
Audio can also accept a NumPy array. Let's synthesize a pure tone at 440 Hz:
```
import numpy

# Synthesis parameters for a pure reference tone.
sr = 22050  # sample rate in Hz
T = 2.0     # duration in seconds
# Sample instants: T*sr evenly spaced points covering [0, T).
t = numpy.linspace(0.0, T, int(sr * T), endpoint=False)
# 440 Hz (concert A) sine wave, at half amplitude to avoid clipping.
tone = numpy.sin(2 * numpy.pi * 440 * t) * 0.5
```
Listen to the audio array:
```
ipd.Audio(tone, rate=sr) # load a NumPy array
```
Let's do some basic processing to the cough audio signal. We will do a low pass filter of the signal the old fashioned way
```
# Low-pass filter the signal the old-fashioned way: zero FFT bins above a cutoff.
X = numpy.fft.rfft(x)

# Determine the frequencies of the (one-sided) FFT.
# Fixed: rfftfreq, not fftfreq — rfft returns only the non-negative half of
# the spectrum, so fftfreq(X.size) mislabels the bins and includes negative
# frequencies that the mask would then zero by mistake.
W = numpy.fft.rfftfreq(x.size, d=1.0/sr)
# If our original signal time was in seconds, this is now in Hz

# low pass signal: remove everything ABOVE the cutoff.
# (The original zeroed W < 100, which removes the LOW frequencies — a
# high-pass — contradicting the "low pass filter" described in the text.)
cutoff_hz = 100
cut_X = X.copy()
cut_X[W > cutoff_hz] = 0
cut_signal = numpy.fft.irfft(cut_X, n=x.size)  # n=x.size preserves the original length
```
Plot the audio array using [librosa.display.waveplot](https://librosa.github.io/librosa/generated/librosa.display.waveplot.html#librosa.display.waveplot) or [librosa.display.specshow](https://librosa.github.io/librosa/generated/librosa.display.specshow.html):
```
#plt.figure(figsize=(12, 4))
#librosa.display.waveplot(cut_signal, sr=sr)

# Spectrogram of the filtered signal: short-time Fourier transform,
# magnitude converted to decibels for display.
X = librosa.stft(cut_signal)
Xdb = librosa.amplitude_to_db(X)
plt.figure(figsize=(12, 5))
librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='hz')
```
Play the result
```
ipd.Audio(cut_signal, rate=sr) # load a NumPy array
```
librosa.output.write_wav saves a NumPy array to a WAV file. (Note: the `librosa.output` module was removed in librosa 0.8; on newer versions use `soundfile.write` instead.)
```
# Save the low-pass-filtered signal for later listening.
# (Fixed: the original wrote the unfiltered `x` even though the filename
# and the surrounding text refer to the low-pass result.)
outFileName = '/home/shakes/Dev/workspace/trunk/courses/ELEC3004/Signal_sample/HealthyCough_lpass.wav'
librosa.output.write_wav(outFileName, cut_signal, sr)
```
How would you do high pass filtering?
| github_jupyter |
# Setup
---
```
from graphqlclient import GraphQLClient
import pandas as pd
ENDPOINT = "https://api.thegraph.com/subgraphs/name/blocklytics/bancor"
client = GraphQLClient(ENDPOINT)
```
# Fetch data
---
```
import json  # needed for json.loads below — it was never imported in this notebook

# Results must be paginated.
# Subgraphs return a maximum of 100 rows.
limit = 100
offset = 0
fetching_results = True
converters = []

# Fetch paginated results
while fetching_results:
    # This query manually removes certain converters
    # See https://blocklytics.org/blog/bancor-subgraph/
    QUERY = """
    {{
      converters(
        first:{0},
        skip:{1},
        where: {{
          id_not_in: ["0x77feb788c747a701eb65b8d3b522302aaf26b1e2", "0xcbc6a023eb975a1e2630223a7959988948e664f3", "0x11614c5f1eb215ecffe657da56d3dd12df395dc8", "0x2769eb86e3acdda921c4f36cfe6cad035d95d31b", "0x2ac0e433c3c9ad816db79852d6f933b0b117aefe", "0x37c88474b5d6c593bbd2e4ce16635c08f8215b1e", "0x445556b7215349b205997aaaf6c6dfa258eb029d", "0x46ffcdc6d8e6ed69f124d944bbfe0ac74f8fcf7f", "0x587044b74004e3d5ef2d453b7f8d198d9e4cb558"]
        }}
      ) {{
        id
        smartToken {{
          id
        }}
        tokenBalances {{
          token {{
            id
            symbol
            name
            decimals
          }}
          balance
        }}
      }}
    }}
    """.format(limit, offset)

    result = json.loads(client.execute(QUERY))
    converters += result['data']['converters']

    # Prepare for pagination: a full page means there may be more rows.
    result_length = len(result['data']['converters'])
    if limit == result_length:
        offset += limit
    else:
        fetching_results = False
# Load data into a new df: one row per converter, with a "base" token
# (BNT or USDB) and the other reserve token plus their liquidities.
df = pd.DataFrame()

# Iterate over converters
i = 0
for converter in converters:
    # Skip empty converters
    if len(converter['tokenBalances']) == 0:
        continue

    converter_address = converter['id']
    smart_token_address = converter['smartToken']['id']
    df.at[i, 'exchange'] = converter_address

    # Iterate over token balances
    for tokenBalance in converter['tokenBalances']:
        token = tokenBalance['token']['id']

        # Skip converter's smart token
        # See https://blocklytics.org/blog/bancor-subgraph/
        if token == smart_token_address:
            continue

        # Handle remaining token details
        balance = tokenBalance['balance']
        symbol = tokenBalance['token']['symbol']
        name = tokenBalance['token']['name']
        decimals = tokenBalance['token']['decimals']

        # NOTE(review): bare except silently hides any error here, not just
        # missing decimals — consider narrowing to (TypeError, ValueError).
        try: # try/catch for missing token details
            balance_converted = float(balance) / 10 ** float(decimals)
        except:
            print("Could not find decimals for {0}. Assumed 18".format(token))
            balance_converted = float(balance) / 10 ** float(18)

        # Set base token to BNT or USDB
        if 'base' in df.columns and df.base.isna().iloc[i] == False:
            # Base has already been set for this converter
            df.at[i, 'token'] = token
            df.at[i, 'tokenSymbol'] = symbol
            df.at[i, 'tokenName'] = name
            df.at[i, 'tokenLiquidity'] = balance_converted
        else:
            # No base has been set for this converter
            # (addresses below are the BNT and USDB token contracts)
            if token == '0x1f573d6fb3f13d689ff844b4ce37794d79a7ff1c' or token == '0x309627af60f0926daa6041b8279484312f2bf060':
                # Bancor converts use BNT or USDB base
                df.at[i, 'base'] = token
                df.at[i, 'baseSymbol'] = symbol
                # df.at[i, 'baseName'] = name
                df.at[i, 'baseLiquidity'] = balance_converted
            else:
                df.at[i, 'token'] = token
                df.at[i, 'tokenSymbol'] = symbol
                # df.at[i, 'tokenName'] = name
                df.at[i, 'tokenLiquidity'] = balance_converted
    i += 1

# Implied token price in the base currency, assuming equal reserve weights.
df['basePrice'] = df['baseLiquidity'] / df['tokenLiquidity'] # Assumes 50% weight
print(df.shape)
```
# Result
---
## USDB Converters
```
df[df.baseSymbol == "USDB"][['baseSymbol', 'baseLiquidity', 'tokenSymbol', 'tokenLiquidity', 'basePrice']]\
.sort_values(by='baseLiquidity', ascending=False)\
.reset_index(drop=True)\
.head(10)
```
## BNT Converters
```
df[df.baseSymbol == "BNT"][['baseSymbol', 'baseLiquidity', 'tokenSymbol', 'tokenLiquidity', 'basePrice']]\
.sort_values(by='baseLiquidity', ascending=False)\
.reset_index(drop=True)\
.head(10)
```
| github_jupyter |
# The TxTl Toolbox in BioCRNpyler
### A recreation of the original MATLAB TxTl Toolbox, as seen in [Singhal et al. 2020](https://www.biorxiv.org/content/10.1101/2020.08.05.237990v1)
This tutorial shows how to use the EnergyTxTlExtract Mixture with a parameter file derived from the paper above. This Mixture is a simplification of the models used in the original toolbox. Notable changes include:
1. Using only a single nucleotide species NTPs (instead of GTP, ATP, UTP, and CTP)
2. A slightly different NTP regeneration Mechanism which explicitly incorporates the amount of fuel, 3PGA, put into the extract and metabolic leak of the extract.
3. Degradation of RNA bound to ribosomes (which releases the ribosome).
4. A modification of the Energy consumption reactions for Transcription and Translation so that there is only a single binding reaction.
## The CRN displayed below shows the energy utilization process model
```
from biocrnpyler import *

# Build a TxTl extract mixture (no DNA yet) and compile its reaction network.
#A = DNAassembly("A", promoter = "P", rbs = "rbs")
E = EnergyTxTlExtract(parameter_file = "txtl_toolbox_parameters.txt")
CRN = E.compile_crn()
print(CRN.pretty_print())

try:
    import numpy as np
    maxtime = 30000  # simulation horizon in seconds (~8.3 hours)
    timepoints = np.arange(0, maxtime, 100)
    R = CRN.simulate_with_bioscrape_via_sbml(timepoints)
    if R is not None:
        %matplotlib inline
        import pylab as plt
        # Plot the depletion of the extract's energy resources over time.
        plt.plot(timepoints, R[str(E.ntps.get_species())], label = E.ntps.get_species())
        plt.plot(timepoints, R[str(E.amino_acids.get_species())], label = E.amino_acids.get_species())
        plt.plot(timepoints, R[str(E.fuel.get_species())], label = E.fuel.get_species())
        # Relabel the x axis in hours (3600 s per tick).
        plt.xticks(np.arange(0, maxtime, 3600), [str(i) for i in range(0, int(np.ceil(maxtime/3600)))])
        plt.legend()
except ModuleNotFoundError:
    print('please install the plotting libraries: pip install biocrnpyler[all]')
```
## Adding a DNA assembly
This will produce protein expression, but only for a limited time: expression stops once the extract's energy resources (fuel, NTPs, and amino acids) are depleted.
```
# Same extract as above, but now with a DNA assembly so transcription and
# translation consume the energy resources.
A = DNAassembly("A", promoter = "P", rbs = "rbs", initial_concentration = 1*10**-6)
E = EnergyTxTlExtract(components = [A], parameter_file = "txtl_toolbox_parameters.txt")
CRN = E.compile_crn()
print(CRN.pretty_print())

try:
    maxtime = 30000  # simulation horizon in seconds
    timepoints = np.arange(0, maxtime, 100)
    R = CRN.simulate_with_bioscrape_via_sbml(timepoints)
    if R is not None:
        %matplotlib inline
        # Left panel: energy resources being consumed.
        plt.subplot(121)
        plt.plot(timepoints, R[str(E.ntps.get_species())], label = E.ntps.get_species())
        plt.plot(timepoints, R[str(E.amino_acids.get_species())], label = E.amino_acids.get_species())
        plt.plot(timepoints, R[str(E.fuel.get_species())], label = E.fuel.get_species())
        plt.xticks(np.arange(0, maxtime, 3600), [str(i) for i in range(0, int(np.ceil(maxtime/3600)))])
        plt.legend()
        # Right panel: transcript and protein produced from assembly A.
        plt.subplot(122)
        plt.plot(timepoints, R[str(A.transcript)], label = A.transcript)
        plt.plot(timepoints, R[str(A.protein)], label = A.protein)
        plt.xticks(np.arange(0, maxtime, 3600), [str(i) for i in range(0, int(np.ceil(maxtime/3600)))])
        plt.legend()
except ModuleNotFoundError:
    print('please install the plotting libraries: pip install biocrnpyler[all]')
```
| github_jupyter |
Deep Learning
=============
Assignment 1
------------
The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.
This notebook uses the [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) dataset to be used with python experiments. This dataset is designed to look like the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.
```
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Config the matplotlib backend as plotting inline in IPython
%matplotlib inline
```
First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labeled examples. Given these sizes, it should be possible to train models quickly on any machine.
```
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
    """urlretrieve reporthook that prints download progress.

    Mostly intended for users with slow internet connections: emits the
    percentage at every 5% milestone and a single dot for every other 1%
    change, writing at most once per whole-percent step.
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)
    if percent == last_percent_reported:
        return  # no whole-percent change since last report
    marker = "%s%%" % percent if percent % 5 == 0 else "."
    sys.stdout.write(marker)
    sys.stdout.flush()
    last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size.

    Fetches `url + filename` into `data_root` (module-level globals) unless
    the destination already exists, then verifies the on-disk size against
    `expected_bytes`, raising on a mismatch (e.g. a truncated download).
    Returns the destination path.
    """
    dest_filename = os.path.join(data_root, filename)
    if force or not os.path.exists(dest_filename):
        print('Attempting to download:', filename)
        # reporthook prints incremental progress; see download_progress_hook.
        filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    statinfo = os.stat(dest_filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', dest_filename)
    else:
        raise Exception(
            'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
    return dest_filename

# Fetch the notMNIST training and test archives (expected sizes known a priori).
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
```
Extract the dataset from the compressed .tar.gz file.
This should give you a set of directories, labeled A through J.
```
num_classes = 10  # letters 'A' through 'J'
np.random.seed(133)  # fixed seed so later shuffling is reproducible

def maybe_extract(filename, force=False):
    """Extract a .tar.gz archive into data_root unless already extracted.

    Returns the sorted list of per-class folders found under the archive
    root; raises if the count does not equal `num_classes`.
    """
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('%s already present - Skipping extraction of %s.' % (root, filename))
    else:
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall(data_root)
        tar.close()
    data_folders = [
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))]
    if len(data_folders) != num_classes:
        raise Exception(
            'Expected %d folders, one per class. Found %d instead.' % (
                num_classes, len(data_folders)))
    print(data_folders)
    return data_folders

train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
```
---
Problem 1
---------
Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
---
Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road.
A few images might not be readable, we'll just skip them.
```
image_size = 28  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.

def load_letter(folder, min_num_images):
    """Load the data for a single letter label.

    Reads every image file in `folder`, normalizes pixel values to roughly
    [-0.5, 0.5] (zero mean, std ~0.5), skips unreadable files, and returns a
    float32 tensor of shape (num_images, image_size, image_size). Raises if
    fewer than `min_num_images` usable images are found.
    """
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            # Center pixel values around 0 to make training easier later.
            image_data = (imageio.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except (IOError, ValueError) as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')

    dataset = dataset[0:num_images, :, :]  # trim slots left by skipped files
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset

def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each class folder into '<folder>.pickle' unless it exists.

    Returns the list of pickle filenames, one per class.
    """
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # You may override by setting force=True.
            print('%s already present - Skipping pickling.' % set_filename)
        else:
            print('Pickling %s.' % set_filename)
            dataset = load_letter(folder, min_num_images_per_class)
            try:
                with open(set_filename, 'wb') as f:
                    pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                print('Unable to save data to', set_filename, ':', e)
    return dataset_names

train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
```
---
Problem 2
---------
Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.
---
---
Problem 3
---------
Another check: we expect the data to be balanced across classes. Verify that.
---
Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune `train_size` as needed. The labels will be stored into a separate array of integers 0 through 9.
Also create a validation dataset for hyperparameter tuning.
```
def make_arrays(nb_rows, img_size):
    """Allocate an uninitialized (dataset, labels) pair for `nb_rows` images.

    Returns float32 images of shape (nb_rows, img_size, img_size) and int32
    labels of shape (nb_rows,); returns (None, None) when nb_rows is 0.
    """
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Merge per-class pickles into balanced train/validation arrays.

    Each class contributes train_size//num_classes training rows and
    valid_size//num_classes validation rows; letters are shuffled within a
    class before slicing so the splits are random. Returns
    (valid_dataset, valid_labels, train_dataset, train_labels); the
    validation pair is (None, None) when valid_size is 0.
    """
    num_classes = len(pickle_files)
    valid_dataset, valid_labels = make_arrays(valid_size, image_size)
    train_dataset, train_labels = make_arrays(train_size, image_size)
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes

    # Running write positions into the output arrays.
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class+tsize_per_class  # rows consumed per class pickle
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                # let's shuffle the letters to have random validation and training set
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    # First vsize_per_class rows go to the validation split.
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class

                # Next tsize_per_class rows go to the training split.
                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise

    return valid_dataset, valid_labels, train_dataset, train_labels

train_size = 200000
valid_size = 10000
test_size = 10000

valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)

print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
```
Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
```
def randomize(dataset, labels):
    """Shuffle images and labels in unison using one random permutation."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
# Apply the same in-unison shuffling to each of the three splits.
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
```
---
Problem 4
---------
Convince yourself that the data is still good after shuffling!
---
Finally, let's save the data for later reuse:
```
# Persist all six arrays in a single pickle for later notebooks.
pickle_file = os.path.join(data_root, 'notMNIST.pickle')

try:
    # 'with' guarantees the file handle is closed even if pickling fails
    # (the original opened/closed manually and leaked the handle on error).
    with open(pickle_file, 'wb') as f:
        save = {
            'train_dataset': train_dataset,
            'train_labels': train_labels,
            'valid_dataset': valid_dataset,
            'valid_labels': valid_labels,
            'test_dataset': test_dataset,
            'test_labels': test_labels,
        }
        pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise

statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
```
---
Problem 5
---------
By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but are actually ok if you expect to see training samples recur when you use it.
Measure how much overlap there is between training, validation and test samples.
Optional questions:
- What about near duplicates between datasets? (images that are almost identical)
- Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.
---
---
Problem 6
---------
Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.
Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model.
Optional question: train an off-the-shelf model on all the data!
---
| github_jupyter |
# LAB 03: Basic Feature Engineering in Keras
**Learning Objectives**
1. Create an input pipeline using tf.data
2. Engineer features to create categorical, crossed, and numerical feature columns
## Introduction
In this lab, we utilize feature engineering to improve the prediction of housing prices using a Keras Sequential Model.
Each learning objective will correspond to a __#TODO__ in the notebook where you will complete the notebook cell's code before running. Refer to the [solution](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/feature_engineering/solutions/3_keras_basic_feat_eng.ipynb) for reference.
Start by importing the necessary libraries for this lab.
```
# Install Sklearn
!python3 -m pip install --user sklearn
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1 || pip install tensorflow==2.1
```
**Note:** After executing the above cell you will see the output
`tensorflow==2.1.0` that is the installed version of tensorflow.
```
import os
import tensorflow.keras
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column as fc
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from keras.utils import plot_model
print("TensorFlow version: ",tf.version.VERSION)
```
Many of the Google Machine Learning Courses Programming Exercises use the [California Housing Dataset](https://developers.google.com/machine-learning/crash-course/california-housing-data-description
), which contains data drawn from the 1990 U.S. Census. Our lab dataset has been pre-processed so that there are no missing values.
First, let's download the raw .csv data by copying the data from a cloud storage bucket.
```
if not os.path.isdir("../data"):
os.makedirs("../data")
!gsutil cp gs://cloud-training-demos/feat_eng/housing/housing_pre-proc.csv ../data
!ls -l ../data/
```
Now, let's read in the dataset just copied from the cloud storage bucket and create a Pandas dataframe.
```
# Load the pre-processed California housing data into a DataFrame.
# NOTE(review): `error_bad_lines` was deprecated in pandas 1.3 and removed
# in 2.0 (use on_bad_lines='skip') — confirm the pandas version pinned for
# this lab before changing it.
housing_df = pd.read_csv('../data/housing_pre-proc.csv', error_bad_lines=False)
housing_df.head()
```
We can use .describe() to see some summary statistics for the numeric fields in our dataframe. Note, for example, the count row and corresponding columns. The count shows 20433.000000 for all feature columns. Thus, there are no missing values.
```
housing_df.describe()
```
#### Split the dataset for ML
The dataset we loaded was a single CSV file. We will split this into train, validation, and test sets.
```
train, test = train_test_split(housing_df, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
```
Now, we need to output the split files. We will specifically need the test.csv later for testing. You should see the files appear in the home directory.
```
train.to_csv('../data/housing-train.csv', encoding='utf-8', index=False)
val.to_csv('../data/housing-val.csv', encoding='utf-8', index=False)
test.to_csv('../data/housing-test.csv', encoding='utf-8', index=False)
!head ../data/housing*.csv
```
## Lab Task 1: Create an input pipeline using tf.data
Next, we will wrap the dataframes with [tf.data](https://www.tensorflow.org/guide/datasets). This will enable us to use feature columns as a bridge to map from the columns in the Pandas dataframe to features used to train the model.
Here, we create an input pipeline using tf.data. This function is missing two lines. Correct and run the cell.
```
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
    """Wrap a Pandas dataframe in a batched (optionally shuffled) tf.data.Dataset.

    The 'median_house_value' column is popped off and used as the label;
    the remaining columns become a dict of feature tensors.
    """
    dataframe = dataframe.copy()
    # TODO 1a -- solution: split off the label column, then build the dataset
    # from (features, label) pairs.
    labels = dataframe.pop('median_house_value')
    ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(dataframe))
    ds = ds.batch(batch_size)
    return ds
```
Next we initialize the training and validation datasets.
```
batch_size = 32
train_ds = df_to_dataset(train)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
```
Now that we have created the input pipeline, let's call it to see the format of the data it returns. We have used a small batch size to keep the output readable.
```
# TODO 1b -- Your code here
```
We can see that the dataset returns a dictionary of column names (from the dataframe) that map to column values from rows in the dataframe.
#### Numeric columns
The output of a feature column becomes the input to the model. A numeric is the simplest type of column. It is used to represent real valued features. When using this column, your model will receive the column value from the dataframe unchanged.
In the California housing prices dataset, most columns from the dataframe are numeric. Let's create a variable called **numeric_cols** to hold only the numerical feature columns.
```
# TODO 1c -- Your code here
```
#### Scaler function
It is very important for numerical variables to get scaled before they are "fed" into the neural network. Here we use min-max scaling. Here we are creating a function named 'get_scal' which takes a list of numerical features and returns a 'minmax' function, which will be used in tf.feature_column.numeric_column() as normalizer_fn in parameters. 'Minmax' function itself takes a 'numerical' number from a particular feature and return scaled value of that number.
Next, we scale the numerical feature columns that we assigned to the variable "numeric cols".
```
# Scalar def get_scal(feature):
# TODO 1d -- Your code here
# TODO 1e -- Your code here
```
Next, we should validate the total number of feature columns. Compare this number to the number of numeric features you input earlier.
```
# Fixed typo in the printed message ("coLumns" -> "columns").
print('Total number of feature columns: ', len(feature_columns))
```
### Using the Keras Sequential Model
Next, we will run this cell to compile and fit the Keras Sequential model.
```
# Model create
feature_layer = tf.keras.layers.DenseFeatures(feature_columns, dtype='float64')
model = tf.keras.Sequential([
feature_layer,
layers.Dense(12, input_dim=8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(1, activation='linear', name='median_house_value')
])
# Model compile
model.compile(optimizer='adam',
loss='mse',
metrics=['mse'])
# Model Fit
history = model.fit(train_ds,
validation_data=val_ds,
epochs=32)
```
Next we show loss as Mean Square Error (MSE). Remember that MSE is the most commonly used regression loss function. MSE is the sum of squared distances between our target variable (e.g. housing median age) and predicted values.
```
loss, mse = model.evaluate(train_ds)
print("Mean Squared Error", mse)
```
#### Visualize the model loss curve
Next, we will use matplotlib to draw the model's loss curves for training and validation. A line plot is also created showing the mean squared error loss over the training epochs for both the train (blue) and test (orange) sets.
```
def plot_curves(history, metrics):
    """Plot train vs. validation curves for each metric on a 1x2 subplot grid."""
    fig = plt.figure(figsize=(10, 5))
    for position, metric in enumerate(metrics, start=1):
        ax = fig.add_subplot(1, 2, position)
        plt.plot(history.history[metric])
        plt.plot(history.history['val_{}'.format(metric)])
        plt.title('model {}'.format(metric))
        plt.ylabel(metric)
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'], loc='upper left')
plot_curves(history, ['loss', 'mse'])
```
### Load test data
Next, we read in the test.csv file and validate that there are no null values.
Again, we can use .describe() to see some summary statistics for the numeric fields in our dataframe. The count shows 4087.000000 for all feature columns. Thus, there are no missing values.
```
test_data = pd.read_csv('../data/housing-test.csv')
test_data.describe()
```
Now that we have created an input pipeline using tf.data and compiled a Keras Sequential Model, we now create the input function for the test data and to initialize the test_predict variable.
```
# TODO 1f -- Your code here
test_predict = test_input_fn(dict(test_data))
```
#### Prediction: Linear Regression
Before we begin to feature engineer our feature columns, we should predict the median house value. By predicting the median house value now, we can then compare it with the median house value after feature engineering.
To predict with Keras, you simply call [model.predict()](https://keras.io/models/model/#predict) and pass in the housing features you want to predict the median_house_value for. Note: We are predicting the model locally.
```
predicted_median_house_value = model.predict(test_predict)
```
Next, we run two predictions in separate cells - one where ocean_proximity=INLAND and one where ocean_proximity= NEAR OCEAN.
```
# Ocean_proximity is INLAND
model.predict({
'longitude': tf.convert_to_tensor([-121.86]),
'latitude': tf.convert_to_tensor([39.78]),
'housing_median_age': tf.convert_to_tensor([12.0]),
'total_rooms': tf.convert_to_tensor([7653.0]),
'total_bedrooms': tf.convert_to_tensor([1578.0]),
'population': tf.convert_to_tensor([3628.0]),
'households': tf.convert_to_tensor([1494.0]),
'median_income': tf.convert_to_tensor([3.0905]),
'ocean_proximity': tf.convert_to_tensor(['INLAND'])
}, steps=1)
# Ocean_proximity is NEAR OCEAN
model.predict({
'longitude': tf.convert_to_tensor([-122.43]),
'latitude': tf.convert_to_tensor([37.63]),
'housing_median_age': tf.convert_to_tensor([34.0]),
'total_rooms': tf.convert_to_tensor([4135.0]),
'total_bedrooms': tf.convert_to_tensor([687.0]),
'population': tf.convert_to_tensor([2154.0]),
'households': tf.convert_to_tensor([742.0]),
'median_income': tf.convert_to_tensor([4.9732]),
'ocean_proximity': tf.convert_to_tensor(['NEAR OCEAN'])
}, steps=1)
```
The arrays returns a predicted value. What do these numbers mean? Let's compare this value to the test set.
Go to the test.csv you read in a few cells up. Locate the first line and find the median_house_value - which should be 249,000 dollars near the ocean. What value did your model predict for the median_house_value? Was it a solid model performance? Let's see if we can improve this a bit with feature engineering!
## Lab Task 2: Engineer features to create categorical and numerical features
Now we create a cell that indicates which features will be used in the model.
Note: Be sure to bucketize 'housing_median_age' and ensure that 'ocean_proximity' is one-hot encoded. And, don't forget your numeric values!
```
# TODO 2a -- Your code here
```
Next, we scale the numerical, bucketized, and categorical feature columns that we assigned to the variables in the preceding cell.
```
# Scalar def get_scal(feature):
def get_scal(feature):
    """Return a min-max scaling function for the given numeric feature.

    The returned closure normalizes a value into [0, 1] using the min and
    max of that feature in the module-level `train` dataframe.
    """
    def minmax(value):
        lo = train[feature].min()
        hi = train[feature].max()
        return (value - lo) / (hi - lo)
    return minmax
# All numerical features - scaling
feature_columns = []
for header in numeric_cols:
scal_input_fn = get_scal(header)
feature_columns.append(fc.numeric_column(header,
normalizer_fn=scal_input_fn))
```
### Categorical Feature
In this dataset, 'ocean_proximity' is represented as a string. We cannot feed strings directly to a model. Instead, we must first map them to numeric values. The categorical vocabulary columns provide a way to represent strings as a one-hot vector.
Next, we create a categorical feature using 'ocean_proximity'.
```
# TODO 2b -- Your code here
```
### Bucketized Feature
Often, you don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. Consider our raw data that represents a homes' age. Instead of representing the house age as a numeric column, we could split the home age into several buckets using a [bucketized column](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column). Notice the one-hot values below describe which age range each row matches.
Next we create a bucketized column using 'housing_median_age'
```
# TODO 2c -- Your code here
```
### Feature Cross
Combining features into a single feature, better known as [feature crosses](https://developers.google.com/machine-learning/glossary/#feature_cross), enables a model to learn separate weights for each combination of features.
Next, we create a feature cross of 'housing_median_age' and 'ocean_proximity'.
```
# TODO 2d -- Your code here
```
Next, we should validate the total number of feature columns. Compare this number to the number of numeric features you input earlier.
```
# Fixed typo in the printed message ("coumns" -> "columns").
print('Total number of feature columns: ', len(feature_columns))
```
Next, we will run this cell to compile and fit the Keras Sequential model. This is the same model we ran earlier.
```
# Model create
feature_layer = tf.keras.layers.DenseFeatures(feature_columns,
dtype='float64')
model = tf.keras.Sequential([
feature_layer,
layers.Dense(12, input_dim=8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(1, activation='linear', name='median_house_value')
])
# Model compile
model.compile(optimizer='adam',
loss='mse',
metrics=['mse'])
# Model Fit
history = model.fit(train_ds,
validation_data=val_ds,
epochs=32)
```
Next, we show loss and mean squared error then plot the model.
```
loss, mse = model.evaluate(train_ds)
print("Mean Squared Error", mse)
plot_curves(history, ['loss', 'mse'])
```
Next we create a prediction model. Note: You may use the same values from the previous prediction.
```
# TODO 2e -- Your code here
```
### Analysis
The array returns a predicted value. Compare this value to the test set you ran earlier. Your predicted value may be a bit better.
Now that you have your "feature engineering template" setup, you can experiment by creating additional features. For example, you can create derived features, such as households per population, and see how they impact the model. You can also experiment with replacing the features you used to create the feature cross.
Copyright 2020 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| github_jupyter |
```
# default_exp models.XCMPlus
```
# XCM (An Explainable Convolutional Neural Network for Multivariate Time Series Classification)
> This is an unofficial PyTorch implementation by Ignacio Oguiza of - oguiza@gmail.com based on Temporal Convolutional Network (Bai, 2018).
**References:**
* Fauvel, K., Lin, T., Masson, V., Fromont, É., & Termier, A. (2020). XCM: An Explainable Convolutional Neural Network for Multivariate Time Series Classification. arXiv preprint arXiv:2009.04796.
* Official XCM PyTorch implementation: not available as of Nov 27th, 2020
```
#export
from tsai.imports import *
from tsai.utils import *
from tsai.models.layers import *
from tsai.models.utils import *
from tsai.models.explainability import *
#export
# This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@gmail.com based on:
# Fauvel, K., Lin, T., Masson, V., Fromont, ร., & Termier, A. (2020). XCM: An Explainable Convolutional Neural Network for
# Multivariate Time Series Classification. arXiv preprint arXiv:2009.04796.
# Official XCM PyTorch implementation: not available as of Nov 27th, 2020
class XCMPlus(nn.Sequential):
    """XCM: an explainable CNN for multivariate time series classification.

    Assembled as a Sequential of a dual-branch convolutional backbone
    followed by a pooling + linear head (or a user-supplied ``custom_head``).
    """
    def __init__(self, c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1., flatten:bool=False, custom_head:callable=None,
                 concat_pool:bool=False, fc_dropout:float=0., bn:bool=False, y_range:tuple=None, **kwargs):
        # NOTE: the backbone derives its own window size from
        # seq_len * window_perc, so the previously computed (and unused)
        # local `window_size` has been removed.
        backbone = _XCMPlus_Backbone(c_in, c_out, seq_len=seq_len, nf=nf, window_perc=window_perc)
        self.head_nf = nf
        self.c_out = c_out
        self.seq_len = seq_len
        if custom_head: head = custom_head(self.head_nf, c_out, seq_len, **kwargs)
        else: head = self.create_head(self.head_nf, c_out, seq_len, flatten=flatten, concat_pool=concat_pool,
                                      fc_dropout=fc_dropout, bn=bn, y_range=y_range)
        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))

    def create_head(self, nf, c_out, seq_len=None, flatten=False, concat_pool=False, fc_dropout=0., bn=False, y_range=None):
        """Build the classification head: flatten or global pooling, then linear."""
        if flatten:
            nf *= seq_len
            layers = [Flatten()]
        else:
            # Concat pooling stacks average- and max-pooled features,
            # doubling the feature count.
            if concat_pool: nf *= 2
            layers = [GACP1d(1) if concat_pool else GAP1d(1)]
        layers += [LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)]
        if y_range: layers += [SigmoidRange(*y_range)]
        return nn.Sequential(*layers)

    def show_gradcam(self, x, y=None, detach=True, cpu=True, apply_relu=True, cmap='inferno', figsize=None, **kwargs):
        """Plot Grad-CAM attribution maps for the variable (2D) and time (1D) branches."""
        att_maps = get_attribution_map(self, [self.backbone.conv2dblock, self.backbone.conv1dblock], x, y=y, detach=detach, cpu=cpu, apply_relu=apply_relu)
        # Min-max normalize each attribution map to [0, 1] for display.
        att_maps[0] = (att_maps[0] - att_maps[0].min()) / (att_maps[0].max() - att_maps[0].min())
        att_maps[1] = (att_maps[1] - att_maps[1].min()) / (att_maps[1].max() - att_maps[1].min())

        figsize = ifnone(figsize, (10, 10))
        fig = plt.figure(figsize=figsize, **kwargs)
        ax = plt.axes()
        plt.title('Observed variables')
        im = ax.imshow(att_maps[0], cmap=cmap)
        # Colorbar axes placed just right of the image axes.
        cax = fig.add_axes([ax.get_position().x1+0.01, ax.get_position().y0, 0.02, ax.get_position().height])
        plt.colorbar(im, cax=cax)
        plt.show()

        fig = plt.figure(figsize=figsize, **kwargs)
        ax = plt.axes()
        plt.title('Time')
        im = ax.imshow(att_maps[1], cmap=cmap)
        cax = fig.add_axes([ax.get_position().x1+0.01, ax.get_position().y0, 0.02, ax.get_position().height])
        plt.colorbar(im, cax=cax)
        plt.show()
class _XCMPlus_Backbone(Module):
    """Dual-branch XCM backbone: a 2D conv branch over variables and a 1D
    conv branch over time, concatenated and fused by a final 1D conv block."""
    def __init__(self, c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1.):
        window_size = int(round(seq_len * window_perc, 0))
        self.conv2dblock = nn.Sequential(Unsqueeze(1), Conv2d(1, nf, kernel_size=(1, window_size), padding='same'), BatchNorm(nf), nn.ReLU())
        self.conv2d1x1block = nn.Sequential(nn.Conv2d(nf, 1, kernel_size=1), nn.ReLU(), Squeeze(1))
        self.conv1dblock = nn.Sequential(Conv1d(c_in, nf, kernel_size=window_size, padding='same'), BatchNorm(nf, ndim=1), nn.ReLU())
        self.conv1d1x1block = nn.Sequential(nn.Conv1d(nf, 1, kernel_size=1), nn.ReLU())
        self.concat = Concat()
        self.conv1d = nn.Sequential(Conv1d(c_in + 1, nf, kernel_size=window_size, padding='same'), BatchNorm(nf, ndim=1), nn.ReLU())

    def forward(self, x):
        # 2D branch: features over the observed variables.
        var_branch = self.conv2d1x1block(self.conv2dblock(x))
        # 1D branch: features over time.
        time_branch = self.conv1d1x1block(self.conv1dblock(x))
        # Fuse both branches (time branch first, matching the original order).
        fused = self.concat((time_branch, var_branch))
        return self.conv1d(fused)
from tsai.data.all import *
from tsai.models.XCM import *
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, Categorize()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
model = XCMPlus(dls.vars, dls.c, dls.len)
learn = Learner(dls, model, metrics=accuracy)
xb, yb = dls.one_batch()
bs, c_in, seq_len = xb.shape
c_out = len(np.unique(yb))
model = XCMPlus(c_in, c_out, seq_len, fc_dropout=.5)
test_eq(model(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len, concat_pool=True)
test_eq(model(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len)
test_eq(model(xb).shape, (bs, c_out))
test_eq(count_parameters(XCMPlus(c_in, c_out, seq_len)), count_parameters(XCM(c_in, c_out, seq_len)))
model
model.show_gradcam(xb[0], yb[0])
bs = 16
n_vars = 3
seq_len = 12
c_out = 10
xb = torch.rand(bs, n_vars, seq_len)
new_head = partial(conv_lin_3d_head, d=(5, 2))
net = XCMPlus(n_vars, c_out, seq_len, custom_head=new_head)
print(net(xb).shape)
net.head
bs = 16
n_vars = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, n_vars, seq_len)
net = XCMPlus(n_vars, c_out, seq_len)
change_model_head(net, create_pool_plus_head, concat_pool=False)
print(net(xb).shape)
net.head
#hide
out = create_scripts(); beep(out)
```
| github_jupyter |
## Intro to deep learning for medical imaging by [MD.ai](https://www.md.ai)
## Lesson 3. RSNA Pneumonia Detection Challenge (Kaggle API)
The [Radiological Society of North America](http://www.rsna.org/) Pneumonia Detection Challenge: https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
This notebook covers the basics of parsing the competition dataset, training using a detector based on the [Mask-RCNN algorithm](https://arxiv.org/abs/1703.06870) for object detection and instance segmentation.
This notebook is developed by [MD.ai](https://www.md.ai), the platform for medical AI.
This notebook requires Google's [TensorFlow](https://www.tensorflow.org/) machine learning framework.
**Intro to deep learning for medical imaging lessons**
- Lesson 1. Classification of chest vs. abdominal X-rays using TensorFlow/Keras [Github](https://github.com/mdai/ml-lessons/blob/master/lesson1-xray-images-classification.ipynb) [Annotator](https://public.md.ai/annotator/project/PVq9raBJ)
- Lesson 2. Lung X-Rays Semantic Segmentation using UNets. [Github](https://github.com/mdai/ml-lessons/blob/master/lesson2-lung-xrays-segmentation.ipynb)
[Annotator](https://public.md.ai/annotator/project/aGq4k6NW/workspace)
- Lesson 3. RSNA Pneumonia detection using Kaggle data format [Github](https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-kaggle.ipynb) [Annotator](https://public.md.ai/annotator/project/LxR6zdR2/workspace)
- Lesson 3. RSNA Pneumonia detection using MD.ai python client library [Github](https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-mdai-client-lib.ipynb) [Annotator](https://public.md.ai/annotator/project/LxR6zdR2/workspace)
*Copyright 2018 MD.ai, Inc.
Licensed under the Apache License, Version 2.0*
```
# install dependencies not included by Colab
# use pip3 to ensure compatibility w/ Google Deep Learning Images
!pip3 install -q pydicom
!pip3 install -q tqdm
!pip3 install -q imgaug
import os
import sys
import random
import math
import numpy as np
import cv2
import matplotlib.pyplot as plt
import json
import pydicom
from imgaug import augmenters as iaa
from tqdm import tqdm
import pandas as pd
import glob
```
### First: Install Kaggle API for download competition data.
```
# Install Kaggle API for download competition data
!pip3 install -q kaggle
```
### Next: You must accept the user agreement on the competition website! Then follow [instructions to obtain your Kaggle Credentials.](https://github.com/Kaggle/kaggle-api#api-credentials)
If you are unable to download the competition dataset, check to see if you have accepted the **user agreement** on the competition website.
```
# enter your Kaggle credentials here
os.environ['KAGGLE_USERNAME']=""
os.environ['KAGGLE_KEY']=""
# Root directory of the project
ROOT_DIR = os.path.abspath('./lesson3-data')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, 'logs')
if not os.path.exists(ROOT_DIR):
os.makedirs(ROOT_DIR)
os.chdir(ROOT_DIR)
# If you are unable to download the competition dataset, check to see if you have
# accepted the user agreement on the competition website.
!kaggle competitions download -c rsna-pneumonia-detection-challenge
```
### Data is downloaded as zip files. Unzip the test and train datasets as well as the csv of annotations.
```
# unzipping takes a few minutes
!unzip -q -o stage_1_test_images.zip -d stage_1_test_images
!unzip -q -o stage_1_train_images.zip -d stage_1_train_images
!unzip -q -o stage_1_train_labels.csv.zip
```
### MD.ai Annotator
Additionally, if you are interested in augmenting the existing annotations, you can use the MD.ai annotator to view DICOM images, and create annotations to be exported.
MD.ai annotator project URL for the Kaggle dataset: https://public.md.ai/annotator/project/LxR6zdR2/workspace
**Annotator features**
- The annotator can be used to view DICOM images and create image and exam level annotations.
- You can apply the annotator to filter by label, adjudicate annotations, and assign annotation tasks to your team.
- Notebooks can be built directly within the annotator for rapid model development.
- The data wrangling is abstracted away by the interface and by our MD.ai library.
- Simplifies image annotation in order to widen the participation in the future of medical image deep learning.
The annotator allows you to create initial annotations, build and run models, modify/finetune the annotations based on predicted values, and repeat.
The MD.ai python client library implements functions to easily download images and annotations and to prepare the datasets used to train the model for classification. See the following example notebook for parsing annotations and training using MD.ai annotator:
https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-mdai-client-lib.ipynb
- MD.ai URL: https://www.md.ai
- MD.ai documentation URL: https://docs.md.ai/
### Install Matterport's Mask-RCNN model from github.
See the [Matterport's implementation of Mask-RCNN](https://github.com/matterport/Mask_RCNN).
```
os.chdir(ROOT_DIR)
!git clone https://github.com/matterport/Mask_RCNN.git
os.chdir('Mask_RCNN')
!python setup.py -q install
# Import Mask RCNN
sys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN')) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
train_dicom_dir = os.path.join(ROOT_DIR, 'stage_1_train_images')
test_dicom_dir = os.path.join(ROOT_DIR, 'stage_1_test_images')
```
### Some setup functions and classes for Mask-RCNN
- dicom_fps is a list of the dicom image path and filenames
- image_annotions is a dictionary of the annotations keyed by the filenames
- parsing the dataset returns a list of the image filenames and the annotations dictionary
```
def get_dicom_fps(dicom_dir):
    """Return a de-duplicated list of the .dcm file paths under dicom_dir.

    Uses os.path.join instead of manual '/' concatenation for portability.
    """
    dicom_fps = glob.glob(os.path.join(dicom_dir, '*.dcm'))
    return list(set(dicom_fps))
def parse_dataset(dicom_dir, anns):
    """Pair each DICOM file in dicom_dir with its annotation rows.

    Returns (image_fps, image_annotations) where image_annotations maps a
    file path to the list of annotation rows for that patient.
    """
    image_fps = get_dicom_fps(dicom_dir)
    image_annotations = dict((fp, []) for fp in image_fps)
    for _, row in anns.iterrows():
        key = os.path.join(dicom_dir, '{}.dcm'.format(row['patientId']))
        image_annotations[key].append(row)
    return image_fps, image_annotations
# The following parameters have been selected to reduce running time for demonstration purposes
# These are not optimal
class DetectorConfig(Config):
    """Configuration for training pneumonia detection on the RSNA pneumonia dataset.
    Overrides values in the base Config class.

    NOTE: these values were selected to reduce running time for
    demonstration purposes; they are not optimal.
    """
    # Give the configuration a recognizable name
    NAME = 'pneumonia'

    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8

    BACKBONE = 'resnet50'

    NUM_CLASSES = 2  # background + 1 pneumonia class

    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 64
    IMAGE_MAX_DIM = 64
    RPN_ANCHOR_SCALES = (32, 64)
    TRAIN_ROIS_PER_IMAGE = 16
    MAX_GT_INSTANCES = 3
    DETECTION_MAX_INSTANCES = 3
    DETECTION_MIN_CONFIDENCE = 0.9
    DETECTION_NMS_THRESHOLD = 0.1

    RPN_TRAIN_ANCHORS_PER_IMAGE = 16
    TOP_DOWN_PYRAMID_SIZE = 32

    # Was assigned twice in the original (both times 100); keep a single
    # definition.
    STEPS_PER_EPOCH = 100
config = DetectorConfig()
config.display()
class DetectorDataset(utils.Dataset):
    """Dataset class for training pneumonia detection on the RSNA pneumonia dataset."""

    def __init__(self, image_fps, image_annotations, orig_height, orig_width):
        # BUG FIX: the original called super().__init__(self), which passed
        # `self` as Matterport's unrelated `class_map` parameter.
        super().__init__()

        # Add classes
        self.add_class('pneumonia', 1, 'Lung Opacity')

        # add images
        for i, fp in enumerate(image_fps):
            annotations = image_annotations[fp]
            self.add_image('pneumonia', image_id=i, path=fp,
                           annotations=annotations, orig_height=orig_height, orig_width=orig_width)

    def image_reference(self, image_id):
        """Return the file path of the image (used for debugging/reporting)."""
        info = self.image_info[image_id]
        return info['path']

    def load_image(self, image_id):
        """Read the DICOM pixel data for image_id and return it as an RGB array."""
        info = self.image_info[image_id]
        fp = info['path']
        ds = pydicom.read_file(fp)
        image = ds.pixel_array
        # If grayscale. Convert to RGB for consistency.
        if len(image.shape) != 3 or image.shape[2] != 3:
            image = np.stack((image,) * 3, -1)
        return image

    def load_mask(self, image_id):
        """Build one binary mask channel per bounding-box annotation.

        Returns (mask, class_ids). Images with no annotations get a single
        all-zero mask channel with class id 0 (background).
        """
        info = self.image_info[image_id]
        annotations = info['annotations']
        count = len(annotations)
        if count == 0:
            mask = np.zeros((info['orig_height'], info['orig_width'], 1), dtype=np.uint8)
            class_ids = np.zeros((1,), dtype=np.int32)
        else:
            mask = np.zeros((info['orig_height'], info['orig_width'], count), dtype=np.uint8)
            class_ids = np.zeros((count,), dtype=np.int32)
            for i, a in enumerate(annotations):
                if a['Target'] == 1:
                    x = int(a['x'])
                    y = int(a['y'])
                    w = int(a['width'])
                    h = int(a['height'])
                    mask_instance = mask[:, :, i].copy()
                    # Filled rectangle (thickness=-1) marks the whole box region.
                    cv2.rectangle(mask_instance, (x, y), (x+w, y+h), 255, -1)
                    mask[:, :, i] = mask_instance
                    class_ids[i] = 1
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # equivalent dtype here.
        return mask.astype(bool), class_ids.astype(np.int32)
```
### Examine the annotation data, parse the dataset, and view dicom fields
```
# training dataset
anns = pd.read_csv(os.path.join(ROOT_DIR, 'stage_1_train_labels.csv'))
anns.head(6)
image_fps, image_annotations = parse_dataset(train_dicom_dir, anns=anns)
ds = pydicom.read_file(image_fps[0]) # read dicom image from filepath
image = ds.pixel_array # get image array
# show dicom fields
ds
# Original DICOM image size: 1024 x 1024
ORIG_SIZE = 1024
```
### Split the data into training and validation datasets
**Note: We have only used only a portion of the images for demonstration purposes. See comments below.**
- To use all the images do: image_fps_list = list(image_fps)
- Or change the number of images from 100 to a custom number
```
######################################################################
# Modify this line to use more or fewer images for training/validation.
# To use all images, do: image_fps_list = list(image_fps)
image_fps_list = list(image_fps[:1000])
#####################################################################

# split dataset into training vs. validation dataset
# split ratio is set to 0.9 vs. 0.1 (train vs. validation, respectively)
# BUG FIX: sorted(image_fps_list) returned a new list that was discarded,
# so the seeded shuffle below depended on the filesystem order returned by
# glob. Sorting in place makes the split reproducible.
image_fps_list.sort()
random.seed(42)
random.shuffle(image_fps_list)

validation_split = 0.1
split_index = int((1 - validation_split) * len(image_fps_list))
image_fps_train = image_fps_list[:split_index]
image_fps_val = image_fps_list[split_index:]
print(len(image_fps_train), len(image_fps_val))
```
### Create and prepare the training dataset using the DetectorDataset class.
```
# prepare the training dataset
dataset_train = DetectorDataset(image_fps_train, image_annotations, ORIG_SIZE, ORIG_SIZE)
dataset_train.prepare()
```
### Let's look at a sample annotation. We see a bounding box with (x, y) of the top left corner as well as the width and height.
```
# Show annotation(s) for a DICOM image
test_fp = random.choice(image_fps_train)
image_annotations[test_fp]
# prepare the validation dataset
dataset_val = DetectorDataset(image_fps_val, image_annotations, ORIG_SIZE, ORIG_SIZE)
dataset_val.prepare()
```
### Display a random image with bounding boxes
```
# Load and display random samples and their bounding boxes
# Suggestion: Run this a few times to see different examples.
image_id = random.choice(dataset_train.image_ids)
image_fp = dataset_train.image_reference(image_id)
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
print(image.shape)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.imshow(image[:, :, 0], cmap='gray')
plt.axis('off')
plt.subplot(1, 2, 2)
masked = np.zeros(image.shape[:2])
for i in range(mask.shape[2]):
masked += image[:, :, 0] * mask[:, :, i]
plt.imshow(masked, cmap='gray')
plt.axis('off')
print(image_fp)
print(class_ids)
model = modellib.MaskRCNN(mode='training', config=config, model_dir=MODEL_DIR)
```
### Image Augmentation. Try finetuning some variables to custom values
```
# Image augmentation
augmentation = iaa.SomeOf((0, 1), [
iaa.Fliplr(0.5),
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-25, 25),
shear=(-8, 8)
),
iaa.Multiply((0.9, 1.1))
])
```
### Now it's time to train the model. Note that training even a basic model can take a few hours.
Note: the following model is for demonstration purpose only. We have limited the training to one epoch, and have set nominal values for the Detector Configuration to reduce run-time.
- dataset_train and dataset_val are derived from DetectorDataset
- DetectorDataset loads images from image filenames and masks from the annotation data
- model is Mask-RCNN
```
NUM_EPOCHS = 1
# Train Mask-RCNN Model
import warnings
warnings.filterwarnings("ignore")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=NUM_EPOCHS,
layers='all',
augmentation=augmentation)
# select trained model
dir_names = next(os.walk(model.model_dir))[1]
key = config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
fps = []
# Pick last directory
for d in dir_names:
dir_name = os.path.join(model.model_dir, d)
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
print('No weight files in {}'.format(dir_name))
else:
checkpoint = os.path.join(dir_name, checkpoints[-1])
fps.append(checkpoint)
model_path = sorted(fps)[-1]
print('Found model {}'.format(model_path))
class InferenceConfig(DetectorConfig):
    """Inference-time config: one image per batch on a single GPU.

    All other settings are inherited from DetectorConfig.
    """
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode='inference',
config=inference_config,
model_dir=MODEL_DIR)
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# set color for class
def get_colors_for_class_ids(class_ids):
    """Map class ids to display colors: red for class 1 ('Lung Opacity').

    Ids other than 1 contribute no color, matching the original behavior.
    """
    return [(.941, .204, .204) for class_id in class_ids if class_id == 1]
```
### How does the predicted box compared to the expected value? Let's use the validation dataset to check.
Note that we trained only one epoch for **demonstration purposes ONLY**. You might be able to improve performance running more epochs.
```
# Show few example of ground truth vs. predictions on the validation dataset
dataset = dataset_val
fig = plt.figure(figsize=(10, 30))
for i in range(4):
image_id = random.choice(dataset.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
plt.subplot(6, 2, 2*i + 1)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset.class_names,
colors=get_colors_for_class_ids(gt_class_id), ax=fig.axes[-1])
plt.subplot(6, 2, 2*i + 2)
results = model.detect([original_image]) #, verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])
# Get filenames of test dataset DICOM images
test_image_fps = get_dicom_fps(test_dicom_dir)
```
### Final steps - Create the submission file
```
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='sample_submission.csv', min_conf=0.98):
    """Run the detector over DICOM files and write a submission CSV.

    Each line is '<patient_id>' when nothing confident is detected, or
    '<patient_id>, <score> <x> <y> <w> <h> ...' with one score/box group
    per detection scoring above min_conf. Box coordinates are rescaled
    from the model's input resolution back to the original image size.
    """
    # assume square image: one scalar maps model coords -> original coords
    resize_factor = ORIG_SIZE / config.IMAGE_SHAPE[0]
    with open(filepath, 'w') as submission:
        for image_id in tqdm(image_fps):
            ds = pydicom.read_file(image_id)
            image = ds.pixel_array
            # If grayscale, convert to RGB for consistency with the model input.
            if len(image.shape) != 3 or image.shape[2] != 3:
                image = np.stack((image,) * 3, -1)
            patient_id = os.path.splitext(os.path.basename(image_id))[0]
            r = model.detect([image])[0]
            assert len(r['rois']) == len(r['class_ids']) == len(r['scores'])
            row = patient_id
            if len(r['rois']) > 0:
                row += ","
                for box, score in zip(r['rois'], r['scores']):
                    if score > min_conf:
                        # rois are (y1, x1, y2, x2); submission wants x, y, width, height
                        x1 = box[1]
                        y1 = box[0]
                        width = box[3] - x1
                        height = box[2] - y1
                        row += ' '
                        row += str(round(score, 2))
                        row += ' '
                        row += "{} {} {} {}".format(x1*resize_factor, y1*resize_factor,
                                                    width*resize_factor, height*resize_factor)
            submission.write(row + "\n")
# predict only the first 50 entries
sample_submission_fp = 'sample_submission.csv'
predict(test_image_fps[:50], filepath=sample_submission_fp)
# Read the file back (it has no header row) to sanity-check the output.
output = pd.read_csv(sample_submission_fp, names=['id', 'pred_string'])
output.head(50)
```
| github_jupyter |
## Linear least-squares and a bland dense network
We're going to use the MIT-BIH datasets to train and test a basic feedforward network and see how it does. We'll compare the results to a linear regression.
We'll use two different inputs: a mostly unprocessed version of the dataset, and a version in the frequency domain obtained by applying the FFT.
```
import datetime
import os
import logging
import numpy as np
import tensorflow as tf
import tools.plot as plot
import tools.train as train
import tools.models as models
## Read in data
files = ("../data/mitbih_train.csv", "../data/mitbih_test.csv")
inputs, labels, sparse_labels, df = train.preprocess(*files, fft=False)
inputs_fft = train.dataset_fft(inputs)
train.class_count(df)
```
Let's look at a few random samples of the training data:
```
plot.plot_ecg(files[0], 125, 1)
```
### Least-squares
Let's try least-squares regression with numpy.
```
# Fit one-hot labels with ordinary least squares on the raw signals and on
# their FFTs. np.linalg.lstsq returns (solution, residuals, rank, singular values).
lstsq_soln = np.linalg.lstsq(inputs["train"], labels["train"], rcond=None)
lstsq_soln_fft = np.linalg.lstsq(inputs_fft["train"], labels["train"], rcond=None)
print("Rank of training dataset:", lstsq_soln[2])
print("Rank of training dataset after (real) FFT:", lstsq_soln_fft[2])
```
Now let's see how accurate it is.
```
def lstsq_accuracy(inputs, labels, coeffs):
    """Score a linear classifier on every data split.

    For each split in `inputs` the predicted class is the argmax of the
    linear model's output; a prediction counts as correct when the
    corresponding one-hot entry in `labels` equals 1. Prints train/test
    accuracy and returns the per-split predicted class indices.
    """
    predictions = {}
    scores = {}
    for split in inputs:
        guesses = np.argmax(np.dot(inputs[split], coeffs), axis=1)
        predictions[split] = guesses
        n_rows = labels[split].shape[0]
        hits = np.sum(labels[split][range(n_rows), guesses] == 1)
        scores[split] = hits / n_rows
    print("Training accuracy:", scores["train"])
    print("Test accuracy:", scores["test"])
    return predictions
print("Regular least-squares")
predict = lstsq_accuracy(inputs, labels, lstsq_soln[0])
plot.plot_cm(sparse_labels["test"], predict["test"], classes=np.arange(5), normalize=True)
print("After FFT")
predict_fft = lstsq_accuracy(inputs_fft, labels, lstsq_soln_fft[0])
plot.plot_cm(sparse_labels["test"], predict_fft["test"], classes=np.arange(5), normalize=True)
```
### Dense feed-forward network
Let's try an unregularized, bland feed-forward network with a couple of hidden layers.
```
# Tensorboard logging
rightnow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
nofftpath = os.path.join("..", "logs", rightnow, "nofft")
# Training hyperparameters shared by both runs.
config = {
    "optimizer": "Nadam",
    "loss": "categorical_crossentropy",
    "batch_size": 200,
    "val_split": 0.05,   # fraction of training data held out for validation
    "epochs": 300,       # upper bound; early stopping usually ends sooner
    "verbose": 0,
    "patience": 20,      # early-stopping patience, in epochs
    "logdir": nofftpath,
}
inputsize = inputs["train"].shape[1]
ncategories = labels["train"].shape[1]
# One hidden layer of 100 ReLU units.
hiddenlayers = [(100, "relu")]
# Suppress tensorflow warnings about internal deprecations
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
print("Unprocessed data")
model = models.create_dense(inputsize, hiddenlayers, ncategories)
history = train.train_print(model, inputs, labels, config)
plot.plot_fit_history(history)
test_pred = np.argmax(model.predict(inputs["test"]), axis=1)
plot.plot_cm(
sparse_labels["test"],
test_pred,
classes=np.array(["N", "S", "V", "F", "Q"]),
normalize=True,
)
# Tensorboard logging
fftpath = os.path.join("..", "logs", rightnow, "fft")
# Copy the config: `config_fft = config` would alias the same dict, so
# setting "logdir" below would silently rewrite the no-FFT config too.
config_fft = dict(config)
config_fft["logdir"] = fftpath
print("After FFT")
model_fft = models.create_dense(inputs_fft["train"].shape[1], hiddenlayers, ncategories)
history_fft = train.train_print(model_fft, inputs_fft, labels, config_fft)
plot.plot_fit_history(history_fft)
test_pred_fft = np.argmax(model_fft.predict(inputs_fft["test"]), axis=1)
plot.plot_cm(
sparse_labels["test"],
test_pred_fft,
classes=np.array(["N", "S", "V", "F", "Q"]),
normalize=True,
)
```
The results don't tend to be very consistent. The final test accuracy generally varies fairly significantly from run to run, and it's not clear whether the FFT "does" anything for the accuracy of the training.
| github_jupyter |
### Data Visualization
#### `matplotlib` - from the documentation:
https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html
`matplotlib.pyplot` is a collection of command style functions that make matplotlib work like MATLAB. <br>
Each pyplot function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc.
In `matplotlib.pyplot` various states are preserved across function calls, so that it keeps track of things like the current figure and plotting area, and the plotting functions are directed to the current axes.<br>
"axes" in most places in the documentation refers to the axes part of a figure and not the strict mathematical term for more than one axis).
```
%matplotlib inline
import matplotlib.pyplot as plt
```
Call signatures::
```
plot([x], y, [fmt], data=None, **kwargs)
plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
```
Quick plot
The main usage of `plt` is the `plot()` and `show()` functions
```
plt.plot()
plt.show()
```
List
```
plt.plot([8, 24, 27, 42])
plt.ylabel('numbers')
plt.show()
# Plot the two lists, add axes labels
x=[4,5,6,7]
y=[2,5,1,7]
```
`matplotlib` can use *format strings* to quickly declare the type of plots you want. Here are *some* of those formats:
|**Character**|**Description**|
|:-----------:|:--------------|
|'--'|Dashed line|
|':'|Dotted line|
|'o'|Circle marker|
|'^'|Upwards triangle marker|
|'b'|Blue|
|'c'|Cyan|
|'g'|Green|
```
plt.plot([3, 4, 9, 20], 'gs')
plt.axis([-1, 4, 0, 22])
plt.show()
plt.plot([3, 4, 9, 20], 'b^--', linewidth=2, markersize=12)
plt.show()
plt.plot([3, 4, 9, 20], color='blue', marker='^', linestyle='dashed', linewidth=2, markersize=12)
plt.show()
# Plot a list with 10 numbers with a magenta dotted line and circles for points.
import numpy as np
# evenly sampled time
time = np.arange(0, 7, 0.3)
# gene expression
ge = np.arange(1, 8, 0.3)
# red dashes, blue squares and green triangles
plt.plot(time, ge, 'r--', time, ge**2, 'bs', time, ge**3, 'g^')
plt.show()
```
linestyle or ls [ '-' | '--' | '-.' | ':' |
```
lines = plt.plot([1, 2, 3])
plt.setp(lines)
names = ['A', 'B', 'C', 'D']
values = [7, 20, 33, 44]
values1 = np.random.rand(100)
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.bar(names, values)
plt.subplot(132)
plt.scatter(names, values)
plt.subplot(133)
plt.hist(values1)
plt.suptitle('Categorical Plotting')
plt.show()
import pandas as pd
df_iris = pd.read_csv('https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv')
df_iris.head()
x1 = df_iris.petal_length
y1 = df_iris.petal_width
x2 = df_iris.sepal_length
y2 = df_iris.sepal_width
plt.plot(x1, y1, 'g^', x2, y2, 'bs')
plt.show()
```
#### Histogram
```
help(plt.hist)
n, bins, patches = plt.hist(df_iris.petal_length, bins=20,facecolor='#8303A2', alpha=0.8, rwidth=.8, align='mid')
# Add a title
plt.title('Iris dataset petal length')
# Add y axis label
plt.ylabel('number of plants')
```
#### Boxplot
```
help(plt.boxplot)
plt.boxplot(df_iris.petal_length)
# Add a title
plt.title('Iris dataset petal length')
# Add y axis label
plt.ylabel('petal length')
```
The biggest issue with `matplotlib` isn't its lack of power...it is that it is too much power. With great power, comes great responsibility. When you are quickly exploring data, you don't want to have to fiddle around with axis limits, colors, figure sizes, etc. Yes, you *can* make good figures with `matplotlib`, but you probably won't.
https://python-graph-gallery.com/matplotlib/
Pandas works off of `matplotlib` by default. You can easily start visualizing dataframes and series just by a simple command.
#### Using pandas `.plot()`
Pandas abstracts some of those initial issues with data visualization. However, it is still a `matplotlib` plot</br></br>
Every plot that is returned from `pandas` is subject to `matplotlib` modification.
```
df_iris.plot.box()
plt.show()
# Plot the histogram of the petal lengths
# Plot the histograms of all 4 numerical characteristics in a plot
df_iris.groupby("species")['petal_length'].mean().plot(kind='bar')
plt.show()
df_iris.plot(x='petal_length', y='petal_width', kind = "scatter")
plt.savefig('output.png')
plt.savefig('output.png')
```
https://github.com/pandas-dev/pandas/blob/v0.25.0/pandas/plotting/_core.py#L504-L1533
#### Multiple Plots
```
df_iris.petal_length.plot(kind='density')
df_iris.sepal_length.plot(kind='density')
df_iris.petal_width.plot(kind='density')
```
`matplotlib` allows users to define the regions of their plotting canvas. If the user intends to create a canvas with multiple plots, they would use the `subplot()` function. The `subplot` function sets the number of rows and columns the canvas will have **AND** sets the current index of where the next subplot will be rendered.
```
plt.figure(1)
# Plot all three columns from df in different subplots
# Rows first index (top-left)
plt.subplot(3, 1, 1)
df_iris.petal_length.plot(kind='density')
plt.subplot(3, 1, 2)
df_iris.sepal_length.plot(kind='density')
plt.subplot(3, 1, 3)
df_iris.petal_width.plot(kind='density')
# Some plot configuration
plt.subplots_adjust(top=.92, bottom=.08, left=.1, right=.95, hspace=.25, wspace=.35)
plt.show()
# Temporary styles
with plt.style.context(('ggplot')):
plt.figure(1)
# Plot all three columns from df in different subplots
# Rows first index (top-left)
plt.subplot(3, 1, 1)
df_iris.petal_length.plot(kind='density')
plt.subplot(3, 1, 2)
df_iris.sepal_length.plot(kind='density')
plt.subplot(3, 1, 3)
df_iris.petal_width.plot(kind='density')
# Some plot configuration
plt.subplots_adjust(top=.92, bottom=.08, left=.1, right=.95, hspace=.25, wspace=.35)
plt.show()
# Plot the histograms of the petal length and width and sepal length and width
# Display them on the columns of a figure with 2X2 subplots
# color them red, green, blue and yellow, respectivelly
```
### `seaborn` - dataset-oriented plotting
Seaborn is a library that specializes in making *prettier* `matplotlib` plots of statistical data. <br>
It is built on top of matplotlib and closely integrated with pandas data structures.
https://seaborn.pydata.org/introduction.html<br>
https://python-graph-gallery.com/seaborn/
```
import seaborn as sns
```
`seaborn` lets users *style* their plotting environment.
```
sns.set(style='whitegrid')
```
However, you can always use `matplotlib`'s `plt.style`
```
#dir(sns)
sns.scatterplot(x='petal_length',y='petal_width',data=df_iris)
plt.show()
sns.scatterplot(x='petal_length',y='petal_width', hue = "species",data=df_iris)
plt.show()
```
#### Violin plot
Fancier box plot that gets rid of the need for 'jitter' to show the inherent distribution of the data points
```
columns = ['petal_length', 'petal_width', 'sepal_length']
fig, axes = plt.subplots(figsize=(10, 10))
sns.violinplot(data=df_iris.loc[:,columns], ax=axes)
axes.set_ylabel('number')
axes.set_xlabel('columns', )
plt.show()
```
#### Distplot
```
sns.set(style='darkgrid', palette='muted')
# 1 row, 3 columns
f, axes = plt.subplots(4,1, figsize=(10,10), sharex=True)
sns.despine(left=True)
# Regular displot
sns.distplot(df_iris.petal_length, ax=axes[0])
# Change the color
sns.distplot(df_iris.petal_width, kde=False, ax=axes[1], color='orange')
# Show the Kernel density estimate
sns.distplot(df_iris.sepal_width, hist=False, kde_kws={'shade':True}, ax=axes[2], color='purple')
# Show the rug
sns.distplot(df_iris.sepal_length, hist=False, rug=True, ax=axes[3], color='green')
```
#### FacetGrid
```
sns.set()
columns = ['species', 'petal_length', 'petal_width']
facet_column = 'species'
g = sns.FacetGrid(df_iris.loc[:,columns], col=facet_column, hue=facet_column, col_wrap=5)
g.map(plt.scatter, 'petal_length', 'petal_width')
sns.relplot(x="petal_length", y="petal_width", col="species",
hue="species", style="species", size="species",
data=df_iris)
plt.show()
```
https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html
### `plotnine` - R ggplot2 in python
plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot.
Plotting with a grammar is powerful: it makes custom (and otherwise complex) plots easy to think about and then create, while the simple plots remain simple.
```
!pip install plotnine
```
https://plotnine.readthedocs.io/en/stable/
```
from plotnine import *
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point()
# add transparency - to avoid over plotting
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point(alpha=0.7)
# change point size
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point(size = 0.7, alpha=0.7)
# more parameters
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point() + scale_x_log10() + xlab("Petal Length")
n = "3"
ft = "length and width"
title = 'species : %s, petal : %s' % (n,ft)
ggplot(data=df_iris) +aes(x='petal_length',y='petal_width',color="species") + geom_point(size=0.7,alpha=0.7) + facet_wrap('~species',nrow=3) + theme(figure_size=(9,5)) + ggtitle(title)
p = ggplot(data=df_iris) + aes(x='petal_length') + geom_histogram(binwidth=1,color='black',fill='grey')
p
ggsave(plot=p, filename='hist_plot_with_plotnine.png')
```
http://cmdlinetips.com/2018/05/plotnine-a-python-library-to-use-ggplot2-in-python/ <br>
https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
<img src = "https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf" width = "1000"/>
Use ggplot to plot the sepal_length in boxplots separated by species, add new axes labels and make the y axis values log10.
* Write a function that takes as a parameter a line of the dataframe and if the species is
** setosa it returns the petal_length
** versicolor it returns the petal_width
** virginica it returns the sepal_length
Apply this function to every line in the dataset in a for loop and save the result in an array
Use ggplot to make a histogram of the values
| github_jupyter |
<img src="images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="250 px" align="left">
## _*E91 quantum key distribution protocol*_
***
### Contributors
Andrey Kardashin
## *Introduction*
Suppose that Alice wants to send a message to Bob.
In order to protect the information in the message from the eavesdropper Eve, it must be encrypted.
Encryption is the process of encoding the *plaintext* into *ciphertext*.
The strength of encryption, that is, the property to resist decryption, is determined by its algorithm.
Any encryption algorithm is based on the use of a *key*.
In order to generate the ciphertext, the [one-time pad technique](https://en.wikipedia.org/wiki/One-time_pad) is usually used.
The idea of this technique is to apply the *exclusive or* (XOR) $\oplus$ operation to bits of the plaintext and bits of the key to obtain the ciphertext.
Thus, if $m=(m_1 \ldots m_n)$, $c=(c_1 \ldots c_n)$ and $k=(k_1 \ldots k_n)$ are binary strings of plaintext, ciphertext and key respectively, then the encryption is defined as $c_i=m_i \oplus k_i$, and decryption as $m_i=c_i \oplus k_i$.

The one-time pad method is proven to be absolutely secure.
Thus, if Eve intercepted the ciphertext $c$, she will not get any information from the message $m$ until she has the key $k$.
The main problem of modern cryptographic systems is the distribution among the participants of the communication session of a secret key, possession of which should not be available to third parties.
The rapidly developing methods of quantum key distribution can solve this problem regardless of the capabilities of the eavesdropper.
In this tutorial, we show how Alice and Bob can generate a secret key using the *E91* quantum key distribution protocol.
## *Quantum entanglement*
The E91 protocol developed by Artur Ekert in 1991 is based on the use of entangled states and Bell's theorem (see [Entanglement Revisited](https://github.com/Qiskit/qiskit-tutorial/blob/master/reference/qis/entanglement_revisited.ipynb) QISKit tutorial).
It is known that two electrons *A* and *B* can be prepared in such a state that they can not be considered separately from each other.
One of these states is the singlet state
$$\lvert\psi_s\rangle =
\frac{1}{\sqrt{2}}(\lvert0\rangle_A\otimes\lvert1\rangle_B - \lvert1\rangle_A\otimes\lvert0\rangle_B) =
\frac{1}{\sqrt{2}}(\lvert01\rangle - \lvert10\rangle),$$
where the vectors $\lvert 0 \rangle$ and $\lvert 1 \rangle$ describe the states of each electron with the [spin](https://en.wikipedia.org/wiki/Spin_(physics%29) projection along the positive and negative direction of the *z* axis.
The observable of the projection of the spin onto the direction $\vec{n}=(n_x, n_y, n_z)$ is given by
$$\vec{n} \cdot \vec{\sigma} =
n_x X + n_y Y + n_z Z,$$
where $\vec{\sigma} = (X, Y, Z)$ and $X, Y, Z$ are the Pauli matrices.
For two qubits *A* and *B*, the observable $(\vec{a} \cdot \vec{\sigma})_A \otimes (\vec{b} \cdot \vec{\sigma})_B$ describes the joint measurement of the spin projections onto the directions $\vec{a}$ and $\vec{b}$.
It can be shown that the expectation value of this observable in the singlet state is
$$\langle (\vec{a} \cdot \vec{\sigma})_A \otimes (\vec{b} \cdot \vec{\sigma})_B \rangle_{\psi_s} =
-\vec{a} \cdot \vec{b}. \qquad\qquad (1)$$
Here we see an interesting fact: if Alice and Bob measure the spin projections of electrons A and B onto the same direction, they will obtain the opposite results.
Thus, if Alice got the result $\pm 1$, then Bob *definitely* will get the result $\mp 1$, i.e. the results will be perfectly anticorrelated.
## *CHSH inequality*
In the framework of classical physics, it is impossible to create a correlation inherent in the singlet state $\lvert\psi_s\rangle$.
Indeed, let us measure the observables $X$, $Z$ for qubit *A* and observables $W = \frac{1}{\sqrt{2}} (X + Z)$, $V = \frac{1}{\sqrt{2}} (-X + Z)$ for qubit *B*.
Performing joint measurements of these observables, the following expectation values can be obtained:
\begin{eqnarray*}
\langle X \otimes W \rangle_{\psi_s} &= -\frac{1}{\sqrt{2}}, \quad
\langle X \otimes V \rangle_{\psi_s} &= \frac{1}{\sqrt{2}}, \qquad\qquad (2) \\
\langle Z \otimes W \rangle_{\psi_s} &= -\frac{1}{\sqrt{2}}, \quad
\langle Z \otimes V \rangle_{\psi_s} &= -\frac{1}{\sqrt{2}}.
\end{eqnarray*}
Now we can construct the *Clauser-Horne-Shimony-Holt (CHSH) correlation value*:
$$C =
\langle X\otimes W \rangle - \langle X \otimes V \rangle + \langle Z \otimes W \rangle + \langle Z \otimes V \rangle =
-2 \sqrt{2}. \qquad\qquad (3)$$
The [local hidden variable theory](https://en.wikipedia.org/wiki/Local_hidden_variable_theory) which was developed in particular to explain the quantum correlations gives that $\lvert C \rvert \leqslant 2$.
But [Bell's theorem](https://en.wikipedia.org/wiki/Bell's_theorem) states that "no physical theory of local hidden variables can ever reproduce all of the predictions of quantum mechanics."
Thus, the violation of the [CHSH inequality](https://en.wikipedia.org/wiki/Bell's_theorem#Bell_inequalities_are_violated_by_quantum_mechanical_predictions) (i.e. $C = -2 \sqrt{2}$ for the singlet state), which is a generalized form of Bell's inequality, can serve as an *indicator of quantum entanglement*.
This fact finds its application in the E91 protocol.
## *The protocol*
To implement the E91 quantum key distribution protocol, there must be a source of qubits prepared in the singlet state.
It does not matter to whom this source belongs: to Alice, to Bob, to some trusted third-party Charlie or even to Eve.
The steps of the E91 protocol are following.
1. Charlie, the owner of the singlet state preparation device, creates $N$ entangled states $\lvert\psi_s\rangle$ and sends qubits *A* to Alice and qubits *B* to Bob via the quantum channel.

2. Participants Alice and Bob generate strings $b=(b_1 \ldots b_N)$ and $b^{'}=(b_1^{'} \ldots b_N^{'})$, where $b_i, b^{'}_j = 1, 2, 3$.
Depending on the elements of these strings, Alice and Bob measure the spin projections of their qubits along the following directions:
\begin{align*}
b_i = 1: \quad \vec{a}_1 &= (1,0,0) \quad (X \text{ observable}) &
b_j^{'} = 1: \quad \vec{b}_1 &= \left(\frac{1}{\sqrt{2}},0,\frac{1}{\sqrt{2}}\right) \quad (W \text{ observable})
\\
b_i = 2: \quad \vec{a}_2 &= \left(\frac{1}{\sqrt{2}},0,\frac{1}{\sqrt{2}}\right) \quad (W \text{ observable}) &
b_j^{'} = 2: \quad \vec{b}_2 &= (0,0,1) \quad ( \text{Z observable})
\\
b_i = 3: \quad \vec{a}_3 &= (0,0,1) \quad (Z \text{ observable}) &
b_j^{'} = 3: \quad \vec{b}_3 &= \left(-\frac{1}{\sqrt{2}},0,\frac{1}{\sqrt{2}}\right) \quad (V \text{ observable})
\end{align*}
<img src="images/vectors.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="center">
We can describe this process as a measurement of the observables $(\vec{a}_i \cdot \vec{\sigma})_A \otimes (\vec{b}_j \cdot \vec{\sigma})_B$ for each singlet state created by Charlie.
3. Alice and Bob record the results of their measurements as elements of strings $a=(a_1 \ldots a_N)$ and $a^{'} =(a_1^{'} \ldots a_N^{'})$ respectively, where $a_i, a^{'}_j = \pm 1$.
4. Using the classical channel, participants compare their strings $b=(b_1 \ldots b_N)$ and $b^{'}=(b_1^{'} \ldots b_N^{'})$.
In other words, Alice and Bob tell each other which measurements they have performed during the step 2.
If Alice and Bob have measured the spin projections of the $m$-th entangled pair of qubits onto the same direction (i.e. $\vec{a}_2/\vec{b}_1$ or $\vec{a}_3/\vec{b}_2$ for Alice's and Bob's qubit respectively), then they are sure that they obtained opposite results, i.e. $a_m = - a_m^{'}$ (see Eq. (1)).
Thus, for the $l$-th bit of the key strings $k=(k_1 \ldots k_n),k^{'}=(k_1^{'} \ldots k_n^{'})$ Alice and Bob can write $k_l = a_m, k_l^{'} = -a_m^{'}$.

5. Using the results obtained after measuring the spin projections onto the $\vec{a}_1/\vec{b}_1$, $\vec{a}_1/\vec{b}_3$, $\vec{a}_3/\vec{b}_1$ and $\vec{a}_3/\vec{b}_3$ directions (observables $(2)$), Alice and Bob calculate the CHSH correlation value $(3)$.
If $C = -2\sqrt{2}$, then Alice and Bob can be sure that the states they had been receiving from Charlie were entangled indeed.
This fact tells the participants that there was no interference in the quantum channel.
## *Simulation*
In this section we simulate the E91 quantum key distribution protocol *without* the presence of an eavesdropper.
### *Step one: creating the singlets*
In the first step Alice and Bob receive their qubits of the singlet states $\lvert\psi_s\rangle$ created by Charlie.
For our simulation, we need registers with two quantum bits and four classical bits.
```
# Checking the version of PYTHON; we only support > 3.5
import sys
if sys.version_info < (3,5):
raise Exception('Please use Python version 3.5 or greater.')
# useful additional packages
import numpy as np
import random
import re # regular expressions module
# importing the QISKit
from qiskit import QuantumCircuit, QuantumProgram
#import Qconfig
# Quantum program setup
Q_program = QuantumProgram()
#Q_program.set_api(Qconfig.APItoken, Qconfig.config['url']) # set the APIToken and API url
# Creating registers
qr = Q_program.create_quantum_register("qr", 2)
cr = Q_program.create_classical_register("cr", 4)
```
Let us assume that qubits *qr\[0\]* and *qr\[1\]* belong to Alice and Bob respectively.
In classical bits *cr\[0\]* and *cr\[1\]* Alice and Bob store their measurement results, and classical bits *cr\[2\]* and *cr\[3\]* are used by Eve to store her measurement results of Alice's and Bob's qubits.
Now Charlie creates a singlet state:
```
# Build the singlet |psi_s> = (|01> - |10>)/sqrt(2):
# X on both qubits gives |11>; H on qubit 0 turns it into (|01> - |11>)/sqrt(2);
# the CNOT then maps |11> -> |10>, producing the singlet state.
singlet = Q_program.create_circuit('singlet', [qr], [cr])
singlet.x(qr[0])
singlet.x(qr[1])
singlet.h(qr[0])
singlet.cx(qr[0],qr[1])
```
Qubits *qr\[0\]* and *qr\[1\]* are now entangled.
After creating a singlet state, Charlie sends qubit *qr\[0\]* to Alice and qubit *qr\[1\]* to Bob.

### *Step two: measuring*
First let us prepare the measurements which will be used by Alice and Bob.
We define $A(\vec{a}_i) = \vec{a}_i \cdot \vec{\sigma}$ and $B(\vec{b}_j) = \vec{b}_j \cdot \vec{\sigma}$ as the spin projection observables used by Alice and Bob for their measurements.
To perform these measurements, the standard basis $Z$ must be rotated to the proper basis when it is needed (see [Superposition](https://quantumexperience.ng.bluemix.net/proxy/tutorial/full-user-guide/002-The_Weird_and_Wonderful_World_of_the_Qubit/020-Superposition.html) and [Entanglement and Bell Tests](https://quantumexperience.ng.bluemix.net/proxy/tutorial/full-user-guide/003-Multiple_Qubits_Gates_and_Entangled_States/050-Entanglement_and_Bell_Tests.html) user guides).
Here we define the notation of possible measurements of Alice and Bob:

Blocks on the left side can be considered as *detectors* used by the participants to measure $X, W, Z$ and $V$ observables.
Now we prepare the corresponding circuits.
```
## Alice's measurement circuits
# Each circuit rotates the chosen measurement basis into the standard Z basis
# before measuring: H alone gives the X basis; S,H,T,H the W basis; and
# S,H,Tdg,H the V basis (per the basis labels used throughout this notebook).
# measure the spin projection of Alice's qubit onto the a_1 direction (X basis)
measureA1 = Q_program.create_circuit('measureA1', [qr], [cr])
measureA1.h(qr[0])
measureA1.measure(qr[0],cr[0])
# measure the spin projection of Alice's qubit onto the a_2 direction (W basis)
measureA2 = Q_program.create_circuit('measureA2', [qr], [cr])
measureA2.s(qr[0])
measureA2.h(qr[0])
measureA2.t(qr[0])
measureA2.h(qr[0])
measureA2.measure(qr[0],cr[0])
# measure the spin projection of Alice's qubit onto the a_3 direction (standard Z basis)
measureA3 = Q_program.create_circuit('measureA3', [qr], [cr])
measureA3.measure(qr[0],cr[0])
## Bob's measurement circuits
# measure the spin projection of Bob's qubit onto the b_1 direction (W basis)
measureB1 = Q_program.create_circuit('measureB1', [qr], [cr])
measureB1.s(qr[1])
measureB1.h(qr[1])
measureB1.t(qr[1])
measureB1.h(qr[1])
measureB1.measure(qr[1],cr[1])
# measure the spin projection of Bob's qubit onto the b_2 direction (standard Z basis)
measureB2 = Q_program.create_circuit('measureB2', [qr], [cr])
measureB2.measure(qr[1],cr[1])
# measure the spin projection of Bob's qubit onto the b_3 direction (V basis)
measureB3 = Q_program.create_circuit('measureB3', [qr], [cr])
measureB3.s(qr[1])
measureB3.h(qr[1])
measureB3.tdg(qr[1])
measureB3.h(qr[1])
measureB3.measure(qr[1],cr[1])
## Lists of measurement circuits (indexed by choice value minus one)
aliceMeasurements = [measureA1, measureA2, measureA3]
bobMeasurements = [measureB1, measureB2, measureB3]
```
Suppose Alice and Bob want to generate a secret key using $N$ singlet states prepared by Charlie.
```
# Define the number of singlets N
numberOfSinglets = 500
```
The participants must choose the directions onto which they will measure the spin projections of their qubits.
To do this, Alice and Bob create the strings $b$ and $b^{'}$ with randomly generated elements.
```
# Random measurement settings: strings b and b' with elements drawn from {1, 2, 3}.
aliceMeasurementChoices = [random.randint(1, 3) for i in range(numberOfSinglets)] # string b of Alice
bobMeasurementChoices = [random.randint(1, 3) for i in range(numberOfSinglets)] # string b' of Bob
```
Now we combine Charlie's device and Alice's and Bob's detectors into one circuit (singlet + Alice's measurement + Bob's measurement).
```
circuits = [] # the list in which the created circuits will be stored
for i in range(numberOfSinglets):
    # create the name of the i-th circuit depending on Alice's and Bob's measurement choices
    circuitName = str(i) + ':A' + str(aliceMeasurementChoices[i]) + '_B' + str(bobMeasurementChoices[i])
    # create the joint measurement circuit
    # add Alice's and Bob's measurement circuits to the singlet state circuit
    Q_program.add_circuit(circuitName,
                          singlet + # singlet state circuit
                          aliceMeasurements[aliceMeasurementChoices[i]-1] + # measurement circuit of Alice
                          bobMeasurements[bobMeasurementChoices[i]-1] # measurement circuit of Bob
                          )
    # add the created circuit to the circuits list
    circuits.append(circuitName)
```
Let us look at the name of one of the prepared circuits.
```
print(circuits[0])
```
It tells us about the number of the singlet state received from Charlie, and the measurements applied by Alice and Bob.
In the *circuits* list we have stored $N$ (*numberOfSinglets*) circuits similar to those shown in the figure below.

The idea is to model every act of the creation of the singlet state, the distribution of its qubits among the participants and the measurement of the spin projection onto the chosen direction in the E91 protocol by executing each circuit from the *circuits* list with one shot.
### *Step three: recording the results*
First let us execute the circuits on the simulator.
```
result = Q_program.execute(circuits, backend='local_qasm_simulator', shots=1, max_credits=5, wait=10, timeout=240)
print(result)
```
Look at the output of the execution of the first circuit.
```
result.get_counts(circuits[0])
```
It consists of four digits.
Recall that Alice and Bob store the results of the measurement in classical bits *cr\[0\]* and *cr\[1\]* (two digits on the right).
Since we model the secret key generation process without the presence of an eavesdropper, the classical bits *cr\[2\]* and *cr\[3\]* are always 0.
Also note that the output is the Python dictionary, in which the keys are the obtained results, and the values are the counts.
Alice and Bob record the results of their measurements as bits of the strings $a$ and $a^{'}$.
To simulate this process we need to use regular expressions module *[re](https://docs.python.org/3/howto/regex.html#regex-howto)*.
First, we compile the search patterns.
```
# The two rightmost digits of the output string hold Bob's (cr[1]) and
# Alice's (cr[0]) results; a '0' encodes the outcome -1 and a '1' encodes +1.
abPatterns = [
    re.compile('..00$'), # search for the '..00' output (Alice obtained -1 and Bob obtained -1)
    re.compile('..01$'), # search for the '..01' output (Alice obtained 1 and Bob obtained -1)
    re.compile('..10$'), # search for the '..10' output (Alice obtained -1 and Bob obtained 1)
    re.compile('..11$') # search for the '..11' output (Alice obtained 1 and Bob obtained 1)
]
```
Using these patterns, we can find particular results in the outputs and fill strings the $a$ and $a^{'}$ with the results of Alice's and Bob's measurements.
```
aliceResults = [] # Alice's results (string a)
bobResults = [] # Bob's results (string a')
for i in range(numberOfSinglets):
    # each circuit was run with one shot, so the counts dict holds a single key
    res = list(result.get_counts(circuits[i]).keys())[0] # extract the key from the dict and transform it to str; execution result of the i-th circuit
    if abPatterns[0].search(res): # check if the key is '..00' (if the measurement results are -1,-1)
        aliceResults.append(-1) # Alice got the result -1
        bobResults.append(-1) # Bob got the result -1
    if abPatterns[1].search(res): # '..01': Alice got 1, Bob got -1
        aliceResults.append(1)
        bobResults.append(-1)
    if abPatterns[2].search(res): # check if the key is '..10' (if the measurement results are -1,1)
        aliceResults.append(-1) # Alice got the result -1
        bobResults.append(1) # Bob got the result 1
    if abPatterns[3].search(res): # '..11': both got 1
        aliceResults.append(1)
        bobResults.append(1)
```
### *Step four: revealing the bases*
In the previous step we have stored the measurement results of Alice and Bob in the *aliceResults* and *bobResults* lists (strings $a$ and $a^{'}$).
Now the participants compare their strings $b$ and $b^{'}$ via the public classical channel.
If Alice and Bob have measured the spin projections of their qubits of the *i*-th singlet onto the same direction, then Alice records the result $a_i$ as the bit of the string $k$, and Bob records the result $-a_i$ as the bit of the string $k^{'}$ (see Eq. (1)).
```
aliceKey = [] # Alice's key string k
bobKey = [] # Bob's key string k'
# comparing the stings with measurement choices
for i in range(numberOfSinglets):
# if Alice and Bob have measured the spin projections onto the a_2/b_1 or a_3/b_2 directions
if (aliceMeasurementChoices[i] == 2 and bobMeasurementChoices[i] == 1) or (aliceMeasurementChoices[i] == 3 and bobMeasurementChoices[i] == 2):
aliceKey.append(aliceResults[i]) # record the i-th result obtained by Alice as the bit of the secret key k
bobKey.append(- bobResults[i]) # record the multiplied by -1 i-th result obtained Bob as the bit of the secret key k'
keyLength = len(aliceKey) # length of the secret key
```
The keys $k$ and $k'$ are now stored in the *aliceKey* and *bobKey* lists, respectively.
The remaining results which were not used to create the keys can now be revealed.
It is important for Alice and Bob to have the same keys, i.e. strings $k$ and $k^{'}$ must be equal.
Let us compare the bits of strings $k$ and $k^{'}$ and find out how many there are mismatches in the keys.
```
abKeyMismatches = 0 # number of mismatching bits in Alice's and Bob's keys
for j in range(keyLength):
if aliceKey[j] != bobKey[j]:
abKeyMismatches += 1
```
Note that since the strings $k$ and $k^{'}$ are secret, Alice and Bob have no information about mismatches in the bits of their keys.
To find out the number of errors, the participants can perform a random sampling test.
Alice randomly selects $\delta$ bits of her secret key and tells Bob which bits she selected.
Then Alice and Bob compare the values of these check bits.
For large enough $\delta$ the number of errors in the check bits will be close to the number of errors in the remaining bits.
### *Step five: CHSH correlation value test*
Alice and Bob want to be sure that there was no interference in the communication session.
To do that, they calculate the CHSH correlation value $(3)$ using the results obtained after the measurements of spin projections onto the $\vec{a}_1/\vec{b}_1$, $\vec{a}_1/\vec{b}_3$, $\vec{a}_3/\vec{b}_1$ and $\vec{a}_3/\vec{b}_3$ directions.
Recall that it is equivalent to the measurement of the observables $X \otimes W$, $X \otimes V$, $Z \otimes W$ and $Z \otimes V$ respectively.
According to the Born-von Neumann statistical postulate, the expectation value of the observable $E = \sum_j e_j \lvert e_j \rangle \langle e_j \rvert$ in the state $\lvert \psi \rangle$ is given by
$$\langle E \rangle_\psi =
\mathrm{Tr}\, \lvert\psi\rangle \langle\psi\rvert \, E = \\
\mathrm{Tr}\, \lvert\psi\rangle \langle\psi\rvert \sum_j e_j \lvert e_j \rangle \langle e_j \rvert =
\sum_j \langle\psi\rvert(e_j \lvert e_j \rangle \langle e_j \rvert) \lvert\psi\rangle =
\sum_j e_j \left|\langle\psi\lvert e_j \rangle \right|^2 = \\
\sum_j e_j \mathrm{P}_\psi (E \models e_j),$$
where $\lvert e_j \rangle$ is the eigenvector of $E$ with the corresponding eigenvalue $e_j$, and $\mathrm{P}_\psi (E \models e_j)$ is the probability of obtaining the result $e_j$ after measuring the observable $E$ in the state $\lvert \psi \rangle$.
A similar expression can be written for the joint measurement of the observables $A$ and $B$:
$$\langle A \otimes B \rangle_\psi =
\sum_{j,k} a_j b_k \mathrm{P}_\psi (A \models a_j, B \models b_k) =
\sum_{j,k} a_j b_k \mathrm{P}_\psi (a_j, b_k). \qquad\qquad (4)$$
Note that if $A$ and $B$ are the spin projection observables, then the corresponding eigenvalues are $a_j, b_k = \pm 1$.
Thus, for the observables $A(\vec{a}_i)$ and $B(\vec{b}_j)$ and singlet state $\lvert\psi\rangle_s$ we can rewrite $(4)$ as
$$\langle A(\vec{a}_i) \otimes B(\vec{b}_j) \rangle =
\mathrm{P}(-1,-1) - \mathrm{P}(1,-1) - \mathrm{P}(-1,1) + \mathrm{P}(1,1). \qquad\qquad (5)$$
In our experiments, the probabilities on the right side can be calculated as follows:
$$\mathrm{P}(a_j, b_k) = \frac{n_{a_j, b_k}(A \otimes B)}{N(A \otimes B)}, \qquad\qquad (6)$$
where the numerator is the number of results $a_j, b_k$ obtained after measuring the observable $A \otimes B$, and the denominator is the total number of measurements of the observable $A \otimes B$.
Since Alice and Bob revealed their strings $b$ and $b^{'}$, they know what measurements they performed and what results they have obtained.
With this data, participants calculate the expectation values $(2)$ using $(5)$ and $(6)$.
```
# function that calculates CHSH correlation value
def chsh_corr(result):
    """Compute the CHSH correlation value (Eq. 3) from the execution results.

    Uses only the singlets for which Alice/Bob measured in the test bases
    a_1/b_1 (XW), a_1/b_3 (XV), a_3/b_1 (ZW) and a_3/b_3 (ZV); all other
    basis combinations are ignored.

    Relies on the notebook-level globals ``numberOfSinglets``, ``circuits``,
    ``aliceMeasurementChoices``, ``bobMeasurementChoices`` and ``abPatterns``.

    :param result: execution result object exposing ``get_counts(circuit)``
    :return: the CHSH correlation value (float)
    """
    # (alice choice, bob choice) -> outcome counts, ordered like abPatterns:
    # [n('..00'), n('..01'), n('..10'), n('..11')]
    counts = {
        (1, 1): [0, 0, 0, 0],  # XW observable
        (1, 3): [0, 0, 0, 0],  # XV observable
        (3, 1): [0, 0, 0, 0],  # ZW observable
        (3, 3): [0, 0, 0, 0],  # ZV observable
    }

    for idx in range(numberOfSinglets):
        outcome = list(result.get_counts(circuits[idx]).keys())[0]
        basis = (aliceMeasurementChoices[idx], bobMeasurementChoices[idx])
        bucket = counts.get(basis)
        if bucket is None:
            continue  # this singlet was used for key generation, not the test
        for slot, pattern in enumerate(abPatterns):
            if pattern.search(outcome):
                bucket[slot] += 1

    def expectation(c):
        # Eq. (5)/(6): P(-1,-1) - P(1,-1) - P(-1,1) + P(1,1)
        return (c[0] - c[1] - c[2] + c[3]) / sum(c)

    e_xw = expectation(counts[(1, 1)])  # expected close to -1/sqrt(2)
    e_xv = expectation(counts[(1, 3)])  # expected close to  1/sqrt(2)
    e_zw = expectation(counts[(3, 1)])  # expected close to -1/sqrt(2)
    e_zv = expectation(counts[(3, 3)])  # expected close to -1/sqrt(2)

    # CHSH correlation value (3)
    return e_xw - e_xv + e_zw + e_zv
```
### *Output*
Now let us print all the interesting values.
```
corr = chsh_corr(result) # CHSH correlation value
# CHSH inequality test
print('CHSH correlation value: ' + str(round(corr, 3)))
# Keys
print('Length of the key: ' + str(keyLength))
print('Number of mismatching bits: ' + str(abKeyMismatches) + '\n')
```
Finally, Alice and Bob have the secret keys $k$ and $k^{'}$ (*aliceKey* and *bobKey*)!
Now they can use the one-time pad technique to encrypt and decrypt messages.
Since we simulate the E91 protocol without the presence of Eve, the CHSH correlation value should be close to $-2\sqrt{2} \approx -2.828$.
In addition, there should be no mismatching bits in the keys of Alice and Bob.
Note also that there are 9 possible combinations of measurements that can be performed by Alice and Bob, but only 2 of them give the results using which the secret keys can be created.
Thus, the ratio of the length of the keys to the number of singlets $N$ should be close to $2/9$.
## *Simulation of eavesdropping*
Suppose some third party wants to interfere in the communication session of Alice and Bob and obtain a secret key.
The eavesdropper can use the *intercept-resend* attacks: Eve intercepts one or both of the entangled qubits prepared by Charlie, measures the spin projections of these qubits, prepares new ones depending on the results obtained ($\lvert 01 \rangle$ or $\lvert 10 \rangle$) and sends them to Alice and Bob.
A schematic representation of this process is shown in the figure below.

Here $E(\vec{n}_A) = \vec{n}_A \cdot \vec{\sigma}$ and $E(\vec{n}_B) = \vec{n}_B \cdot \vec{\sigma}$ are the observables of the spin projections of Alice's and Bob's qubits onto the directions $\vec{n}_A$ and $\vec{n}_B$.
It would be wise for Eve to choose these directions to be $\vec{n}_A = \vec{a}_2,\vec{a}_3$ and $\vec{n}_B = \vec{b}_1,\vec{b}_2$ since the results obtained from other measurements can not be used to create a secret key.
Let us prepare the circuits for Eve's measurements.
```
# measurement of the spin projection of Alice's qubit onto the a_2 direction (W basis)
measureEA2 = Q_program.create_circuit('measureEA2', [qr], [cr])
measureEA2.s(qr[0])
measureEA2.h(qr[0])
measureEA2.t(qr[0])
measureEA2.h(qr[0])
measureEA2.measure(qr[0],cr[2])
# measurement of the spin projection of Allice's qubit onto the a_3 direction (standard Z basis)
measureEA3 = Q_program.create_circuit('measureEA3', [qr], [cr])
measureEA3.measure(qr[0],cr[2])
# measurement of the spin projection of Bob's qubit onto the b_1 direction (W basis)
measureEB1 = Q_program.create_circuit('measureEB1', [qr], [cr])
measureEB1.s(qr[1])
measureEB1.h(qr[1])
measureEB1.t(qr[1])
measureEB1.h(qr[1])
measureEB1.measure(qr[1],cr[3])
# measurement of the spin projection of Bob's qubit onto the b_2 direction (standard Z measurement)
measureEB2 = Q_program.create_circuit('measureEB2', [qr], [cr])
measureEB2.measure(qr[1],cr[3])
# lists of measurement circuits
eveMeasurements = [measureEA2, measureEA3, measureEB1, measureEB2]
```
Like Alice and Bob, Eve must choose the directions onto which she will measure the spin projections of the qubits.
In our simulation, the eavesdropper randomly chooses one of the observables $W \otimes W$ or $Z \otimes Z$ to measure.
```
# list of Eve's measurement choices
# the first and the second elements of each row represent the measurement of Alice's and Bob's qubits by Eve respectively
eveMeasurementChoices = []
for j in range(numberOfSinglets):
if random.uniform(0, 1) <= 0.5: # in 50% of cases perform the WW measurement
eveMeasurementChoices.append([0, 2])
else: # in 50% of cases perform the ZZ measurement
eveMeasurementChoices.append([1, 3])
```
Like we did before, now we create the circuits with singlet states and detectors of Eve, Alice and Bob.
```
circuits = [] # the list in which the created circuits will be stored
for j in range(numberOfSinglets):
# create the name of the j-th circuit depending on Alice's, Bob's and Eve's choices of measurement
circuitName = str(j) + ':A' + str(aliceMeasurementChoices[j]) + '_B' + str(bobMeasurementChoices[j] + 2) + '_E' + str(eveMeasurementChoices[j][0]) + str(eveMeasurementChoices[j][1] - 1)
# create the joint measurement circuit
# add Alice's and Bob's measurement circuits to the singlet state curcuit
Q_program.add_circuit(circuitName,
singlet + # singlet state circuit
eveMeasurements[eveMeasurementChoices[j][0]-1] + # Eve's measurement circuit of Alice's qubit
eveMeasurements[eveMeasurementChoices[j][1]-1] + # Eve's measurement circuit of Bob's qubit
aliceMeasurements[aliceMeasurementChoices[j]-1] + # measurement circuit of Alice
bobMeasurements[bobMeasurementChoices[j]-1] # measurement circuit of Bob
)
# add the created circuit to the circuits list
circuits.append(circuitName)
```
Now we execute all the prepared circuits on the simulator.
```
result = Q_program.execute(circuits, backend='local_qasm_simulator', shots=1, max_credits=5, wait=10, timeout=240)
print(result)
```
Let us look at the name of the first circuit and the output after it is executed.
```
print(str(circuits[0]) + '\t' + str(result.get_counts(circuits[0])))
```
We can see onto which directions Eve, Alice and Bob measured the spin projections and the results obtained.
Recall that the bits *cr\[2\]* and *cr\[3\]* (two digits on the left) are used by Eve to store the results of her measurements.
To extract Eve's results from the outputs, we need to compile new search patterns.
```
ePatterns = [
re.compile('00..$'), # search for the '00..' result (Eve obtained the results -1 and -1 for Alice's and Bob's qubits)
re.compile('01..$'), # search for the '01..' result (Eve obtained the results 1 and -1 for Alice's and Bob's qubits)
re.compile('10..$'),
re.compile('11..$')
]
```
Now Eve, Alice and Bob record the results of their measurements.
```
aliceResults = [] # Alice's results (string a)
bobResults = [] # Bob's results (string a')
# list of Eve's measurement results
# the elements in the 1-st column are the results obtaned from the measurements of Alice's qubits
# the elements in the 2-nd column are the results obtaned from the measurements of Bob's qubits
eveResults = []
# recording the measurement results
for j in range(numberOfSinglets):
res = list(result.get_counts(circuits[j]).keys())[0] # extract a key from the dict and transform it to str
# Alice and Bob
if abPatterns[0].search(res): # check if the key is '..00' (if the measurement results are -1,-1)
aliceResults.append(-1) # Alice got the result -1
bobResults.append(-1) # Bob got the result -1
if abPatterns[1].search(res):
aliceResults.append(1)
bobResults.append(-1)
if abPatterns[2].search(res): # check if the key is '..10' (if the measurement results are -1,1)
aliceResults.append(-1) # Alice got the result -1
bobResults.append(1) # Bob got the result 1
if abPatterns[3].search(res):
aliceResults.append(1)
bobResults.append(1)
# Eve
if ePatterns[0].search(res): # check if the key is '00..'
eveResults.append([-1, -1]) # results of the measurement of Alice's and Bob's qubits are -1,-1
if ePatterns[1].search(res):
eveResults.append([1, -1])
if ePatterns[2].search(res):
eveResults.append([-1, 1])
if ePatterns[3].search(res):
eveResults.append([1, 1])
```
As before, Alice, Bob and Eve create the secret keys using the results obtained after measuring the observables $W \otimes W$ and $Z \otimes Z$.
```
aliceKey = [] # Alice's key string a
bobKey = [] # Bob's key string a'
eveKeys = [] # Eve's keys; the 1-st column is the key of Alice, and the 2-nd is the key of Bob
# comparing the strings with measurement choices (b and b')
for j in range(numberOfSinglets):
# if Alice and Bob measured the spin projections onto the a_2/b_1 or a_3/b_2 directions
if (aliceMeasurementChoices[j] == 2 and bobMeasurementChoices[j] == 1) or (aliceMeasurementChoices[j] == 3 and bobMeasurementChoices[j] == 2):
aliceKey.append(aliceResults[j]) # record the i-th result obtained by Alice as the bit of the secret key k
bobKey.append(-bobResults[j]) # record the multiplied by -1 i-th result obtained Bob as the bit of the secret key k'
eveKeys.append([eveResults[j][0], -eveResults[j][1]]) # record the i-th bits of the keys of Eve
keyLength = len(aliceKey) # length of the secret skey
```
To find out the number of mismatching bits in the keys of Alice, Bob and Eve we compare the lists *aliceKey*, *bobKey* and *eveKeys*.
```
abKeyMismatches = 0 # number of mismatching bits in the keys of Alice and Bob
eaKeyMismatches = 0 # number of mismatching bits in the keys of Eve and Alice
ebKeyMismatches = 0 # number of mismatching bits in the keys of Eve and Bob
for j in range(keyLength):
if aliceKey[j] != bobKey[j]:
abKeyMismatches += 1
if eveKeys[j][0] != aliceKey[j]:
eaKeyMismatches += 1
if eveKeys[j][1] != bobKey[j]:
ebKeyMismatches += 1
```
It is also good to know what percentage of the keys is known to Eve.
```
eaKnowledge = (keyLength - eaKeyMismatches)/keyLength # Eve's knowledge of Alice's key
ebKnowledge = (keyLength - ebKeyMismatches)/keyLength # Eve's knowledge of Bob's key
```
Using the *chsh_corr* function defined above we calculate the CHSH correlation value.
```
corr = chsh_corr(result)
```
And now we print all the results.
```
# CHSH inequality test
print('CHSH correlation value: ' + str(round(corr, 3)) + '\n')
# Keys
print('Length of the key: ' + str(keyLength))
print('Number of mismatching bits: ' + str(abKeyMismatches) + '\n')
print('Eve\'s knowledge of Alice\'s key: ' + str(round(eaKnowledge * 100, 2)) + ' %')
print('Eve\'s knowledge of Bob\'s key: ' + str(round(ebKnowledge * 100, 2)) + ' %')
```
Due to Eve's interference in the communication session, the CHSH correlation value is far away from $-2 \sqrt{2}$.
Alice and Bob see it and will not use the secret key to encrypt and decrypt any messages.
It has been shown by Ekert that for any eavesdropping strategy and for any directions $\vec{n}_A$, $\vec{n}_B$ onto which Eve measures the spin projections of Alice's and Bob's qubits the following inequality can be written:
$$ -\sqrt{2} \leqslant C \leqslant \sqrt{2},$$
where $C$ is CHSH correlation value (3).
The more Eve interferes in the communication session, the more she knows about the secret keys.
But at the same time, the deviation of the CHSH correlation value from $-2\sqrt{2}$ also increases.
We can see that there are the mismatches in the keys of Alice and Bob.
Where do they come from?
After Eve measures the qubits of the singlet state $\lvert \psi_s \rangle$, she randomly obtains the results $-1,1$ or $1,-1$ (see Eq. (1)).
Depending on the results obtained, the eavesdropper prepares the state $\lvert \varphi_1 \rangle = \lvert 01 \rangle$ or $\lvert \varphi_2 \rangle = \lvert 10 \rangle$ (in our simulation it is automatically provided by a measurement in the $Z$ basis) and sends its qubits to Alice and Bob.
When Alice and Bob measure the observable $W \otimes W$, they obtain any combination of results with probability $\mathrm{P}_{\varphi_{n}}(a_i, b_j)$.
To see this, one can compare the results of the execution of Quantum Scores of [$W_E \otimes W_E \vert W_A \otimes W_B$](https://quantumexperience.ng.bluemix.net/share/code/1c4d96685cb20c2b99e43f9999b28313/execution/917dca7c81dfda7886af97eef85d5946) and [$W_E \otimes W_E \vert Z_A \otimes Z_B$](https://quantumexperience.ng.bluemix.net/share/code/0d378f5f16990ab3e47546ae4b0c39d2/execution/e836b67e10e9d11d7828a67a834cf4fd) measurements (the subscripts denote who performs the measurement).
In order to correct the mismatches in the keys of Alice and Bob classical error reconciliation algorithms are used.
A very good description of the error correction methods can be found in [Quantum cryptography](https://arxiv.org/abs/quant-ph/0101098) by N. Gisin et al.
| github_jupyter |
# 2A.eco - Python et la logique SQL - correction
Correction d'exercices sur SQL.
```
from jyquickhelper import add_notebook_menu
add_notebook_menu()
```
SQL permet de créer des tables, de rechercher, d'ajouter, de modifier ou de supprimer des données dans les bases de données.
Un peu ce que vous ferez bientôt tous les jours. C'est un langage de management de données, pas de nettoyage, d'analyse ou de statistiques avancées.
Les instructions SQL s'รฉcrivent d'une maniรจre qui ressemble ร celle de phrases ordinaires en anglais. Cette ressemblance voulue vise ร faciliter l'apprentissage et la lecture. Il est nรฉanmoins important de respecter un ordre pour les diffรฉrentes instructions.
Dans ce TD, nous allons รฉcrire des commandes en SQL via Python.
Pour plus de prรฉcisions sur SQL et les commandes qui existent, rendez-vous lร [SQL, PRINCIPES DE BASE](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/ext2a/sql_doc.html).
## Se connecter ร une base de donnรฉes
A la diffรฉrence des tables qu'on utilise habituellement, la base de donnรฉes n'est pas visible directement en ouvrant Excel ou un รฉditeur de texte. Pour avoir une vue de ce que contient la base de donnรฉes, il est nรฉcessaire d'avoir un autre type de logiciel.
Pour le TD, nous vous recommandons d'installer SQLiteSpy (disponible à cette adresse [SqliteSpy](http://www.yunqa.de/delphi/products/sqlitespy/index)) ou [sqlite_bro](https://pypi.python.org/pypi/sqlite_bro) si vous voulez voir à quoi ressemblent les données avant de les utiliser avec Python.
```
import sqlite3
# on va se connecter ร une base de donnรฉes SQL vide
# SQLite stocke la BDD dans un simple fichier
filepath = "./DataBase.db"
open(filepath, 'w').close() #crรฉe un fichier vide
CreateDataBase = sqlite3.connect(filepath)
QueryCurs = CreateDataBase.cursor()
```
La mรฉthode cursor() est un peu particuliรจre :
Il s'agit d'une sorte de tampon mรฉmoire intermรฉdiaire, destinรฉ ร mรฉmoriser temporairement les donnรฉes en cours de traitement, ainsi que les opรฉrations que vous effectuez sur elles, avant leur transfert dรฉfinitif dans la base de donnรฉes. Tant que la mรฉthode .commit() n'aura pas รฉtรฉ appelรฉe, aucun ordre ne sera appliquรฉ ร la base de donnรฉes.
--------------------
A prรฉsent que nous sommes connectรฉs ร la base de donnรฉes, on va crรฉer une table qui contient plusieurs variables de format diffรฉrents
- ID sera la clรฉ primaire de la base
- Nom, Rue, Ville, Pays seront du text
- Prix sera un rรฉel
```
# Helper that creates a table in the connected database.
def CreateTable(nom_bdd):
    """Create table *nom_bdd* (id, Name, City, Country, Price) if it does not already exist.

    Uses the notebook-level cursor ``QueryCurs``. The table name is spliced
    into the SQL text (SQLite cannot parameterize identifiers).
    """
    QueryCurs.execute(f'''CREATE TABLE IF NOT EXISTS {nom_bdd}
    (id INTEGER PRIMARY KEY, Name TEXT,City TEXT, Country TEXT, Price REAL)''')
# Helper that inserts a single row into a table.
def AddEntry(nom_bdd, Nom, Ville, Pays, Prix):
    """Insert one (Name, City, Country, Price) row into table *nom_bdd*.

    Values go through ``?`` placeholders, so only the table name is spliced
    into the SQL text. Uses the notebook-level cursor ``QueryCurs``.
    """
    QueryCurs.execute(f'''INSERT INTO {nom_bdd}
    (Name,City,Country,Price) VALUES (?,?,?,?)''', (Nom, Ville, Pays, Prix))
def AddEntries(nom_bdd, data):
    """Bulk-insert rows into table *nom_bdd*.

    :param nom_bdd: table name (spliced into the SQL text)
    :param data: list of (Name, City, Country, Price) tuples to insert
    """
    QueryCurs.executemany(f'''INSERT INTO {nom_bdd}
    (Name,City,Country,Price) VALUES (?,?,?,?)''', data)
### On va crรฉer la table clients
CreateTable('Clients')
AddEntry('Clients','Toto','Munich','Germany',5.2)
AddEntries('Clients',
[('Bill','Berlin','Germany',2.3),
('Tom','Paris','France',7.8),
('Marvin','Miami','USA',15.2),
('Anna','Paris','USA',7.8)])
# on va "commit" c'est ร dire qu'on va valider la transaction.
# > on va envoyer ses modifications locales vers le rรฉfรฉrentiel central - la base de donnรฉes SQL
CreateDataBase.commit()
```
### Voir la table
Pour voir ce qu'il y a dans la table, on utilise un premier Select oรน on demande ร voir toute la table
```
QueryCurs.execute('SELECT * FROM Clients')
Values = QueryCurs.fetchall()
print(Values)
```
### Passer en pandas
Rien de plus simple : plusieurs maniรจres de faire
```
import pandas as pd
# mรฉthode SQL Query
df1 = pd.read_sql_query('SELECT * FROM Clients', CreateDataBase)
print("En utilisant la mรฉthode read_sql_query \n", df1.head(), "\n")
#mรฉthode DataFrame en utilisant la liste issue de .fetchall()
df2 = pd.DataFrame(Values, columns=['ID','Name','City','Country','Price'])
print("En passant par une DataFrame \n", df2.head())
```
## Comparaison SQL et pandas
### SELECT
En SQL, la sรฉlection se fait en utilisant des virgules ou * si on veut sรฉlectionner toutes les colonnes
```
# en SQL
QueryCurs.execute('SELECT ID,City FROM Clients LIMIT 2')
Values = QueryCurs.fetchall()
print(Values)
```
En pandas, la sรฉlection de colonnes se fait en donnant une liste
```
#sur la table
df2[['ID','City']].head(2)
```
### WHERE
En SQL, on utilise WHERE pour filtrer les tables selon certaines conditions
```
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Paris"')
print(QueryCurs.fetchall())
```
Avec Pandas, on peut utiliser plusieurs maniรจres de faire :
- avec un boolรฉen
- en utilisant la mรฉthode 'query'
```
df2[df2['City'] == "Paris"]
df2.query('City == "Paris"')
```
Pour mettre plusieurs conditions, on utilise :
- & en Python, AND en SQL
- | en python, OR en SQL
```
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Paris" AND Country == "USA"')
print(QueryCurs.fetchall())
df2.query('City == "Paris" & Country == "USA"')
df2[(df2['City'] == "Paris") & (df2['Country'] == "USA")]
```
## GROUP BY
En pandas, l'opรฉration GROUP BY de SQL s'effectue avec une mรฉthode similaire : groupby()
groupby() sert ร regrouper des observations en groupes selon les modalitรฉs de certaines variables en appliquant une fonction d'aggrรฉgation sur d'autres variables.
```
QueryCurs.execute('SELECT Country, count(*) FROM Clients GROUP BY Country')
print(QueryCurs.fetchall())
```
Attention, en pandas, la fonction count() ne fait pas la mรชme chose qu'en SQL. Count() s'applique ร toutes les colonnes et compte toutes les observations non nulles.
```
df2.groupby('Country').count()
```
Pour rรฉaliser la mรชme chose qu'en SQL, il faut utiliser la mรฉthode size()
```
df2.groupby('Country').size()
```
On peut aussi appliquer des fonctions plus sophistiquรฉes lors d'un groupby
```
QueryCurs.execute('SELECT Country, AVG(Price), count(*) FROM Clients GROUP BY Country')
print(QueryCurs.fetchall())
```
Avec pandas, on peut appeler les fonctions classiques de numpy
```
import numpy as np
df2.groupby('Country').agg({'Price': np.mean, 'Country': np.size})
```
Ou utiliser des fonctions lambda
```
# par exemple calculer le prix moyen et le multiplier par 2
df2.groupby('Country')['Price'].apply(lambda x: 2*x.mean())
QueryCurs.execute('SELECT Country, 2*AVG(Price) FROM Clients GROUP BY Country').fetchall()
QueryCurs.execute('SELECT * FROM Clients WHERE Country == "Germany"')
print(QueryCurs.fetchall())
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Berlin" AND Country == "Germany"')
print(QueryCurs.fetchall())
QueryCurs.execute('SELECT * FROM Clients WHERE Price BETWEEN 7 AND 20')
print(QueryCurs.fetchall())
```
## Enregistrer une table SQL sous un autre format
On utilise le package csv, l'option 'w' pour 'write'.
On crรฉe l'objet "writer", qui vient du package csv.
Cet objet a deux mรฉthodes :
- writerow pour les noms de colonnes : une liste
- writerows pour les lignes : un ensemble de liste
```
data = QueryCurs.execute('SELECT * FROM Clients')
import csv
with open('./output.csv', 'w') as file:
writer = csv.writer(file)
writer.writerow(['id','Name','City','Country','Price'])
writer.writerows(data)
```
On peut รฉgalement passer par un DataFrame pandas et utiliser .to_csv()
```
QueryCurs.execute('''DROP TABLE Clients''')
#QueryCurs.close()
```
## Exercice
Dans cet exercice, nous allons manipuler les tables de la base de donnรฉes World.
Avant tout, connectez-vous à la base de données en utilisant sqlite3 et connect.
Lien vers la base de donnรฉes : [World.db3](https://github.com/sdpython/ensae_teaching_cs/raw/master/src/ensae_teaching_cs/data/data_sql/World.db3) ou
```
from ensae_teaching_cs.data import simple_database
name = simple_database()
```
```
#Se connecter ร la base de donnรฉes WORLD
CreateDataBase = sqlite3.connect("./World.db3")
QueryCurs = CreateDataBase.cursor()
```
Familiarisez vous avec la base de donnรฉes : quelles sont les tables ? quelles sont les variables de ces tables ?
- utilisez la fonction PRAGMA pour obtenir des informations sur les tables
```
# pour obtenir la liste des tables dans la base de donnรฉes
tables = QueryCurs.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall()
# on veut voir les colonnes de chaque table ainsi que la premiรจre ligne
for table in tables :
print("Table :", table[0])
schema = QueryCurs.execute("PRAGMA table_info({})".format(table[0])).fetchall()
print("Colonnes", ["{}".format(x[1]) for x in schema])
print("1รจre ligne", QueryCurs.execute('SELECT * FROM {} LIMIT 1'.format(table[0])).fetchall(), "\n")
```
## Question 1
- Quels sont les 10 pays qui ont le plus de langues ?
- Quelle langue est prรฉsente dans le plus de pays ?
```
QueryCurs.execute("""SELECT CountryCode, COUNT(*) as NB
FROM CountryLanguage
GROUP BY CountryCode
ORDER BY NB DESC
LIMIT 10""").fetchall()
QueryCurs.execute('''SELECT Language, COUNT(*) as NB
FROM CountryLanguage
GROUP BY Language
ORDER BY -NB
LIMIT 1''').fetchall()
```
## Question 2
- Quelles sont les diffรฉrentes formes de gouvernements dans les pays du monde ?
- Quels sont les 3 gouvernements oรน la population est la plus importante ?
```
QueryCurs.execute('''SELECT DISTINCT GovernmentForm FROM Country''').fetchall()
QueryCurs.execute('''SELECT GovernmentForm, SUM(Population) as Pop_Totale_Gouv
FROM Country
GROUP BY GovernmentForm
ORDER BY Pop_Totale_Gouv DESC
LIMIT 3
''').fetchall()
```
## Question 3
- Combien de pays ont Elisabeth II ร la tรชte de leur gouvernement ?
- Quelle proportion des sujets de Sa Majesté parle anglais ?
- 78 % ou 83% ?
```
QueryCurs.execute('''SELECT HeadOfState, Count(*)
FROM Country
WHERE HeadOfState = "Elisabeth II" ''').fetchall()
# la population totale
population_queen_elisabeth = QueryCurs.execute('''SELECT HeadOfState, SUM(Population)
FROM Country
WHERE HeadOfState = "Elisabeth II"''').fetchall()
# La part de la population parlant anglais
Part_parlant_anglais= QueryCurs.execute('''SELECT Language, SUM(Percentage*0.01*Population)
FROM
Country
LEFT JOIN
CountryLanguage
ON Country.Code = CountryLanguage.CountryCode
WHERE HeadOfState = "Elisabeth II"
AND Language = "English"
''').fetchall()
# La rรฉponse est 78% d'aprรจs ces donnรฉes
Part_parlant_anglais[0][1]/population_queen_elisabeth[0][1]
## on trouve 83% si on ne fait pas attention au fait que dans certaines zones, 0% de la population parle anglais
## La population totale n'est alors pas la bonne, comme dans cet exemple
QueryCurs.execute('''SELECT Language,
SUM(Population_pays*0.01*Percentage) as Part_parlant_anglais, SUM(Population_pays) as Population_totale
FROM (SELECT Language, Code, Percentage, SUM(Population) as Population_pays
FROM
Country
LEFT JOIN
CountryLanguage
ON Country.Code = CountryLanguage.CountryCode
WHERE HeadOfState = "Elisabeth II" AND Language == "English"
GROUP BY Code)''').fetchall()
```
Conclusion: il vaut mieux รฉcrire deux requรชtes simples et lisibles pour obtenir le bon rรฉsultat, plutรดt qu'une requรชte qui fait tout en une seule passe mais dont on va devoir vรฉrifier la correction longuement...
## Question 4 - passons ร Pandas
Crรฉer une DataFrame qui contient les informations suivantes par pays :
- le nom
- le code du pays
- le nombre de langues parlรฉes
- le nombre de langues officielles
- la population
- le GNP
- l'espรฉrance de vie
**Indice : utiliser la commande pd.read_sql_query**
Que dit la matrice de corrรฉlation de ces variables ?
```
df = pd.read_sql_query('''SELECT Code, Name, Population, GNP , LifeExpectancy,
COUNT(*) as Nb_langues_parlees, SUM(IsOfficial) as Nb_langues_officielles
FROM Country
INNER JOIN CountryLanguage ON Country.Code = CountryLanguage.CountryCode
GROUP BY Country.Code''',
CreateDataBase)
df.head()
df.corr()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import scipy
import scipy.linalg
import matplotlib.pyplot as plt
import sklearn.metrics
import sklearn.neighbors
import time
import os
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, TensorDataset
import ipdb
import bda_utils
bda_utils.setup_seed(10)
```
# 1. BDA Part
## 1.a. Define BDA methodology
```
def kernel(ker, X1, X2, gamma):
    """Build the kernel matrix used by BDA.

    Data is passed with samples in columns of the transposed arrays, hence
    the ``.T`` before the pairwise-kernel calls (JDA/BDA convention).

    :param ker: kernel name: None/'' / 'primal' | 'linear' | 'rbf'
    :param X1: first data block
    :param X2: second data block, or None for a self-kernel
    :param gamma: bandwidth for the rbf kernel (ignored otherwise)
    :return: X1 unchanged for 'primal', a kernel matrix for 'linear'/'rbf',
             None for any unrecognized kernel name
    """
    if not ker or ker == 'primal':
        # no kernelization: work directly in the input space
        return X1
    if ker == 'linear':
        left = np.asarray(X1).T
        if X2 is None:
            return sklearn.metrics.pairwise.linear_kernel(left)
        return sklearn.metrics.pairwise.linear_kernel(left, np.asarray(X2).T)
    if ker == 'rbf':
        left = np.asarray(X1).T
        if X2 is None:
            return sklearn.metrics.pairwise.rbf_kernel(left, None, gamma)
        return sklearn.metrics.pairwise.rbf_kernel(left, np.asarray(X2).T, gamma)
    return None
def proxy_a_distance(source_X, target_X):
    """
    Compute the Proxy-A-Distance of a source/target representation.

    Trains a linear SVM to discriminate source rows (label 0) from target
    rows (label 1); the distance is 2*(1 - 2*err), where err is the
    classifier's training error. Higher values mean the domains are easier
    to tell apart.

    :param source_X: (ns, n_feature) source samples
    :param target_X: (nt, n_feature) target samples
    :return: the proxy A-distance (float in [-2, 2])
    """
    # Local import: the notebook preamble only imports sklearn.metrics and
    # sklearn.neighbors, so the bare names `svm`/`metrics` used previously
    # were NameErrors at call time.
    import sklearn.svm

    nb_source = np.shape(source_X)[0]
    nb_target = np.shape(target_X)[0]

    train_X = np.vstack((source_X, target_X))
    train_Y = np.hstack((np.zeros(nb_source, dtype=int),
                         np.ones(nb_target, dtype=int)))

    clf = sklearn.svm.LinearSVC(random_state=0)
    clf.fit(train_X, train_Y)
    y_pred = clf.predict(train_X)
    error = sklearn.metrics.mean_absolute_error(train_Y, y_pred)
    dist = 2 * (1 - 2 * error)
    return dist
def estimate_mu(_X1, _Y1, _X2, _Y2):
    """Estimate the BDA balance factor mu from Proxy-A-Distances.

    mu weighs the conditional (per-class) distribution distance against the
    marginal one: mu = d_cond / (d_cond + d_marg), clipped to [0, 1] with
    values below 1e-3 snapped to 0. Labels are assumed to be 1..C.
    """
    marginal_dist = proxy_a_distance(_X1, _X2)
    n_classes = len(np.unique(_Y1))
    eps = 1e-3

    # Average the A-distance of matching class slices across domains.
    per_class = []
    for label in range(1, n_classes + 1):
        src_rows = _X1[np.where(_Y1 == label)[0], :]
        tgt_rows = _X2[np.where(_Y2 == label)[0], :]
        per_class.append(proxy_a_distance(src_rows, tgt_rows))
    cond_dist = sum(per_class) / n_classes

    mu = cond_dist / (cond_dist + marginal_dist)
    if mu > 1:
        mu = 1
    if mu < eps:
        mu = 0
    return mu
class BDA:
    """Balanced Distribution Adaptation (BDA / WBDA) domain-adaptation transform.

    Learns a projection A that aligns source and target feature distributions
    by minimising a mu-weighted sum of marginal and conditional MMD terms,
    iteratively refining target pseudo-labels (cf. Wang et al., ICDM 2017).
    """
    def __init__(self, kernel_type='primal', dim=30, lamb=1, mu=0.5, gamma=1, T=10, mode='BDA', estimate_mu=False):
        '''
        Init func
        :param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
        :param dim: dimension after transfer
        :param lamb: lambda value in equation (regularisation strength)
        :param mu: balance factor between marginal and conditional terms;
            can be estimated automatically via A-distance (see estimate_mu)
        :param gamma: kernel bandwidth for rbf kernel
        :param T: iteration number
        :param mode: 'BDA' | 'WBDA'
        :param estimate_mu: True | False, if you want to automatically estimate mu
            instead of manually setting it
        '''
        self.kernel_type = kernel_type
        self.dim = dim
        self.lamb = lamb
        self.mu = mu
        self.gamma = gamma
        self.T = T
        self.mode = mode
        self.estimate_mu = estimate_mu

    def fit(self, Xs, Ys, Xt, Yt):
        '''
        Transform and Predict using 1NN as JDA paper did
        :param Xs: ns * n_feature, source feature
        :param Ys: ns * 1, source label
        :param Xt: nt * n_feature, target feature
        :param Yt: nt * 1, target label
        :return: Xs_new, Xt_new, A — projected source/target features and the
            projection matrix
        '''
        # ipdb.set_trace()
        list_acc = []
        X = np.hstack((Xs.T, Xt.T))  # X.shape: [n_feature, ns+nt]
        # Column-wise L2 normalisation (np.linalg.norm, not a mean, despite the name).
        X_mean = np.linalg.norm(X, axis=0)
        X_mean[X_mean==0] = 1  # guard all-zero columns against division by zero
        X /= X_mean
        m, n = X.shape
        ns, nt = len(Xs), len(Xt)
        # MMD indicator vector: +1/ns for source columns, -1/nt for target columns.
        e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
        C = np.unique(Ys)
        H = np.eye(n) - 1 / n * np.ones((n, n))  # centering matrix
        mu = self.mu
        M = 0
        Y_tar_pseudo = None  # target pseudo-labels, refreshed each iteration
        Xs_new = None
        for t in range(self.T):
            print('\tStarting iter %i'%t)
            N = 0  # accumulator for the conditional (per-class) MMD matrix
            M0 = e * e.T * len(C)  # marginal MMD matrix, scaled by class count
            # ipdb.set_trace()
            if Y_tar_pseudo is not None:
                for i in range(len(C)):
                    e = np.zeros((n, 1))
                    Ns = len(Ys[np.where(Ys == C[i])])
                    Nt = len(Y_tar_pseudo[np.where(Y_tar_pseudo == C[i])])
                    if self.mode == 'WBDA':
                        # WBDA: weight each class by its target/source prior ratio.
                        Ps = Ns / len(Ys)
                        Pt = Nt / len(Y_tar_pseudo)
                        alpha = Pt / Ps
                        # mu = 1
                    else:
                        alpha = 1
                    tt = Ys == C[i]
                    e[np.where(tt == True)] = 1 / Ns
                    # ipdb.set_trace()
                    yy = Y_tar_pseudo == C[i]
                    ind = np.where(yy == True)
                    # Shift target indices past the ns source columns.
                    inds = [item + ns for item in ind]
                    try:
                        e[tuple(inds)] = -alpha / Nt
                        e[np.isinf(e)] = 0
                    except:
                        # Class absent from the pseudo-labels (Nt == 0): zero it out.
                        e[tuple(inds)] = 0
                    N = N + np.dot(e, e.T)
            # ipdb.set_trace()
            # In BDA, mu can be set or automatically estimated using A-distance
            # In WBDA, we find that setting mu=1 is enough
            if self.estimate_mu and self.mode == 'BDA':
                if Xs_new is not None:
                    mu = estimate_mu(Xs_new, Ys, Xt_new, Y_tar_pseudo)
                else:
                    mu = 0
            # ipdb.set_trace()
            M = (1 - mu) * M0 + mu * N
            M /= np.linalg.norm(M, 'fro')
            # ipdb.set_trace()
            K = kernel(self.kernel_type, X, None, gamma=self.gamma)
            n_eye = m if self.kernel_type == 'primal' else n
            # Generalised eigenproblem (K M K^T + lamb I) a = w (K H K^T) a;
            # the dim smallest-eigenvalue eigenvectors form the projection A.
            a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
            w, V = scipy.linalg.eig(a, b)
            ind = np.argsort(w)
            A = V[:, ind[:self.dim]]
            Z = np.dot(A.T, K)
            # Column-wise L2 normalisation of the projected features.
            Z_mean = np.linalg.norm(Z, axis=0)
            Z_mean[Z_mean==0] = 1
            Z /= Z_mean
            Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
            global device
            # Refresh pseudo-labels with a linear SVM trained on projected source.
            # NOTE(review): relies on sklearn.svm being importable as an attribute
            # of sklearn even though only sklearn.metrics/neighbors are imported
            # at the top of this notebook — confirm.
            model = sklearn.svm.SVC(kernel='linear').fit(Xs_new, Ys.ravel())
            Y_tar_pseudo = model.predict(Xt_new)
            # ipdb.set_trace()
            # MSE between pseudo-labels and true target classes, printed per iter.
            acc = sklearn.metrics.mean_squared_error(Y_tar_pseudo, Yt)  # Yt is already in classes
            print(acc)
        return Xs_new, Xt_new, A #, acc, Y_tar_pseudo, list_acc
```
## 1.b. Load Data
```
# Load source (Xs) and target (Xt) detector series, keep a single detector
# (column 8), and min-max normalise each series to [0, 1].
Xs, Xt = bda_utils.load_data(if_weekday=1, if_interdet=1)
Xs = Xs[:,8:9]
Xt = Xt[:,8:9]
Xs, Xs_min, Xs_max = bda_utils.normalize2D(Xs)
Xt, Xt_min, Xt_max = bda_utils.normalize2D(Xt)
# Visual sanity check: overlay the source and target series per kept detector.
for i in range(Xs.shape[1]):
    plt.figure(figsize=[20,4])
    plt.plot(Xs[:, i])
    plt.plot(Xt[:, i])
```
## 1.d. Hyperparameters
```
# Hyper-parameters for the sliding-window + BDA pipeline, collected in one
# place and rendered as a one-row DataFrame for display.
label_seq_len = 7  # label window length consumed by sliding_window
# batch_size = full batch
seq_len = 12       # input window length (12 x 15-min slots = 3 h)
reduced_dim = 4    # BDA drops this many dims: projected dim = seq_len - reduced_dim
inp_dim = min(Xs.shape[1], Xt.shape[1])
label_dim = min(Xs.shape[1], Xt.shape[1])
# NOTE(review): hid_dim/layers look like leftovers from an RNN setup; the
# pipeline below uses a random forest and never reads them — confirm.
hid_dim = 12
layers = 1
lamb = 2   # BDA regularisation lambda
MU = 0.7   # BDA balance factor mu
bda_dim = label_seq_len-4
kernel_type = 'linear'

hyper = {
    'inp_dim':inp_dim,
    'label_dim':label_dim,
    'label_seq_len':label_seq_len,
    'seq_len':seq_len,
    'reduced_dim':reduced_dim,
    'hid_dim':hid_dim,
    'layers':layers,
    'lamb':lamb,
    'MU': MU,
    'bda_dim':bda_dim,
    'kernel_type':kernel_type}
hyper = pd.DataFrame(hyper, index=['Values'])
hyper
```
## 1.e. Apply BDA and get $Xs_{new}$, $Xt_{new}$
```
# Trim the source series to one day (96 x 15-min slots), then build sliding
# windows: inputs [sample, seq_len, n_det] and labels [sample, label_seq_len, n_det].
Xs = Xs[:96, :]

# [sample size, seq_len, inp_dim (dets)], [sample size, label_seq_len, inp_dim (dets)]
Xs_3d, Ys_3d = bda_utils.sliding_window(Xs, Xs, seq_len, label_seq_len)
Xt_3d, Yt_3d = bda_utils.sliding_window(Xt, Xt, seq_len, label_seq_len)
# Keep only the final step of each label window (one-step-ahead target).
Ys_3d = Ys_3d[:, label_seq_len-1:, :]
Yt_3d = Yt_3d[:, label_seq_len-1:, :]

print(Xs_3d.shape)
print(Ys_3d.shape)
print(Xt_3d.shape)
print(Yt_3d.shape)

t_s = time.time()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

Xs_train_3d = []
Ys_train_3d = []
Xt_valid_3d = []
Xt_train_3d = []
Yt_valid_3d = []
Yt_train_3d = []

# NOTE(review): Xs was sliced to a single detector column above, so this loop
# runs once; with multiple detectors only the LAST iteration's split would
# survive into train_x/train_y below — confirm that is intended.
for i in range(Xs_3d.shape[2]):
    print('Starting det %i'%i)
    bda = BDA(kernel_type='linear', dim=seq_len-reduced_dim, lamb=lamb, mu=MU, gamma=1, T=2) # T is iteration time
    Xs_new, Xt_new, A = bda.fit(
        Xs_3d[:, :, i], bda_utils.get_class(Ys_3d[:, :, i]), Xt_3d[:, :, i], bda_utils.get_class(Yt_3d[:, :, i])
    ) # input shape: ns, n_feature | ns, n_label_feature

    # normalize the projected features back to [0, 1]
    Xs_new, Xs_new_min, Xs_new_max = bda_utils.normalize2D(Xs_new)
    Xt_new, Xt_new_min, Xt_new_max = bda_utils.normalize2D(Xt_new)

    print(Xs_new.shape)
    print(Xt_new.shape)

    # Target split: first day_train_t days (96 slots each) train, rest validation.
    day_train_t = 1
    Xs_train = Xs_new.copy()
    Ys_train = Ys_3d[:, :, i]
    Xt_valid = Xt_new.copy()[int(96*day_train_t):, :]
    Xt_train = Xt_new.copy()[:int(96*day_train_t), :]
    Yt_valid = Yt_3d[:, :, i].copy()[int(96*day_train_t):, :]
    Yt_train = Yt_3d[:, :, i].copy()[:int(96*day_train_t), :]

print('Time spent:%.5f'%(time.time()-t_s))
print(Xs_train.shape)
print(Ys_train.shape)
print(Xt_valid.shape)
print(Xt_train.shape)
print(Yt_valid.shape)
print(Yt_train.shape)

# Train on projected source samples plus the target training day.
train_x = np.vstack([Xs_train, Xt_train])
train_y = np.vstack([Ys_train, Yt_train])
```
# 2. Regression Part
```
# Fit a shallow random forest on the BDA-projected source + target-train data.
# Labels are flattened because the label windows are single-step here.
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(max_depth=3, random_state=10)
regr.fit(train_x, train_y.flatten())
```
# 3. Evaluation
```
# Evaluate on the target validation split: plot prediction vs ground truth
# and report NRMSE / MAPE / sMAPE / MAE.
g_t = Yt_valid.flatten()
pred = regr.predict(Xt_valid)

plt.figure(figsize=[16,4])
plt.plot(g_t, label='label')
plt.plot(pred, label='predict')
plt.legend()

print(bda_utils.nrmse_loss_func(pred, g_t, 0))
print(bda_utils.mape_loss_func(pred, g_t, 0))
print(bda_utils.smape_loss_func(pred, g_t, 0))
print(bda_utils.mae_loss_func(pred, g_t, 0))

# Compare against previously saved no-transfer ("base") RF predictions.
pred_base = pd.read_csv('./runs_base/base_data_plot/pred_base_RF.csv', header=None)
g_t_base = pd.read_csv('./runs_base/base_data_plot/g_t_base_RF.csv', header=None)

plt.rc('text', usetex=True)
plt.rcParams["font.family"] = "Times New Roman"
plt.figure(figsize=[20, 6], dpi=300)
# Align series lengths, then de-normalise flow back to veh/hr via
# x * (max - min) + min. NOTE(review): the 15/903 constants presumably
# correspond to Xt_min/Xt_max — confirm against the normalisation step.
diff = g_t_base.shape[0]-g_t.shape[0]
plt.plot(range(g_t.shape[0]), g_t_base[diff:]*(903-15)+15, 'b', label='Ground Truth')
plt.plot(range(g_t.shape[0]), pred_base[diff:]*(903-15)+15, 'g', label='Base Model (RF)')
# plt.figure()
# plt.plot(range(371), g_t_bda)
plt.plot(range(g_t.shape[0]), pred*(903-15)+15, 'r', label='BDA (RF)')
plt.legend(loc=1, fontsize=18)
plt.xlabel('Time [15 min]', fontsize=18)
plt.ylabel('Flow [veh/hr]', fontsize=18)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# 初学者的 TensorFlow 2.0 教程
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/quickstart/beginner"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />ๅจ TensorFlow.org ่ง็</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/quickstart/beginner.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />ๅจ Google Colab ่ฟ่ก</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/quickstart/beginner.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />ๅจ GitHub ๆฅ็ๆบไปฃ็ </a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/quickstart/beginner.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />ไธ่ฝฝ็ฌ่ฎฐๆฌ</a>
</td>
</table>
Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为,所以无法保证它们是最准确的,并且反映了最新的
[官方英文文档](https://www.tensorflow.org/?hl=en)。如果您有改进此翻译的建议,请提交 pull request 到
[tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入
[docs-zh-cn@tensorflow.org Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。
这是一个 [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb) 笔记本文件。Python 程序可以直接在浏览器中运行,这是学习 Tensorflow 的绝佳方式。想要学习该教程,请点击此页面顶部的按钮,在 Google Colab 中运行笔记本。
1. 在 Colab 中,连接到 Python 运行环境:在菜单条的右上方,选择 *CONNECT*。
2. 运行所有的代码块:选择 *Runtime* > *Run all*。
下载并安装 TensorFlow 2.0 测试版包。将 TensorFlow 载入你的程序:
```
from __future__ import absolute_import, division, print_function, unicode_literals

# Install / select TensorFlow 2.x (the %tensorflow_version magic exists
# only on Colab; elsewhere the ImportError-free `pass` leaves the local
# TensorFlow installation in place).
try:
    # Colab only
    %tensorflow_version 2.x
except Exception:
    pass

import tensorflow as tf
```
载入并准备好 [MNIST 数据集](http://yann.lecun.com/exdb/mnist/)。将样本从整数转换为浮点数:
```
# MNIST: 60k train / 10k test 28x28 grayscale digits.
mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixel intensities from [0, 255] integers to [0, 1] floats.
x_train, x_test = x_train / 255.0, x_test / 255.0
```
将模型的各层堆叠起来,以搭建 `tf.keras.Sequential` 模型。为训练选择优化器和损失函数:
```
# Minimal fully-connected classifier: flatten the 28x28 image, one hidden
# ReLU layer with dropout, and a 10-way softmax output.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])

# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
训练并验证模型:
```
# Train for 5 epochs, then report loss/accuracy on the held-out test set.
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
```
现在,这个照片分类器的准确度已经达到 98%。想要了解更多,请阅读 [TensorFlow 教程](https://www.tensorflow.org/tutorials/)。
| github_jupyter |
# OpenVINO benchmarking with 2D U-Net
In this tutorial, we will use the Intel® Distribution of OpenVINO™ Toolkit to perform benchmarking.
This tutorial assumes that you have already downloaded and installed [Intel® OpenVINO™](https://software.intel.com/en-us/openvino-toolkit/choose-download) on your computer.
In order to use Intel® OpenVINO™, we need to do a few steps:
1. Convert our Keras model to a Tensorflow model.
1. Freeze the Tensorflow saved format model
1. Use the OpenVINO Model Optimizer to convert the above freezed-model to the OpenVINO Intermediate Representation (IR) format
1. Benchmark using the OpenVINO benchmark tool: `/opt/intel/openvino/deployment_tools/tools/benchmark_tool/benchmark_app.py`
```
import keras
import os
import tensorflow as tf
import numpy as np
import keras as K
import shutil, sys
def dice_coef(y_true, y_pred, axis=(1, 2), smooth=1):
    """Sorenson (soft) Dice coefficient, averaged over the batch.

    dice = (2 * |T ∩ P| + smooth) / (|T| + |P| + smooth),
    where T is the ground-truth mask and P is the prediction mask.
    """
    overlap = tf.reduce_sum(y_true * y_pred, axis=axis)
    combined = tf.reduce_sum(y_true + y_pred, axis=axis)
    per_sample = (tf.constant(2.) * overlap + smooth) / (combined + smooth)
    return tf.reduce_mean(per_sample)
def soft_dice_coef(target, prediction, axis=(1, 2), smooth=0.01):
    """Soft Dice coefficient on raw (un-rounded) predictions.

    Identical to dice_coef except predictions are not thresholded and the
    smoothing term is smaller:
    dice = (2 * |T ∩ P| + smooth) / (|T| + |P| + smooth).
    """
    overlap = tf.reduce_sum(target * prediction, axis=axis)
    combined = tf.reduce_sum(target + prediction, axis=axis)
    per_sample = (tf.constant(2.) * overlap + smooth) / (combined + smooth)
    return tf.reduce_mean(per_sample)
def dice_coef_loss(target, prediction, axis=(1, 2), smooth=1.):
    """Negative-log soft Dice loss.

    Uses -log(dice) rather than 1 - dice: the log is better behaved, and
    working with log(num) and log(den) separately avoids the division,
    which helps prevent underflow when the sums are very small.
    """
    overlap = tf.reduce_sum(prediction * target, axis=axis)
    pred_total = tf.reduce_sum(prediction, axis=axis)
    true_total = tf.reduce_sum(target, axis=axis)
    num = tf.reduce_mean(overlap + smooth)
    den = tf.reduce_mean(true_total + pred_total + smooth)
    return tf.log(den) - tf.log(2. * num)
def combined_dice_ce_loss(y_true, y_pred, axis=(1, 2), smooth=1.,
                          weight=0.9):
    """Weighted blend of the Dice loss and binary cross-entropy.

    Returns weight * dice_coef_loss + (1 - weight) * BCE.
    """
    dice_part = weight * dice_coef_loss(y_true, y_pred, axis, smooth)
    bce_part = (1 - weight) * K.losses.binary_crossentropy(y_true, y_pred)
    return dice_part + bce_part
# Load the trained Keras U-Net, export it as a TF SavedModel, freeze the
# graph, and (last cell) run the OpenVINO Model Optimizer on the frozen graph.
inference_filename = "unet_decathlon_4_8814_128x128_randomcrop-any-input.h5"
model_filename = os.path.join("/home/ubuntu/models/unet", inference_filename)

# Load model — the custom loss/metric callables must be supplied so Keras
# can deserialise the compiled model.
print("Loading Model... ")
model = K.models.load_model(model_filename, custom_objects={
    "combined_dice_ce_loss": combined_dice_ce_loss,
    "dice_coef_loss": dice_coef_loss,
    "soft_dice_coef": soft_dice_coef,
    "dice_coef": dice_coef})

print("Model loaded successfully from: " + model_filename)

sess = keras.backend.get_session()
sess.run(tf.global_variables_initializer())

import shutil, sys

output_directory = "/home/ubuntu/models/unet/output"

print("Freezing the graph.")
# Learning phase 0 = inference mode (disables dropout/batch-norm updates).
keras.backend.set_learning_phase(0)

signature = tf.saved_model.signature_def_utils.predict_signature_def(
    inputs={'input': model.input}, outputs={'output': model.output})

# If directory exists, delete it and let builder rebuild the TF model.
if os.path.isdir(output_directory):
    print (output_directory, "exists already. Deleting the folder")
    shutil.rmtree(output_directory)

builder = tf.saved_model.builder.SavedModelBuilder(output_directory)

builder.add_meta_graph_and_variables(sess=sess,
                                     tags=[tf.saved_model.tag_constants.SERVING],
                                     signature_def_map={
                                         tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:signature
                                     }, saver=tf.train.Saver())
builder.save()

print("TensorFlow protobuf version of model is saved in:", output_directory)

print("Model input name = ", model.input.op.name)
print("Model input shape = ", model.input.shape)
print("Model output name = ", model.output.op.name)
print("Model output shape = ", model.output.shape)

# Freeze: fold the SavedModel variables into graph constants, producing a
# single .pb file the OpenVINO Model Optimizer can consume.
output_frozen_model_dir = "/home/ubuntu/models/unet/frozen_model"
output_frozen_graph = output_frozen_model_dir+'/saved_model_frozen.pb'

if not os.path.isdir(output_frozen_model_dir):
    os.mkdir(output_frozen_model_dir)
else:
    print('Directory', output_frozen_model_dir, 'already exists. Deleting it and re-creating it')
    shutil.rmtree(output_frozen_model_dir)
    os.mkdir(output_frozen_model_dir)

from tensorflow.python.tools.freeze_graph import freeze_graph

_ = freeze_graph(input_graph="",
                 input_saver="",
                 input_binary=False,
                 input_checkpoint="",
                 restore_op_name="save/restore_all",
                 filename_tensor_name="save/Const:0",
                 clear_devices=True,
                 initializer_nodes="",
                 input_saved_model_dir=output_directory,
                 output_node_names=model.output.op.name,
                 output_graph=output_frozen_graph)

print("TensorFlow Frozen model model is saved in:", output_frozen_graph)

output_frozen_model_dir = "/home/ubuntu/models/unet/frozen_model"
output_frozen_graph = output_frozen_model_dir+'/saved_model_frozen.pb'
if not os.path.exists(output_frozen_graph):
    print(output_frozen_graph + ' doesn\'t exist. Please make sure you have a trained keras to TF frozen model')

# Convert the frozen graph to OpenVINO IR (FP32) via the MO shell command.
!mo_tf.py \
--input_model '/home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb' \
--input_shape=[1,160,160,4] \
--data_type FP32 \
--output_dir /home/ubuntu/models/unet/IR_models/FP32 \
--model_name saved_model
```
#### Run the following command in the terminal
```
mo_tf.py \
--input_model '/home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb' \
--input_shape=[1,160,160,4] \
--data_type FP32 \
--output_dir /home/ubuntu/models/unet/IR_models/FP32 \
--model_name saved_model
```
#### Sample Output:
```
(tensorflow_p36) ubuntu@ip-172-31-46-30:~$ mo_tf.py \
> --input_model '/home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb' \
> --input_shape=[1,160,160,4] \
> --data_type FP32 \
> --output_dir /home/ubuntu/models/unet/IR_models/FP32 \
> --model_name saved_model
Model Optimizer arguments:
Common parameters:
- Path to the Input Model: /home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb
- Path for generated IR: /home/ubuntu/models/unet/IR_models/FP32
- IR output name: saved_model
- Log level: ERROR
- Batch: Not specified, inherited from the model
- Input layers: Not specified, inherited from the model
- Output layers: Not specified, inherited from the model
- Input shapes: [1,160,160,4]
- Mean values: Not specified
- Scale values: Not specified
- Scale factor: Not specified
- Precision of IR: FP32
- Enable fusing: True
- Enable grouped convolutions fusing: True
- Move mean values to preprocess section: False
- Reverse input channels: False
TensorFlow specific parameters:
- Input model in text protobuf format: False
- Path to model dump for TensorBoard: None
- List of shared libraries with TensorFlow custom layers implementation: None
- Update the configuration file with input/output node names: None
- Use configuration file used to generate the model with Object Detection API: None
- Operations to offload: None
- Patterns to offload: None
- Use the config file: None
Model Optimizer version: 2020.1.0-61-gd349c3ba4a
[ SUCCESS ] Generated IR version 10 model.
[ SUCCESS ] XML file: /home/ubuntu/models/unet/IR_models/FP32/saved_model.xml
[ SUCCESS ] BIN file: /home/ubuntu/models/unet/IR_models/FP32/saved_model.bin
[ SUCCESS ] Total execution time: 6.41 seconds.
[ SUCCESS ] Memory consumed: 443 MB.
```
## Benchmark
Benchmark using the following command:
```
python3 /opt/intel/openvino/deployment_tools/tools/benchmark_tool/benchmark_app.py \
-m /home/ubuntu/models/unet/IR_models/FP32/saved_model.xml \
-nireq 1 -nstreams 1
```
#### Sample Output
```
[Step 1/11] Parsing and validating input arguments
[Step 2/11] Loading Inference Engine
[ INFO ] InferenceEngine:
API version............. 2.1.37988
[ INFO ] Device info
CPU
MKLDNNPlugin............ version 2.1
Build................... 37988
[Step 3/11] Reading the Intermediate Representation network
[Step 4/11] Resizing network to match image sizes and given batch
[ INFO ] Network batch size: 1, precision: MIXED
[Step 5/11] Configuring input of the model
[Step 6/11] Setting device configuration
[Step 7/11] Loading the model to the device
[Step 8/11] Setting optimal runtime parameters
[Step 9/11] Creating infer requests and filling input blobs with images
[ INFO ] Network input 'MRImages' precision FP32, dimensions (NCHW): 1 4 160 160
[ WARNING ] No input files were given: all inputs will be filled with random values!
[ INFO ] Infer Request 0 filling
[ INFO ] Fill input 'MRImages' with random values (some binary data is expected)
[Step 10/11] Measuring performance (Start inference asyncronously, 1 inference requests using 1 streams for CPU, limits: 60000 ms duration)
[Step 11/11] Dumping statistics report
Count: 11079 iterations
Duration: 60014.36 ms
Latency: 5.11 ms
Throughput: 184.61 FPS
```
| github_jupyter |
# Convolutional Neural Networks: Step by Step
Welcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation.
**Notation**:
- Superscript $[l]$ denotes an object of the $l^{th}$ layer.
- Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
- Superscript $(i)$ denotes an object from the $i^{th}$ example.
- Example: $x^{(i)}$ is the $i^{th}$ training example input.
- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
- Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.
- $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$.
- $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$.
We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
```
import numpy as np
import h5py
import matplotlib.pyplot as plt

%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

%load_ext autoreload
%autoreload 2

# Fixed seed so every random call is reproducible (required for grading).
np.random.seed(1)
```
## 2 - Outline of the Assignment
You will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:
- Convolution functions, including:
- Zero Padding
- Convolve window
- Convolution forward
- Convolution backward (optional)
- Pooling functions, including:
- Pooling forward
- Create mask
- Distribute value
- Pooling backward (optional)
This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:
<img src="images/model.png" style="width:800px;height:300px;">
**Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation.
## 3 - Convolutional Neural Networks
Although programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below.
<img src="images/conv_nn.png" style="width:350px;height:200px;">
In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself.
### 3.1 - Zero-Padding
Zero-padding adds zeros around the border of an image:
<img src="images/PAD.png" style="width:600px;height:400px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>
The main benefits of padding are the following:
- It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer.
- It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.
**Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:
```python
a = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))
```
```
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
"""
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
"""
### START CODE HERE ### (โ 1 line)
X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)
### END CODE HERE ###
return X_pad
# Smoke test for zero_pad: pad a random batch by 2, print shapes/values, and
# visualise one channel of the first image before and after padding.
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =", x.shape)
print ("x_pad.shape =", x_pad.shape)
print ("x[1,1] =", x[1,1])
print ("x_pad[1,1] =", x_pad[1,1])

fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
**Expected Output**:
<table>
<tr>
<td>
**x.shape**:
</td>
<td>
(4, 3, 3, 2)
</td>
</tr>
<tr>
<td>
**x_pad.shape**:
</td>
<td>
(4, 7, 7, 2)
</td>
</tr>
<tr>
<td>
**x[1,1]**:
</td>
<td>
[[ 0.90085595 -0.68372786]
[-0.12289023 -0.93576943]
[-0.26788808 0.53035547]]
</td>
</tr>
<tr>
<td>
**x_pad[1,1]**:
</td>
<td>
[[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]
[ 0. 0.]]
</td>
</tr>
</table>
### 3.2 - Single step of convolution
In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which:
- Takes an input volume
- Applies a filter at every position of the input
- Outputs another volume (usually of different size)
<img src="images/Convolution_schematic.gif" style="width:500px;height:300px;">
<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>
In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output.
Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation.
**Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).
```
# GRADED FUNCTION: conv_single_step

def conv_single_step(a_slice_prev, W, b):
    """
    Apply one filter defined by parameters W on a single slice (a_slice_prev)
    of the output activation of the previous layer.

    Arguments:
    a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
    W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
    b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)

    Returns:
    Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
    """
    ### START CODE HERE ### (~ 2 lines of code)
    # Element-wise product of the window with the filter, summed over the
    # whole volume, plus the (scalar-cast) bias.
    weighted = a_slice_prev * W
    Z = float(b) + np.sum(weighted)
    ### END CODE HERE ###

    return Z
# Smoke test for conv_single_step on a random 4x4x3 slice.
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)

Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
```
**Expected Output**:
<table>
<tr>
<td>
**Z**
</td>
<td>
-6.99908945068
</td>
</tr>
</table>
### 3.3 - Convolutional Neural Networks - Forward pass
In the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume:
<center>
<video width="620" height="440" src="images/conv_kiank.mp4" type="video/mp4" controls>
</video>
</center>
**Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding.
**Hint**:
1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:
```python
a_slice_prev = a_prev[0:2,0:2,:]
```
This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.
2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.
<img src="images/vert_horiz_kiank.png" style="width:400px;height:300px;">
<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>
**Reminder**:
The formulas relating the output shape of the convolution to the input shape is:
$$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor + 1 $$
$$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor + 1 $$
$$ n_C = \text{number of filters used in the convolution}$$
For this exercise, we won't worry about vectorization, and will just implement everything with for-loops.
```
# GRADED FUNCTION: conv_forward
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
    """
    Implements the forward propagation for a convolution function.

    Arguments:
    A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
    b -- Biases, numpy array of shape (1, 1, 1, n_C)
    hparameters -- python dictionary containing "stride" and "pad"

    Returns:
    Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
    cache -- cache of values needed for the conv_backward() function
    """
    ### START CODE HERE ###
    # Retrieve dimensions from A_prev's shape
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    # Retrieve dimensions from W's shape
    (f, f, n_C_prev, n_C) = W.shape
    # Retrieve information from "hparameters"
    stride = hparameters["stride"]
    pad = hparameters["pad"]
    # Compute the dimensions of the CONV output volume; int() floors the division.
    n_H = int((n_H_prev - f + 2*pad)/stride) + 1
    # BUG FIX: the output width must come from n_W_prev, not n_H_prev.
    # The original only worked for square inputs (n_H_prev == n_W_prev).
    n_W = int((n_W_prev - f + 2*pad)/stride) + 1
    # Initialize the output volume Z with zeros.
    Z = np.zeros((m, n_H, n_W, n_C))
    # Create A_prev_pad by padding A_prev
    A_prev_pad = zero_pad(A_prev, pad)

    for i in range(m):                          # loop over the batch of training examples
        a_prev_pad = A_prev_pad[i]              # ith training example's padded activation
        for h in range(n_H):                    # loop over vertical axis of the output volume
            for w in range(n_W):                # loop over horizontal axis of the output volume
                for c in range(n_C):            # loop over channels (= #filters) of the output volume
                    # Corners of the current "slice" in the padded input
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    # (3D) slice of a_prev_pad covering all input channels
                    a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    # Convolve the slice with filter c and its (single) bias to get one output neuron.
                    Z[i, h, w, c] = conv_single_step(a_slice_prev, W[..., c], b[..., c])
    ### END CODE HERE ###

    # Making sure your output shape is correct
    assert(Z.shape == (m, n_H, n_W, n_C))

    # Save information in "cache" for the backprop
    cache = (A_prev, W, b, hparameters)

    return Z, cache
# Smoke-test conv_forward: batch of 10 4x4x3 inputs, eight 2x2 filters,
# pad 2 and stride 2. Seeded so the printed values are reproducible.
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
               "stride": 2}

Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =", np.mean(Z))
print("Z[3,2,1] =", Z[3,2,1])
# cache_conv[0] is A_prev, so this prints one input pixel's channel values.
print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3])
```
**Expected Output**:
<table>
<tr>
<td>
**Z's mean**
</td>
<td>
0.0489952035289
</td>
</tr>
<tr>
<td>
**Z[3,2,1]**
</td>
<td>
[-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437
5.18531798 8.75898442]
</td>
</tr>
<tr>
<td>
**cache_conv[0][1][2][3]**
</td>
<td>
[-0.20075807 0.18656139 0.41005165]
</td>
</tr>
</table>
Finally, CONV layer should also contain an activation, in which case we would add the following line of code:
```python
# Convolve the window to get back one output neuron
Z[i, h, w, c] = ...
# Apply activation
A[i, h, w, c] = activation(Z[i, h, w, c])
```
You don't need to do it here.
## 4 - Pooling layer
The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are:
- Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.
- Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.
<table>
<td>
<img src="images/max_pool1.png" style="width:500px;height:300px;">
<td>
<td>
<img src="images/a_pool.png" style="width:500px;height:300px;">
<td>
</table>
These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over.
### 4.1 - Forward Pooling
Now, you are going to implement MAX-POOL and AVG-POOL, in the same function.
**Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.
**Reminder**:
As there's no padding, the formulas binding the output shape of the pooling to the input shape is:
$$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$
$$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$
$$ n_C = n_{C_{prev}}$$
```
# GRADED FUNCTION: pool_forward
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
    """
    Forward pass of the pooling layer.

    Arguments:
    A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    hparameters -- python dictionary containing "f" and "stride"
    mode -- pooling mode, defined as a string ("max" or "average")

    Returns:
    A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
    cache -- (A_prev, hparameters), consumed by the backward pass
    """
    # Input dimensions and hyperparameters.
    m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
    f = hparameters["f"]
    stride = hparameters["stride"]

    # Output dimensions (pooling uses no padding).
    n_H = int(1 + (n_H_prev - f) / stride)
    n_W = int(1 + (n_W_prev - f) / stride)
    n_C = n_C_prev

    A = np.zeros((m, n_H, n_W, n_C))

    # Select the window reduction once, outside the loops. An unrecognized
    # mode leaves A all zeros, just like the original if/elif.
    if mode == "max":
        reduce_window = np.max
    elif mode == "average":
        reduce_window = np.mean
    else:
        reduce_window = None

    if reduce_window is not None:
        for example in range(m):                    # training examples
            for row in range(n_H):                  # vertical output axis
                top = row * stride
                for col in range(n_W):              # horizontal output axis
                    left = col * stride
                    for channel in range(n_C):      # channels
                        # Window of the input covered by this output cell.
                        window = A_prev[example, top:top + f, left:left + f, channel]
                        A[example, row, col, channel] = reduce_window(window)

    # Store the input and hparameters in "cache" for pool_backward()
    cache = (A_prev, hparameters)

    assert(A.shape == (m, n_H, n_W, n_C))

    return A, cache
# Demo: pool a (2, 4, 4, 3) batch with a 3x3 window and stride 2,
# in both max and average modes. Seeded for reproducible output.
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = {"stride" : 2, "f": 3}

A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A =", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A =", A)
```
**Expected Output:**
<table>
<tr>
<td>
A =
</td>
<td>
[[[[ 1.74481176 0.86540763 1.13376944]]]
[[[ 1.13162939 1.51981682 2.18557541]]]]
</td>
</tr>
<tr>
<td>
A =
</td>
<td>
[[[[ 0.02105773 -0.20328806 -0.40389855]]]
[[[-0.22154621 0.51716526 0.48155844]]]]
</td>
</tr>
</table>
Congratulations! You have now implemented the forward passes of all the layers of a convolutional network.
The remainder of this notebook is optional, and will not be graded.
## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)
In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like.
When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you can calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly present them below.
### 5.1 - Convolutional layer backward pass
Let's start by implementing the backward pass for a CONV layer.
#### 5.1.1 - Computing dA:
This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:
$$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$
Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that at each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices.
In code, inside the appropriate for-loops, this formula translates into:
```python
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
```
#### 5.1.2 - Computing dW:
This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:
$$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$
Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$.
In code, inside the appropriate for-loops, this formula translates into:
```python
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
```
#### 5.1.3 - Computing db:
This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:
$$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$
As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost.
In code, inside the appropriate for-loops, this formula translates into:
```python
db[:,:,:,c] += dZ[i, h, w, c]
```
**Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
```
def conv_backward(dZ, cache):
    """
    Implement the backward propagation for a convolution function

    Arguments:
    dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
    cache -- cache of values needed for the conv_backward(), output of conv_forward()

    Returns:
    dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
               numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    dW -- gradient of the cost with respect to the weights of the conv layer (W)
          numpy array of shape (f, f, n_C_prev, n_C)
    db -- gradient of the cost with respect to the biases of the conv layer (b)
          numpy array of shape (1, 1, 1, n_C)

    NOTE(review): this is the optional/ungraded student exercise -- every
    `None` below is a placeholder to be filled in; the function raises until
    completed. Do not "fix" the placeholders when editing the notebook.
    """
    ### START CODE HERE ###
    # Retrieve information from "cache"
    (A_prev, W, b, hparameters) = None
    # Retrieve dimensions from A_prev's shape
    (m, n_H_prev, n_W_prev, n_C_prev) = None
    # Retrieve dimensions from W's shape
    (f, f, n_C_prev, n_C) = None
    # Retrieve information from "hparameters"
    stride = None
    pad = None
    # Retrieve dimensions from dZ's shape
    (m, n_H, n_W, n_C) = None
    # Initialize dA_prev, dW, db with the correct shapes
    dA_prev = None
    dW = None
    db = None
    # Pad A_prev and dA_prev
    A_prev_pad = None
    dA_prev_pad = None

    for i in range(None):                       # loop over the training examples
        # select ith training example from A_prev_pad and dA_prev_pad
        a_prev_pad = None
        da_prev_pad = None
        for h in range(None):                   # loop over vertical axis of the output volume
            for w in range(None):               # loop over horizontal axis of the output volume
                for c in range(None):           # loop over the channels of the output volume
                    # Find the corners of the current "slice"
                    vert_start = None
                    vert_end = None
                    horiz_start = None
                    horiz_end = None
                    # Use the corners to define the slice from a_prev_pad
                    a_slice = None
                    # Update gradients for the window and the filter's parameters using the code formulas given above
                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None
                    dW[:,:,:,c] += None
                    db[:,:,:,c] += None
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
        dA_prev[i, :, :, :] = None
    ### END CODE HERE ###

    # Making sure your output shape is correct
    assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))

    return dA_prev, dW, db

# Run the (completed) backward pass on the conv_forward outputs from above.
np.random.seed(1)
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
```
** Expected Output: **
<table>
<tr>
<td>
**dA_mean**
</td>
<td>
1.45243777754
</td>
</tr>
<tr>
<td>
**dW_mean**
</td>
<td>
1.72699145831
</td>
</tr>
<tr>
<td>
**db_mean**
</td>
<td>
7.83923256462
</td>
</tr>
</table>
## 5.2 Pooling layer - backward pass
Next, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer.
### 5.2.1 Max pooling - backward pass
Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following:
$$ X = \begin{bmatrix}
1 && 3 \\
4 && 2
\end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}
0 && 0 \\
1 && 0
\end{bmatrix}\tag{4}$$
As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask.
**Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward.
Hints:
- [np.max()]() may be helpful. It computes the maximum of an array.
- If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:
```
A[i,j] = True if X[i,j] = x
A[i,j] = False if X[i,j] != x
```
- Here, you don't need to consider cases where there are several maxima in a matrix.
```
def create_mask_from_window(x):
    """
    Creates a mask from an input matrix x, to identify the max entry of x.

    Arguments:
    x -- Array of shape (f, f)

    Returns:
    mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.

    NOTE(review): student exercise -- `mask` is a placeholder.
    Hint from the text: compare x elementwise with np.max(x).
    """
    ### START CODE HERE ### (approx. 1 line)
    mask = None
    ### END CODE HERE ###
    return mask

# Demo: the mask should be True only where x attains its maximum.
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
```
**Expected Output:**
<table>
<tr>
<td>
**x =**
</td>
<td>
[[ 1.62434536 -0.61175641 -0.52817175] <br>
[-1.07296862 0.86540763 -2.3015387 ]]
</td>
</tr>
<tr>
<td>
**mask =**
</td>
<td>
[[ True False False] <br>
[False False False]]
</td>
</tr>
</table>
Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost.
### 5.2.2 - Average pooling - backward pass
In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.
For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like:
$$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}
1/4 && 1/4 \\
1/4 && 1/4
\end{bmatrix}\tag{5}$$
This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average.
**Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
```
def distribute_value(dz, shape):
    """
    Distributes the input value in the matrix of dimension shape

    Arguments:
    dz -- input scalar
    shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz

    Returns:
    a -- Array of size (n_H, n_W) for which we distributed the value of dz

    NOTE(review): student exercise -- the `None` assignments are placeholders.
    """
    ### START CODE HERE ###
    # Retrieve dimensions from shape (approx. 1 line)
    (n_H, n_W) = None
    # Compute the value to distribute on the matrix (approx. 1 line)
    average = None
    # Create a matrix where every entry is the "average" value (approx. 1 line)
    a = None
    ### END CODE HERE ###
    return a

# Demo: distributing 2 over a 2x2 window should yield all entries 0.5.
a = distribute_value(2, (2,2))
print('distributed value =', a)
```
**Expected Output**:
<table>
<tr>
<td>
distributed_value =
</td>
<td>
[[ 0.5 0.5]
<br\>
[ 0.5 0.5]]
</td>
</tr>
</table>
### 5.2.3 Putting it together: Pooling backward
You now have everything you need to compute backward propagation on a pooling layer.
**Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.
```
def pool_backward(dA, cache, mode = "max"):
    """
    Implements the backward pass of the pooling layer

    Arguments:
    dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
    cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
    mode -- the pooling mode you would like to use, defined as a string ("max" or "average")

    Returns:
    dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev

    NOTE(review): student exercise -- every `None` below is a placeholder to
    be filled in; the function will fail until it is completed.
    """
    ### START CODE HERE ###
    # Retrieve information from cache (approx. 1 line)
    (A_prev, hparameters) = None
    # Retrieve hyperparameters from "hparameters" (approx. 2 lines)
    stride = None
    f = None
    # Retrieve dimensions from A_prev's shape and dA's shape (approx. 2 lines)
    m, n_H_prev, n_W_prev, n_C_prev = None
    m, n_H, n_W, n_C = None
    # Initialize dA_prev with zeros (approx. 1 line)
    dA_prev = None

    for i in range(None):                       # loop over the training examples
        # select training example from A_prev (approx. 1 line)
        a_prev = None
        for h in range(None):                   # loop on the vertical axis
            for w in range(None):               # loop on the horizontal axis
                for c in range(None):           # loop over the channels (depth)
                    # Find the corners of the current "slice" (approx. 4 lines)
                    vert_start = None
                    vert_end = None
                    horiz_start = None
                    horiz_end = None
                    # Compute the backward propagation in both modes.
                    if mode == "max":
                        # Use the corners and "c" to define the current slice from a_prev (approx. 1 line)
                        a_prev_slice = None
                        # Create the mask from a_prev_slice (approx. 1 line)
                        mask = None
                        # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (approx. 1 line)
                        dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None
                    elif mode == "average":
                        # Get the value a from dA (approx. 1 line)
                        da = None
                        # Define the shape of the filter as fxf (approx. 1 line)
                        shape = None
                        # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (approx. 1 line)
                        dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None
    ### END CODE ###

    # Making sure your output shape is correct
    assert(dA_prev.shape == A_prev.shape)

    return dA_prev

# Exercise the (completed) pool_backward in both pooling modes.
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)

dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
```
**Expected Output**:
mode = max:
<table>
<tr>
<td>
**mean of dA =**
</td>
<td>
0.145713902729
</td>
</tr>
<tr>
<td>
**dA_prev[1,1] =**
</td>
<td>
[[ 0. 0. ] <br>
[ 5.05844394 -1.68282702] <br>
[ 0. 0. ]]
</td>
</tr>
</table>
mode = average
<table>
<tr>
<td>
**mean of dA =**
</td>
<td>
0.145713902729
</td>
</tr>
<tr>
<td>
**dA_prev[1,1] =**
</td>
<td>
[[ 0.08485462 0.2787552 ] <br>
[ 1.26461098 -0.25749373] <br>
[ 1.17975636 -0.53624893]]
</td>
</tr>
</table>
### Congratulations !
Congratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.
| github_jupyter |
<a href="https://colab.research.google.com/github/KordingLab/ENGR344/blob/master/tutorials/W4D1_How_do_we_know_how_certain_we_should_be/TA/W4D1_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Tutorial 4: Model Selection: Bias-variance trade-off
**Module 4: How do we know how certain we should be?**
**Originally By Neuromatch Academy**
**Content creators**: Pierre-รtienne Fiquet, Anqi Wu, Alex Hyafil with help from Byron Galbraith
**Content reviewers**: Lina Teichmann, Madineh Sarvestani, Patrick Mineault, Ella Batty, Michael Waskom
**Content Modifiers**: Konrad Kording, Ilenna Jones
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
```
# @title Due Dates Calendar
# Display-only cell: embeds the course Google Calendar inside a widget tab.
from ipywidgets import widgets
from IPython.display import display, IFrame, YouTubeVideo

out1 = widgets.Output()
with out1:
    calendar = IFrame(src="https://calendar.google.com/calendar/embed?src=356b9d2nspjttvgbb3tvgk2f58%40group.calendar.google.com&ctz=America%2FNew_York", width=600, height=480)
    display(calendar)

out = widgets.Tab([out1])
out.set_title(0, 'Calendar')
display(out)
```
---
# Tutorial Objectives
*Estimated timing of tutorial: 25 minutes*
This is Tutorial 5 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).
In this tutorial, we will learn about the bias-variance tradeoff and see it in action using polynomial regression models.
Tutorial objectives:
* Understand difference between test and train data
* Compare train and test error for models of varying complexity
* Understand how bias-variance tradeoff relates to what model we choose
```
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
# Display-only cell: renders the slide deck, then the lecture video in a tab.
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/2mkq4/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)

# @title Video 1: Bias Variance Tradeoff
from ipywidgets import widgets
out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="O0KEY0xpzMk", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1])
out.set_title(0, 'Youtube')
display(out)
```
---
# Setup
```
# Imports
import numpy as np
import matplotlib.pyplot as plt

#@title Figure Settings
# Render figures at retina resolution and apply the NMA matplotlib style sheet.
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Plotting Functions
# @title Plotting Functions
def plot_MSE_poly_fits(mse_train, mse_test, max_order):
    """
    Plot the MSE values for various orders of polynomial fits on the same bar
    graph

    Args:
      mse_train (ndarray): an array of MSE values for each order of polynomial fit
                           over the training data
      mse_test (ndarray): an array of MSE values for each order of polynomial fit
                          over the test data
      max_order (scalar): max order of polynomial fit
    """
    bar_width = .35
    orders = np.arange(max_order + 1)
    # Paired bars per order: train MSE on the left, test MSE on the right.
    fig, ax = plt.subplots()
    ax.bar(orders - bar_width / 2, mse_train, bar_width, label="train MSE")
    ax.bar(orders + bar_width / 2, mse_test, bar_width, label="test MSE")
    ax.legend()
    ax.set(xlabel='Polynomial order', ylabel='MSE', title='Comparing polynomial fits')
# @title Helper functions
# @title Helper functions
def ordinary_least_squares(x, y):
    """Ordinary least squares estimator for linear regression.

    Solves the normal equations: theta = (X^T X)^{-1} X^T y.

    Args:
      x (ndarray): design matrix of shape (n_samples, n_regressors)
      y (ndarray): vector of measurements of shape (n_samples)

    Returns:
      ndarray: estimated parameter values of shape (n_regressors)
    """
    gram = x.T @ x
    return np.linalg.inv(gram) @ x.T @ y
def make_design_matrix(x, order):
    """Create the design matrix of inputs for use in polynomial regression

    Args:
      x (ndarray): input vector of shape (n_samples)
      order (scalar): polynomial regression order

    Returns:
      ndarray: design matrix for polynomial regression of shape (samples, order+1)
    """
    # Promote a 1D input to a column so the column-stacking below works;
    # a 2D input (multiple features) is used as-is.
    features = x[:, None] if x.ndim == 1 else x

    # Column of ones for x^0 (avoids duplicate intercept columns when x has
    # several features), followed by one block of columns per degree.
    columns = [np.ones((features.shape[0], 1))]
    columns.extend(features ** degree for degree in range(1, order + 1))

    return np.hstack(columns)
def solve_poly_reg(x, y, max_order):
    """Fit a polynomial regression model for each order 0 through max_order.

    Args:
      x (ndarray): input vector of shape (n_samples)
      y (ndarray): vector of measurements of shape (n_samples)
      max_order (scalar): max order for polynomial fits

    Returns:
      dict: fitted weights for each polynomial model (dict key is order)
    """
    # One OLS fit per polynomial order, keyed by order.
    return {
        order: ordinary_least_squares(make_design_matrix(x, order), y)
        for order in range(max_order + 1)
    }
```
---
# Section 1: Train vs test data
*Estimated timing to here from start of tutorial: 8 min*
The data used for the fitting procedure for a given model is the **training data**. In tutorial 4, we computed MSE on the training data of our polynomial regression models and compared training MSE across models. An additional important type of data is **test data**. This is held-out data that is not used (in any way) during the fitting procedure. When fitting models, we often want to consider both the train error (the quality of prediction on the training data) and the test error (the quality of prediction on the test data) as we will see in the next section.
We will generate some noisy data for use in this tutorial using a similar process as in Tutorial 4. However, now we will also generate test data. We want to see how our model generalizes beyond the range of values seen in the training phase. To accomplish this, we will generate x from a wider range of values ([-3, 3]). We then plot the train and test data together.
```
# @markdown Execute this cell to simulate both training and test data

### Generate training data
np.random.seed(0)
n_train_samples = 50
x_train = np.random.uniform(-2, 2.5, n_train_samples) # sample from a uniform distribution over [-2, 2.5)
noise = np.random.randn(n_train_samples) # sample from a standard normal distribution
y_train = x_train**2 - x_train - 2 + noise

### Generate testing data
# Test inputs span a wider range than training ([-3, 3) vs [-2, 2.5)) so we
# can see how the models extrapolate beyond the training domain.
n_test_samples = 20
x_test = np.random.uniform(-3, 3, n_test_samples) # sample from a uniform distribution over [-3, 3)
noise = np.random.randn(n_test_samples) # sample from a standard normal distribution
y_test = x_test**2 - x_test - 2 + noise

## Plot both train and test data
fig, ax = plt.subplots()
plt.title('Training & Test Data')
plt.plot(x_train, y_train, '.', markersize=15, label='Training')
plt.plot(x_test, y_test, 'g+', markersize=15, label='Test')
plt.legend()
plt.xlabel('x')
plt.ylabel('y');
```
---
# Section 2: Bias-variance tradeoff
*Estimated timing to here from start of tutorial: 10 min*
<details>
<summary> <font color='blue'>Click here for text recap of video </font></summary>
Finding a good model can be difficult. One of the most important concepts to keep in mind when modeling is the **bias-variance tradeoff**.
**Bias** is the difference between the prediction of the model and the corresponding true output variables you are trying to predict. Models with high bias will not fit the training data well since the predictions are quite different from the true data. These high bias models are overly simplified - they do not have enough parameters and complexity to accurately capture the patterns in the data and are thus **underfitting**.
**Variance** refers to the variability of model predictions for a given input. Essentially, do the model predictions change a lot with changes in the exact training data used? Models with high variance are highly dependent on the exact training data used - they will not generalize well to test data. These high variance models are **overfitting** to the data.
In essence:
* High bias, low variance models have high train and test error.
* Low bias, high variance models have low train error, high test error
* Low bias, low variance models have low train and test error
As we can see from this list, we ideally want low bias and low variance models! These goals can be in conflict though - models with enough complexity to have low bias also tend to overfit and depend on the training data more. We need to decide on the correct tradeoff.
In this section, we will see the bias-variance tradeoff in action with polynomial regression models of different orders.
</details>
Graphical illustration of bias and variance.
(Source: http://scott.fortmann-roe.com/docs/BiasVariance.html)

We will first fit polynomial regression models of orders 0-5 on our simulated training data just as we did in Tutorial 4.
```
# @markdown Execute this cell to estimate theta_hats
# Fit polynomial regression models of orders 0 through 5 on the training data.
max_order = 5
theta_hats = solve_poly_reg(x_train, y_train, max_order)
```
## Coding Exercise 2: Compute and compare train vs test error
We will use MSE as our error metric again. Compute MSE on training data ($x_{train},y_{train}$) and test data ($x_{test}, y_{test}$) for each polynomial regression model (orders 0-5). Since you already developed code in T4 Exercise 4 for making design matrices and evaluating fit polynomials, we have ported that here into the functions `make_design_matrix` and `evaluate_poly_reg` for your use.
*Please think about after completing exercise before reading the following text! Do you think the order 0 model has high or low bias? High or low variance? How about the order 5 model?*
```
# @markdown Execute this cell for function `evalute_poly_reg`
def evaluate_poly_reg(x, y, theta_hats, max_order):
    """Compute the MSE of fitted polynomial regression models on a dataset.

    Args:
      x (ndarray): input vector of shape (n_samples)
      y (ndarray): vector of measurements of shape (n_samples)
      theta_hats (dict): fitted weights for each polynomial model (dict key is order)
      max_order (scalar): max order of polynomial fit

    Returns
      (ndarray): mean squared error for each order, shape (max_order)
    """
    errors = np.zeros(max_order + 1)
    for order in range(max_order + 1):
        design = make_design_matrix(x, order)
        predictions = design @ theta_hats[order]
        errors[order] = np.mean((y - predictions) ** 2)
    return errors
def compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order):
    """Compute MSE on training data and test data.

    Student exercise scaffold: raises NotImplementedError until the two
    `...` placeholders below are filled in (see evaluate_poly_reg).

    Args:
      x_train (ndarray): training data input vector of shape (n_samples)
      x_test (ndarray): test data input vector of shape (n_samples)
      y_train (ndarray): training vector of measurements of shape (n_samples)
      y_test (ndarray): test vector of measurements of shape (n_samples)
      theta_hats (dict): fitted weights for each polynomial model (dict key is order)
      max_order (scalar): max order of polynomial fit

    Returns:
      ndarray, ndarray: MSE error on training data and test data for each order

    Raises:
      NotImplementedError: always, until the exercise is completed.
    """
    #######################################################
    ## TODO for students: calculate mse error for both sets
    ## Hint: look back at tutorial 5 where we calculated MSE
    # Fill out function and remove
    # (fixed typo in the error message: "excercise" -> "exercise")
    raise NotImplementedError("Student exercise: calculate mse for train and test set")
    #######################################################
    mse_train = ...
    mse_test = ...
    return mse_train, mse_test
# Compute train and test MSE
mse_train, mse_test = compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order)
# Visualize
plot_MSE_poly_fits(mse_train, mse_test, max_order)
# to_remove solution
def compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order):
    """Evaluate every fitted polynomial model on the train and test splits.

    Args:
      x_train (ndarray): training data input vector of shape (n_samples)
      x_test (ndarray): test vector of shape (n_samples)
      y_train (ndarray): training vector of measurements of shape (n_samples)
      y_test (ndarray): test vector of measurements of shape (n_samples)
      theta_hats (dict): fitted weights for each polynomial model (dict key is order)
      max_order (scalar): max order of polynomial fit

    Returns:
      ndarray, ndarray: MSE on training data and on test data, per order
    """
    splits = ((x_train, y_train), (x_test, y_test))
    train_err, test_err = (
        evaluate_poly_reg(inputs, targets, theta_hats, max_order)
        for inputs, targets in splits
    )
    return train_err, test_err
# Compute train and test MSE
mse_train, mse_test = compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order)
# Visualize
with plt.xkcd():
plot_MSE_poly_fits(mse_train, mse_test, max_order)
```
As we can see from the plot above, more complex models (higher order polynomials) have lower MSE for training data. The overly simplified models (orders 0 and 1) have high MSE on the training data. As we add complexity to the model, we go from high bias to low bias.
The MSE on test data follows a different pattern. The best test MSE is for an order 2 model - this makes sense as the data was generated with an order 2 model. Both simpler models and more complex models have higher test MSE.
So to recap:
Order 0 model: High bias, low variance
Order 5 model: Low bias, high variance
Order 2 model: Just right, low bias, low variance
---
# Summary
*Estimated timing of tutorial: 25 minutes*
- Training data is the data used for fitting, test data is held-out data.
- We need to strike the right balance between bias and variance. Ideally we want to find a model with optimal model complexity that has both low bias and low variance
- Too complex models have low bias and high variance.
- Too simple models have high bias and low variance.
**Note**
- Bias and variance are very important concepts in modern machine learning, but it has recently been observed that they do not necessarily trade off (see for example the phenomenon and theory of "double descent")
**Further readings:**
- [The elements of statistical learning](https://web.stanford.edu/~hastie/ElemStatLearn/) by Hastie, Tibshirani and Friedman
---
# Bonus
## Bonus Exercise
Prove the bias-variance decomposition for MSE
\begin{align}
\mathbb{E}_{x}\left[\left(y-\hat{y}(x ; \theta)\right)^{2}\right]=\left(\operatorname{Bias}_{x}[\hat{y}(x ; \theta)]\right)^{2}+\operatorname{Var}_{x}[\hat{y}(x ; \theta)]+\sigma^{2}
\end{align}
where
\begin{align}
\operatorname{Bias}_{x}[\hat{y}(x ; \theta)]=\mathbb{E}_{x}[\hat{y}(x ; \theta)]-y
\end{align}
and
\begin{align}
\operatorname{Var}_{x}[\hat{y}(x ; \theta)]=\mathbb{E}_{x}\left[\hat{y}(x ; \theta)^{2}\right]-\mathrm{E}_{x}[\hat{y}(x ; \theta)]^{2}
\end{align}
Hint: use
\begin{align}
\operatorname{Var}[X]=\mathbb{E}\left[X^{2}\right]-(\mathrm{E}[X])^{2}
\end{align}
| github_jupyter |
# SageMaker endpoint
To deploy the model you previously trained, you need to create a Sagemaker Endpoint. This is a hosted prediction service that you can use to perform inference.
## Finding the model
This notebook uses a stored model if it exists. If you recently ran a training example that uses the `%store` magic, it will be restored in the next cell.
Otherwise, you can pass the URI to the model file (a .tar.gz file) in the `model_data` variable.
You can find your model files through the [SageMaker console](https://console.aws.amazon.com/sagemaker/home) by choosing **Training > Training jobs** in the left navigation pane. Find your recent training job, choose it, and then look for the `s3://` link in the **Output** pane. Uncomment the model_data line in the next cell that manually sets the model's URI.
```
# Retrieve a saved model from a previous notebook run's stored variable
%store -r model_data
# If no model was found, set it manually here.
# model_data = 's3://sagemaker-us-west-2-XXX/pytorch-smdataparallel-mnist-2020-10-16-17-15-16-419/output/model.tar.gz'
print("Using this model: {}".format(model_data))
```
## Create a model object
You define the model object by using SageMaker SDK's `PyTorchModel` and pass in the model from the `estimator` and the `entry_point`. The endpoint's entry point for inference is defined by `model_fn` as seen in the following code block that prints out `inference.py`. The function loads the model and sets it to use a GPU, if available.
```
!pygmentize code/inference.py
import sagemaker
role = sagemaker.get_execution_role()
from sagemaker.pytorch import PyTorchModel
model = PyTorchModel(model_data=model_data, source_dir='code',
entry_point='inference.py', role=role, framework_version='1.6.0', py_version='py3')
```
### Deploy the model on an endpoint
You create a `predictor` by using the `model.deploy` function. You can optionally change both the instance count and instance type.
```
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
## Test the model
You can test the deployed model using samples from the test set.
```
# Download the test set
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# TODO: can be removed after upgrade to torchvision==0.9.1
# see github.com/pytorch/vision/issues/1938 and github.com/pytorch/vision/issues/3549
datasets.MNIST.urls = [
'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz',
'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'
]
test_set = datasets.MNIST('data', download=True, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# Randomly sample 16 images from the test set
test_loader = DataLoader(test_set, shuffle=True, batch_size=16)
# `iter(...).next()` is the Python 2 / old-PyTorch idiom and fails on
# modern DataLoader iterators; the builtin next() works on any iterator.
test_images, _ = next(iter(test_loader))
# inspect the images
import torchvision
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def imshow(img):
    """Display a CHW image tensor with matplotlib.

    Converts the tensor to a NumPy array, reorders the axes from
    (channels, height, width) to (height, width, channels), and plots it.
    """
    hwc = np.transpose(img.numpy(), (1, 2, 0))
    plt.imshow(hwc)
    return
# unnormalize the test images for displaying
unnorm_images = (test_images * 0.3081) + 0.1307
print("Sampled test images: ")
imshow(torchvision.utils.make_grid(unnorm_images))
# Send the sampled images to endpoint for inference
outputs = predictor.predict(test_images.numpy())
predicted = np.argmax(outputs, axis=1)
print("Predictions: ")
print(predicted.tolist())
```
## Cleanup
If you don't intend on trying out inference or to do anything else with the endpoint, you should delete it.
```
predictor.delete_endpoint()
```
| github_jupyter |
```
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
<img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
# Getting Started MovieLens: Training with TensorFlow
## Overview
We observed that TensorFlow training pipelines can be slow as the dataloader is a bottleneck. The native dataloader in TensorFlow randomly sample each item from the dataset, which is very slow. The window dataloader in TensorFlow is not much faster. In our experiments, we are able to speed-up existing TensorFlow pipelines by 9x using a highly optimized dataloader.<br><br>
Applying deep learning models to recommendation systems faces unique challenges in comparison to other domains, such as computer vision and natural language processing. The datasets and common model architectures have unique characteristics, which require custom solutions. Recommendation system datasets have terabytes in size with billion examples but each example is represented by only a few bytes. For example, the [Criteo CTR dataset](https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/), the largest publicly available dataset, is 1.3TB with 4 billion examples. The model architectures have normally large embedding tables for the users and items, which do not fit on a single GPU. You can read more in our [blogpost](https://medium.com/nvidia-merlin/why-isnt-your-recommender-system-training-faster-on-gpu-and-what-can-you-do-about-it-6cb44a711ad4).
### Learning objectives
This notebook explains, how to use the NVTabular dataloader to accelerate TensorFlow training.
1. Use **NVTabular dataloader** with TensorFlow Keras model
2. Leverage **multi-hot encoded input features**
### MovieLens25M
The [MovieLens25M](https://grouplens.org/datasets/movielens/25m/) is a popular dataset for recommender systems and is used in academic publications. The dataset contains 25M movie ratings for 62,000 movies given by 162,000 users. Many projects use only the user/item/rating information of MovieLens, but the original dataset provides metadata for the movies, as well. For example, which genres a movie has. Although we may not improve state-of-the-art results with our neural network architecture, the purpose of this notebook is to explain how to integrate multi-hot categorical features into a neural network.
## NVTabular dataloader for TensorFlow
We've identified that the dataloader is one bottleneck in deep learning recommender systems when training pipelines with TensorFlow. The dataloader cannot prepare the next batch fast enough and therefore the GPU is not fully utilized.
We developed a highly customized tabular dataloader for accelerating existing pipelines in TensorFlow. In our experiments, we see a speed-up by 9x of the same training workflow with the NVTabular dataloader. The NVTabular dataloader's features are:
- removing bottleneck of item-by-item dataloading
- enabling larger than memory dataset by streaming from disk
- reading data directly into GPU memory and remove CPU-GPU communication
- preparing batch asynchronously in GPU to avoid CPU-GPU communication
- supporting commonly used .parquet format
- easy integration into existing TensorFlow pipelines by using similar API - works with tf.keras models
More information in our [blogpost](https://medium.com/nvidia-merlin/training-deep-learning-based-recommender-systems-9x-faster-with-tensorflow-cc5a2572ea49).
```
# External dependencies
import os
import glob
import nvtabular as nvt
```
We define our base input directory, containing the data.
```
INPUT_DATA_DIR = os.environ.get(
"INPUT_DATA_DIR", os.path.expanduser("~/nvt-examples/movielens/data/")
)
# path to save the models
MODEL_BASE_DIR = os.environ.get("MODEL_BASE_DIR", os.path.expanduser("~/nvt-examples/"))
```
### Defining Hyperparameters
First, we define the data schema and differentiate between single-hot and multi-hot categorical features. Note, that we do not have any numerical input features.
```
BATCH_SIZE = 1024 * 32 # Batch Size
CATEGORICAL_COLUMNS = ["movieId", "userId"] # Single-hot
CATEGORICAL_MH_COLUMNS = ["genres"] # Multi-hot
NUMERIC_COLUMNS = []
# Output from ETL-with-NVTabular
TRAIN_PATHS = sorted(glob.glob(os.path.join(INPUT_DATA_DIR, "train", "*.parquet")))
VALID_PATHS = sorted(glob.glob(os.path.join(INPUT_DATA_DIR, "valid", "*.parquet")))
```
In the previous notebook, we used NVTabular for ETL and stored the workflow to disk. We can load the NVTabular workflow to extract important metadata for our training pipeline.
```
workflow = nvt.Workflow.load(os.path.join(INPUT_DATA_DIR, "workflow"))
```
The embedding table shows the cardinality of each categorical variable along with its associated embedding size. Each entry is of the form `(cardinality, embedding_size)`.
```
EMBEDDING_TABLE_SHAPES, MH_EMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(workflow)
EMBEDDING_TABLE_SHAPES.update(MH_EMBEDDING_TABLE_SHAPES)
EMBEDDING_TABLE_SHAPES
```
### Initializing NVTabular Dataloader for Tensorflow
We import TensorFlow and some NVTabular TF extensions, such as custom TensorFlow layers supporting multi-hot and the NVTabular TensorFlow data loader.
```
import os
import tensorflow as tf
# we can control how much memory to give tensorflow with this environment variable
# IMPORTANT: make sure you do this before you initialize TF's runtime, otherwise
# TF will have claimed all free GPU memory
os.environ["TF_MEMORY_ALLOCATION"] = "0.7" # fraction of free memory
from nvtabular.loader.tensorflow import KerasSequenceLoader, KerasSequenceValidater
from nvtabular.framework_utils.tensorflow import layers
```
First, we take a look on our data loader and how the data is represented as tensors. The NVTabular data loader are initialized as usually and we specify both single-hot and multi-hot categorical features as cat_names. The data loader will automatically recognize the single/multi-hot columns and represent them accordingly.
```
train_dataset_tf = KerasSequenceLoader(
TRAIN_PATHS, # you could also use a glob pattern
batch_size=BATCH_SIZE,
label_names=["rating"],
cat_names=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,
cont_names=NUMERIC_COLUMNS,
engine="parquet",
shuffle=True,
buffer_size=0.06, # how many batches to load at once
parts_per_chunk=1,
)
valid_dataset_tf = KerasSequenceLoader(
VALID_PATHS, # you could also use a glob pattern
batch_size=BATCH_SIZE,
label_names=["rating"],
cat_names=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,
cont_names=NUMERIC_COLUMNS,
engine="parquet",
shuffle=False,
buffer_size=0.06,
parts_per_chunk=1,
)
```
Let's generate a batch and take a look on the input features.<br><br>
We can see, that the single-hot categorical features (`userId` and `movieId`) have a shape of `(32768, 1)`, which is the batchsize (as usually).<br><br>
For the multi-hot categorical feature `genres`, we receive two Tensors `genres__values` and `genres__nnzs`.<br><br>
`genres__values` are the actual data, containing the genre IDs. Note that the Tensor has more values than the batch_size. The reason is, that one datapoint in the batch can contain more than one genre (multi-hot).<br>
`genres__nnzs` are a supporting Tensor, describing how many genres are associated with each datapoint in the batch.<br><br>
For example,
- if the first value in `genres__nnzs` is `5`, then the first 5 values in `genres__values` are associated with the first datapoint in the batch (movieId/userId).<br>
- if the second value in `genres__nnzs` is `2`, then the 6th and the 7th values in `genres__values` are associated with the second datapoint in the batch (continuing after the previous value stopped).<br>
- if the third value in `genres__nnzs` is `1`, then the 8th value in `genres__values` is associated with the third datapoint in the batch.
- and so on
```
batch = next(iter(train_dataset_tf))
batch[0]
```
We can see that the sum of `genres__nnzs` is equal to the shape of `genres__values`.
```
tf.reduce_sum(batch[0]["genres__nnzs"])
```
As each datapoint can have a different number of genres, it is more efficient to represent the genres as two flat tensors: One with the actual values (`genres__values`) and one with the length for each datapoint (`genres__nnzs`).
```
del batch
```
### Defining Neural Network Architecture
We will define a common neural network architecture for tabular data.
* Single-hot categorical features are fed into an Embedding Layer
* Each value of a multi-hot categorical features is fed into an Embedding Layer and the multiple Embedding outputs are combined via averaging
* The output of the Embedding Layers are concatenated
* The concatenated layers are fed through multiple feed-forward layers (Dense Layers with ReLU activations)
* The final output is a single number with sigmoid activation function
First, we will define a dictionary and a list for our network architecture.
```
inputs = {} # tf.keras.Input placeholders for each feature to be used
emb_layers = [] # output of all embedding layers, which will be concatenated
```
We create `tf.keras.Input` tensors for all 4 input features.
```
for col in CATEGORICAL_COLUMNS:
inputs[col] = tf.keras.Input(name=col, dtype=tf.int32, shape=(1,))
# Note that we need two input tensors for multi-hot categorical features
for col in CATEGORICAL_MH_COLUMNS:
inputs[col + "__values"] = tf.keras.Input(name=f"{col}__values", dtype=tf.int64, shape=(1,))
inputs[col + "__nnzs"] = tf.keras.Input(name=f"{col}__nnzs", dtype=tf.int64, shape=(1,))
```
Next, we initialize Embedding Layers with `tf.feature_column.embedding_column`.
```
for col in CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS:
emb_layers.append(
tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_identity(
col, EMBEDDING_TABLE_SHAPES[col][0]
), # Input dimension (vocab size)
EMBEDDING_TABLE_SHAPES[col][1], # Embedding output dimension
)
)
emb_layers
```
NVTabular implemented a custom TensorFlow layer `layers.DenseFeatures`, which takes as an input the different `tf.Keras.Input` and pre-initialized `tf.feature_column` and automatically concatenate them into a flat tensor. In the case of multi-hot categorical features, `DenseFeatures` organizes the inputs `__values` and `__nnzs` to define a `RaggedTensor` and combine them. `DenseFeatures` can handle numeric inputs, as well, but MovieLens does not provide numerical input features.
```
emb_layer = layers.DenseFeatures(emb_layers)
x_emb_output = emb_layer(inputs)
x_emb_output
```
We can see that the output shape of the concatenated layer is equal to the sum of the individual Embedding output dimensions (1040 = 16+512+512).
```
EMBEDDING_TABLE_SHAPES
```
We add multiple Dense Layers. Finally, we initialize the `tf.keras.Model` and add the optimizer.
```
x = tf.keras.layers.Dense(128, activation="relu")(x_emb_output)
x = tf.keras.layers.Dense(128, activation="relu")(x)
x = tf.keras.layers.Dense(128, activation="relu")(x)
x = tf.keras.layers.Dense(1, activation="sigmoid", name="output")(x)
model = tf.keras.Model(inputs=inputs, outputs=x)
model.compile("sgd", "binary_crossentropy")
# You need to install the dependencies
tf.keras.utils.plot_model(model)
```
### Training the deep learning model
We can train our model with `model.fit`. We need to use a Callback to add the validation dataloader.
```
validation_callback = KerasSequenceValidater(valid_dataset_tf)
history = model.fit(train_dataset_tf, callbacks=[validation_callback], epochs=1)
MODEL_NAME_TF = os.environ.get("MODEL_NAME_TF", "movielens_tf")
MODEL_PATH_TEMP_TF = os.path.join(MODEL_BASE_DIR, MODEL_NAME_TF, "1/model.savedmodel")
model.save(MODEL_PATH_TEMP_TF)
```
Before moving to the next notebook, `04a-Triton-Inference-with-TF.ipynb`, we need to generate the Triton Inference Server configurations and save the models in the correct format. We just saved TensorFlow model to disk, and in the previous notebook `02-ETL-with-NVTabular`, we saved the NVTabular workflow. Let's load the workflow.
The TensorFlow input layers expect the input datatype to be int32. Therefore, we need to change the output datatypes to int32 for our NVTabular workflow.
```
workflow = nvt.Workflow.load(os.path.join(INPUT_DATA_DIR, "workflow"))
workflow.output_dtypes["userId"] = "int32"
workflow.output_dtypes["movieId"] = "int32"
MODEL_NAME_ENSEMBLE = os.environ.get("MODEL_NAME_ENSEMBLE", "movielens")
# model path to save the models
MODEL_PATH = os.environ.get("MODEL_PATH", os.path.join(MODEL_BASE_DIR, "models"))
```
NVTabular provides a function to save the NVTabular workflow, TensorFlow model and Triton Inference Server (IS) config files via `export_tensorflow_ensemble`. We provide the model, workflow, a model name for ensemble model, path and output column.
```
# Creates an ensemble triton server model, where
# model: The tensorflow model that should be served
# workflow: The nvtabular workflow used in preprocessing
# name: The base name of the various triton models
from nvtabular.inference.triton import export_tensorflow_ensemble
export_tensorflow_ensemble(model, workflow, MODEL_NAME_ENSEMBLE, MODEL_PATH, ["rating"])
```
Now, we can move to the next notebook, [04-Triton-Inference-with-TF.ipynb](https://github.com/NVIDIA/NVTabular/blob/main/examples/getting-started-movielens/04-Triton-Inference-with-TF.ipynb), to send inference request to the Triton IS.
| github_jupyter |
# My Project
In addition to being a place to experiment, this project has been structured to build and serve your model in a Flask application. The purpose is to allow data science exploration to easily transition into deployed services and applications on the OpenShift platform. After saving this project to git, it can be built on the OpenShift platform to serve models.
Your dependencies will live in `requirements.txt` and your prediction function will live in `prediction.py`. As a Python based s2i application, this project can be configured and built upon to fit your needs.
### Project Organization
```
.
โโโ README.md
โโโ LICENSE
โโโ requirements.txt <- Used to install packages for s2i application
โโโ 0_start_here.ipynb <- Instructional notebook
โโโ 1_run_flask.ipynb <- Notebook for running flask locally to test
โโโ 2_test_flask.ipynb <- Notebook for testing flask requests
โโโ .gitignore <- standard python gitignore
โโโ .s2i <- hidden folder for advanced s2i configuration
โย ย โโโ environment <- s2i environment settings
โโโ gunicorn_config.py <- configuration for gunicorn when run in OpenShift
โโโ prediction.py <- the predict function called from Flask
โโโ wsgi.py <- basic Flask application
```
### Basic Flow
1. Install and manage dependencies in `requirements.txt`.
1. Experiment as usual.
1. Extract your prediction into the `prediction.py` file.
1. Update any dependencies.
1. Run and test your application locally.
1. Save to git.
For a complete overview, please read the [README.md](./README.md)
## Install Dependencies
```
import sys
!{sys.executable} -m pip install -r requirements.txt
```
## Experiment
Experiment with data and create your prediction function. Create any serialized models needed.
```
def predict(args_dict):
    """Stub prediction function: ignores its input and returns a fixed payload."""
    return {'prediction': 'not implemented'}
# Smoke-test the stub with a sample payload.
predict({'keys': 'values'})
```
## Create a Predict Function
Extract the prediction logic into a standalone python file, `prediction.py` in a `predict` function. Also, make sure `requirements.txt` is updated with any additional packages you've used and need for prediction.
```
def predict(args_dict):
    """Placeholder predict function; always reports 'not implemented'."""
    response = {'prediction': 'not implemented'}
    return response
```
## Test Predict Function
```
from prediction import predict
predict({'keys': 'values'})
```
### Run Flask
Run flask in a separate notebook ([1_run_flask.ipynb](./1_run_flask.ipynb)) to create a local service to try it out. You must run the application in a separate notebook since it will use the kernel until stopped.
```
!FLASK_ENV=development FLASK_APP=wsgi.py flask run
```
### Test the Flask Endpoint
Test your new service endpoint in this notebook or from a separate notebook ([2_test_flask.ipynb](./2_test_flask.ipynb)) to try it out. You can use `curl` or the Python `requests` library, as shown below.
```
!curl -X POST -H "Content-Type: application/json" --data '{"data": "hello world"}' http://localhost:5000/predictions
import requests
import json
response = requests.post('http://127.0.0.1:5000/predictions', '{"hello":"world"}')
response.json()
```
### Save Your Project to Git (and Build)
Now that you've created and tested your prediction and service endpoint, push the code up to git. This can be built as an s2i application on OpenShift.
| github_jupyter |
```
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns #Control figure
import numpy as np
import os
from datetime import date
matplotlib.style.use('ggplot')
%matplotlib inline
from sodapy import Socrata
#MyAppToken = ''
#client = Socrata("data.cityofnewyork.us", MyAppToken)
#results = client.get("dsg6-ifza", limit=2000)
#df = pd.DataFrame.from_records(results)
cwd = os.getcwd() #to get current working directory
#print(cwd)
df = pd.read_csv('DOHMH_Childcare_Center_Inspections.csv', encoding = "L1")
df.info()
def clean_string(astr):
    """Normalize a column name: lowercase, strip punctuation, ASCII-fold
    Spanish accented vowels and enie, and replace spaces with underscores.

    NOTE(review): the original accent literals were mojibake (e.g. 'รก'
    instead of 'á' -- UTF-8 bytes decoded with the wrong codec), so the
    folding could never match real accented text; the intended characters
    are restored here.

    Args:
      astr (str): raw column name.

    Returns:
      str: cleaned, snake_case-ish column name.
    """
    # Ordered as in the original chain: punctuation removal, accent
    # folding, space -> underscore, and finally enie -> 'ni'.
    replacements = [
        ('.', ''), (',', ''), (';', ''), (':', ''),
        ('\u00e1', 'a'), ('\u00e9', 'e'), ('\u00ed', 'i'),
        ('\u00f3', 'o'), ('\u00fa', 'u'),
        (' ', '_'),
        ('\u00f1', 'ni'),
    ]
    cleaned = astr.lower()
    for old, new in replacements:
        cleaned = cleaned.replace(old, new)
    return cleaned
def clean_columns(df):
    """Normalize every column name of *df* in place via clean_string."""
    for original in list(df.columns):
        df.rename(columns={original: clean_string(original)}, inplace=True)
def execute(raw_dataset_path, clean_dataset_path):
    """Load the raw childcare-inspection CSV, normalize column names and
    string values, flag 'na' strings as missing, and write 'df.csv'.

    NOTE(review): `raw_dataset_path` and `clean_dataset_path` are never
    used -- the input and output paths are hard-coded below. Presumably
    they were meant to replace the literals; verify against callers.
    """
    print("\t-> Leyendo datos crudos a un DataFrame")
    # NOTE(review): reads a hard-coded file instead of raw_dataset_path.
    df = pd.read_csv('DOHMH_Childcare_Center_Inspections.csv')
    print("\t-> Limpando columnas")
    # Normalize column names (lowercase, no punctuation, underscores).
    clean_columns(df)
    df.info()
    print("\t-> Reemplazando espacios en blanco")
    # Collapse runs of whitespace to a single space in string columns.
    for col in df.select_dtypes('object'):
        df[col] = df[col].replace('\s+', ' ', regex=True)
    print("\t-> Limpiando valores")
    # Trim, lowercase, fold accents, and underscore spaces in the values.
    # NOTE(review): the accent literals below look mojibake-corrupted
    # (e.g. 'รก' rather than 'á') -- confirm against the file's encoding.
    for col in df.select_dtypes('object'):
        df[col] = df[col].str.strip()
        df[col] = df[col].str.lower()
        df[col] = df[col].str.replace('รก', 'a')
        df[col] = df[col].str.replace('รฉ', 'e')
        df[col] = df[col].str.replace('รญ', 'i')
        df[col] = df[col].str.replace('รณ', 'o')
        df[col] = df[col].str.replace('รบ', 'u')
        df[col] = df[col].str.replace(' ', '_')
    print("\t-> Cambiando NA por np.nan")
    # Treat the literal string 'na' as a proper missing value.
    for col in df.select_dtypes('object'):
        df.loc[df[col] == 'na', col] = np.nan
    # NOTE(review): writes a hard-coded path instead of clean_dataset_path.
    df.to_csv('df.csv', index=False)
    print("\t-> Cuรกntos valores NaN tiene la base")
    df.isnull().sum()
    print("\t-> Eliminar duplicados")
    df.duplicated().sum()
    df = df.drop_duplicates()
    df.shape
    df.info()
```
### TABLA 3
```
# Keep only the static columns (the first 28, from center_name through
# avg_critical_violation_rate) and drop fully duplicated rows.
tabla_3 = df.iloc[:, 0:28]  # select columns center_name .. avg_critical_violation_rate
tabla_3.info()
tabla_3 = tabla_3.drop_duplicates()
tabla_3.shape
```
3.1
* Conservar รบnicamente las variables estรกticas que se utilizaron en el modelo: daycareid, borough,maximum_capacity, program_type, facility_type, violation_rate_percent, total_educational_workers, public_health_hazard_violation_rate, critical_violation_rate.
```
# One-hot encode the categorical columns, append the indicator columns,
# and drop the original categorical source columns.
dummies = ["program_type", "facility_type", "borough"]
df_1 = pd.get_dummies(tabla_3[dummies])
tabla_3 = tabla_3.join(df_1)
tabla_3 = tabla_3.drop(['program_type', 'facility_type', 'borough'], axis = 1)
tabla_3.info()
```
### TABLA 4
Conservar รบnicamente las variables que aportaban informaciรณn sobre las inspecciones de la Tabla 2 (con la excepeciรณn de borough): daycareid, inspection_date, inspection_summary, violation_category y borough.
```
df.info()
# Keep only the inspection-related columns (by position: day-care id,
# inspection date, inspection summary result, violation category, borough).
tabla_4 = df.iloc[:, [4,12,28,30,33]]
print("\t-> Reagrupar en tres variables Inspection Summary Result: reason, result_1 y result_2")
# Split inspection_summary_result into reason / result_1 / result_2.
tabla_4['inspection_summary_result'] = tabla_4['inspection_summary_result'].astype('str')
df_3 = pd.DataFrame(tabla_4.inspection_summary_result.str.split('_-_',1).tolist(), columns= ['reason', 'result'])
df_3['result'] = df_3['result'].astype('str')
df_4 = pd.DataFrame(df_3.result.str.split(';_',1).tolist(), columns = ['result_1', 'result_2'])
df_3 = df_3.drop(df_3.columns[[1]], axis=1)
df_4 = df_4.join(df_3)
tabla_4 = tabla_4.join(df_4)
tabla_4 = tabla_4.drop(['inspection_summary_result'], axis = 1)  # drop the now-split source column
print("\t-> A la variable reason la hacemos dummy, es decir, initial annual inspection es 1 y en otro caso es cero")
tabla_4.reason.value_counts(dropna=False)
# Binary flag: 1 when the visit was the initial annual inspection.
tabla_4['initial_annual_inspection'] = tabla_4.reason.apply(lambda x: 1 if x == "initial_annual_inspection" else 0)
tabla_4.initial_annual_inspection.value_counts(dropna=False)
tabla_4 = tabla_4.drop(['reason'], axis=1)  # drop the reason column
print("\t-> Creamos dummies a las variables result_1 y result_2")
# One-hot encode the two result columns and drop the originals.
dummies = ["result_1", "result_2"]
df_2 = pd.get_dummies(tabla_4[dummies])
tabla_4 = tabla_4.join(df_2)
tabla_4 = tabla_4.drop(['result_1', 'result_2'], axis = 1)  # drop columns no longer needed
print("\t-> Creamos variables de aรฑo, mes y dรญa a partir de Inspection date")
# Derive year / month name / day name from the inspection date.
tabla_4['inspection_date'] = pd.to_datetime(tabla_4.inspection_date, format = '%m/%d/%Y')
tabla_4['inspection_year'] = tabla_4['inspection_date'].dt.year
tabla_4['inspection_month_name'] = tabla_4['inspection_date'].dt.month_name()
tabla_4['inspection_day_name'] = tabla_4['inspection_date'].dt.day_name()
print("\t-> Eliminamos dรญas festivos, sรกbado y domingo ")
# Drop weekend inspections.
tabla_4 = tabla_4.drop(tabla_4.loc[tabla_4['inspection_day_name']== 'Saturday'].index)
tabla_4 = tabla_4.drop(tabla_4.loc[tabla_4['inspection_day_name']== 'Sunday'].index)
print("\t-> Poner como primer columna center_id e inspection_date")
# Rename the id column; `order` (below) then moves it to the front.
tabla_4.rename(columns={'day_care_id':'center_id'}, inplace=True)
def order(frame, var):
    """Return *frame* with the columns listed in *var* moved to the front.

    The relative order of all remaining columns is preserved; the original
    DataFrame is not modified.
    """
    remaining = [col for col in frame.columns if col not in var]
    return frame[var + remaining]
tabla_4 = order(tabla_4,['center_id', 'inspection_date'])
# Most-recent inspections first.
print("\t-> Ordenamos la base por year, month y day en forma descendente")
tabla_4.sort_values(['inspection_date'], ascending=[False], inplace=True)
print("\t-> Creamos dummy = 1 si existiรณ violaciรณn")
tabla_4.violation_category.value_counts(dropna=False)
# violation = 1 whenever violation_category is non-null, else 0.
tabla_4['violation'] = tabla_4['violation_category'].apply(lambda x: not pd.isnull(x))
tabla_4['violation'] = tabla_4['violation'].apply(lambda x: 1 if x == True else 0)
tabla_4.violation.value_counts(dropna=False)
print("\t-> Creamos dummy = 1 si existiรณ violaciรณn y es un problema de salud pรบblica")
tabla_4['public_hazard'] = tabla_4['violation_category'].apply(lambda x: 1 if x == 'public_health_hazard' else 0)
tabla_4.public_hazard.value_counts(dropna=False)
print("\t-> Creamos la variable violaciones_hist_salud_publica: Nรบmero de violaciones de salud pรบblica histรณricas (2016-2019) por centro")
# Pattern used by each aggregate below: mask the per-row dummy to the target
# years (rows outside the filter become NaN), sum it per center, merge the
# per-center total back onto every row, then drop the masked _x column and
# rename the merged _y column.
tabla_4['violaciones_hist_salud_publica'] = tabla_4.public_hazard[(tabla_4.inspection_year != 2020)]
df_4 = tabla_4.groupby('center_id').violaciones_hist_salud_publica.sum().reset_index()
tabla_4 = pd.merge(left=tabla_4,right=df_4, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['violaciones_hist_salud_publica_x'], axis=1) #Drop the masked per-row column
tabla_4.rename(columns={'violaciones_hist_salud_publica_y':'violaciones_hist_salud_publica'}, inplace=True)
print("\t-> Creamos la variable violaciones_2019_salud_publica: Nรบmero de violaciones de salud pรบblica en el 2019 por centro")
tabla_4['violaciones_2019_salud_publica'] = tabla_4.public_hazard[(tabla_4.inspection_year == 2019)]
df_5 = tabla_4.groupby('center_id').violaciones_2019_salud_publica.sum().reset_index()
tabla_4 = pd.merge(left=tabla_4,right=df_5, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['violaciones_2019_salud_publica_x'], axis=1) #Drop the masked per-row column
tabla_4.rename(columns={'violaciones_2019_salud_publica_y':'violaciones_2019_salud_publica'}, inplace=True)
print("\t-> Creamos la variable violaciones_hist_criticas: Nรบmero de violaciones crรญticas histรณricas anteriores (2016-2019) por centro")
# critical-violation dummy, then the same per-center aggregate pattern.
tabla_4['violation_critical'] = tabla_4['violation_category'].apply(lambda x: 1 if x == 'critical' else 0)
tabla_4['violaciones_hist_criticas'] = tabla_4.violation_critical[(tabla_4.inspection_year != 2020)]
df_6 = tabla_4.groupby('center_id').violaciones_hist_criticas.sum().reset_index()
tabla_4 = pd.merge(left=tabla_4,right=df_6, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['violaciones_hist_criticas_x'], axis=1) #Drop the masked per-row column
tabla_4.rename(columns={'violaciones_hist_criticas_y':'violaciones_hist_criticas'}, inplace=True)
print("\t-> Creamos la variable violaciones_2019_criticas: Nรบmero de violaciones crรญticas en el 2019 por centro")
tabla_4['violaciones_2019_criticas'] = tabla_4.violation_critical[(tabla_4.inspection_year == 2019)]
df_7 = tabla_4.groupby('center_id').violaciones_2019_criticas.sum().reset_index()
tabla_4 = pd.merge(left=tabla_4,right=df_7, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['violaciones_2019_criticas_x'], axis=1) #Drop the masked per-row column
tabla_4.rename(columns={'violaciones_2019_criticas_y':'violaciones_2019_criticas'}, inplace=True)
print("\t-> Creamos la variable ratio_violaciones_hist: Nรบmero de inspecciones en total de primera vez que resultaron en violaciรณn crรญtica o de salud pรบblica/ nรบmero de inspecciones de primera vez por centro")
# Numerator: historical (pre-2020) initial annual inspections that ended in a
# critical or public-health violation, counted per center.
df_8 = tabla_4.loc[tabla_4['inspection_year'] != 2020]
# FIX: '&' binds tighter than '==' in Python, so the original
# `mask & df_8['initial_annual_inspection']==1` parsed as
# `(mask & series) == 1`. That only matched the intent because the dummy is
# strictly 0/1; the explicit parentheses make it correct unconditionally.
df_9 = df_8[df_8.violation_category.isin(['critical', 'public_health_hazard']) & (df_8['initial_annual_inspection'] == 1)]
df_10 = df_9.groupby('center_id').initial_annual_inspection.sum().reset_index()
# Denominator: initial annual inspections per center.
# NOTE(review): computed over ALL years (2020 included) even though the ratio
# is labelled "hist" — confirm whether it should be restricted to df_8.
df_11 = tabla_4.groupby('center_id').initial_annual_inspection.sum().reset_index()
df_12 = pd.merge(left=df_11,right=df_10, how='left', left_on='center_id', right_on='center_id')
df_12['ratio_violaciones_hist'] = df_12['initial_annual_inspection_y'] / df_12['initial_annual_inspection_x']
tabla_4 = pd.merge(left=tabla_4,right=df_12, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['initial_annual_inspection_x', 'initial_annual_inspection_y'], axis=1) #Drop merge helpers
print("\t-> Creamos la variable ratio_violaciones_2019: Nรบmero de inspecciones en total de primera vez que resultaron en violaciรณn crรญtica o de salud pรบblica en el 2019 / nรบmero de inspecciones de primera vez por centro")
# Same ratio restricted to 2019 inspections (same precedence fix as above).
df_13 = tabla_4.loc[tabla_4['inspection_year'] == 2019]
df_14 = df_13[df_13.violation_category.isin(['critical', 'public_health_hazard']) & (df_13['initial_annual_inspection'] == 1)]
df_15 = df_14.groupby('center_id').initial_annual_inspection.sum().reset_index()
df_16 = pd.merge(left=df_11,right=df_15, how='left', left_on='center_id', right_on='center_id')
df_16['ratio_violaciones_2019'] = df_16['initial_annual_inspection_y'] / df_16['initial_annual_inspection_x']
tabla_4 = pd.merge(left=tabla_4,right=df_16, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['initial_annual_inspection_x','initial_annual_inspection_y'], axis=1) #Drop merge helpers
print("\t-> Creamos la variable prom_violaciones_hist_borough: Promedio de violaciones histรณricas por distrito")
# Historical (pre-2020) mean violation rate per borough, merged onto each row.
df_17 = tabla_4.loc[tabla_4['inspection_year'] != 2020]
df_18 = df_17.groupby('borough').violation.mean().reset_index()
tabla_4 = pd.merge(left=tabla_4,right=df_18, how='left', left_on='borough', right_on='borough')
tabla_4.rename(columns={'violation_y':'prom_violaciones_hist_borough'}, inplace=True)
tabla_4.rename(columns={'violation_x':'violation'}, inplace=True)
print("\t-> Creamos la variable prom_violaciones_2019_borough: Promedio de violaciones en el 2019 por distrito")
# Same borough mean restricted to 2019.
df_19 = tabla_4.loc[tabla_4['inspection_year'] == 2019]
df_20 = df_19.groupby('borough').violation.mean().reset_index()
tabla_4 = pd.merge(left=tabla_4,right=df_20, how='left', left_on='borough', right_on='borough')
tabla_4.rename(columns={'violation_y':'prom_violaciones_2019_borough'}, inplace=True)
tabla_4.rename(columns={'violation_x':'violation'}, inplace=True)
print("\t-> Creamos la variable ratio_violaciones_hist_sp: Nรบmero de violaciones de salud pรบblica de primera vez por centro histรณricas (2017-2019)/ nรบmero de violaciones de primera vez de todo tipo por centro histรณricas (2017-2019) ")
# Per center: public-health violations / all violations, restricted to
# historical (pre-2020) initial annual inspections.
df_21 = tabla_4.loc[tabla_4['inspection_year'] != 2020]
df_22 = df_21.loc[df_21['initial_annual_inspection'] == 1]
df_23 = df_22.groupby('center_id').public_hazard.sum().reset_index()
df_24 = df_22.groupby('center_id').violation.sum().reset_index()
df_25 = pd.merge(left=df_23,right=df_24, how='left', left_on='center_id', right_on='center_id')
# NOTE(review): centers whose 'violation' sum is 0 divide by zero here and
# yield NaN/inf; the later fillna(0) hides that — confirm it is intended.
df_25['ratio_violaciones_hist_sp'] = df_25['public_hazard'] / df_25['violation']
tabla_4 = pd.merge(left=tabla_4,right=df_25, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['public_hazard_y','violation_y'], axis=1) #Drop the merged duplicates
tabla_4.rename(columns={'violation_x':'violation'}, inplace=True)
tabla_4.rename(columns={'public_hazard_x':'public_hazard'}, inplace=True)
print("\t-> Creamos la variable ratio_violaciones_2019_sp: Nรบmero de violaciones de salud pรบblica de primera vez por centro en el 2019 / nรบmero de violaciones de primera vez de todo tipo por centro en el 2019 ")
# Same ratio restricted to 2019 initial annual inspections.
df_26 = tabla_4.loc[tabla_4['inspection_year'] == 2019]
df_27 = df_26.loc[df_26['initial_annual_inspection'] == 1]
df_28 = df_27.groupby('center_id').public_hazard.sum().reset_index()
df_29 = df_27.groupby('center_id').violation.sum().reset_index()
df_30 = pd.merge(left=df_28,right=df_29, how='left', left_on='center_id', right_on='center_id')
df_30['ratio_violaciones_2019_sp'] = df_30['public_hazard'] / df_30['violation']
tabla_4 = pd.merge(left=tabla_4,right=df_30, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['public_hazard_y','violation_y'], axis=1) #Drop the merged duplicates
tabla_4.rename(columns={'violation_x':'violation'}, inplace=True)
tabla_4.rename(columns={'public_hazard_x':'public_hazard'}, inplace=True)
print("\t-> Creamos la variable ratio_violaciones_hist_criticas: Nรบmero de violaciones crรญticas de primera vez por centro histรณricas (2017-2019)/ nรบmero de violaciones de primera vez de todo tipo por centro histรณricas (2017-2019)")
# Per center: critical violations / all violations, restricted to historical
# (pre-2020) initial annual inspections.
df_31 = tabla_4.loc[tabla_4['inspection_year'] != 2020]
df_32 = df_31.loc[df_31['initial_annual_inspection'] == 1]
df_33 = df_32.groupby('center_id').violation_critical.sum().reset_index()
df_34 = df_32.groupby('center_id').violation.sum().reset_index()
df_35 = pd.merge(left=df_33,right=df_34, how='left', left_on='center_id', right_on='center_id')
df_35['ratio_violaciones_hist_criticas'] = df_35['violation_critical'] / df_35['violation']
tabla_4 = pd.merge(left=tabla_4,right=df_35, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['violation_critical_y','violation_y'], axis=1) #Drop the merged duplicates
tabla_4.rename(columns={'violation_x':'violation'}, inplace=True)
tabla_4.rename(columns={'violation_critical_x':'violation_critical'}, inplace=True)
print("\t-> Creamos la variable ratio_violaciones_2019_criticas: Nรบmero de violaciones crรญticas de primera vez por centro en el 2019/ nรบmero de violaciones de primera vez de todo tipo por centro en el 2019")
# Same ratio restricted to 2019 initial annual inspections.
df_36 = tabla_4.loc[tabla_4['inspection_year'] == 2019]
df_37 = df_36.loc[df_36['initial_annual_inspection'] == 1]
df_38 = df_37.groupby('center_id').violation_critical.sum().reset_index()
df_39 = df_37.groupby('center_id').violation.sum().reset_index()
df_40 = pd.merge(left=df_38,right=df_39, how='left', left_on='center_id', right_on='center_id')
df_40['ratio_violaciones_2019_criticas'] = df_40['violation_critical'] / df_40['violation']
tabla_4 = pd.merge(left=tabla_4,right=df_40, how='left', left_on='center_id', right_on='center_id')
tabla_4 = tabla_4.drop(['violation_critical_y','violation_y'], axis=1) #Drop the merged duplicates
tabla_4.rename(columns={'violation_x':'violation'}, inplace=True)
tabla_4.rename(columns={'violation_critical_x':'violation_critical'}, inplace=True)
tabla_4.info()
# Join the engineered features onto tabla_3; suffixes disambiguate any
# clashing column names.
tabla_5 = tabla_4.join(tabla_3, lsuffix='_caller', rsuffix='_other')
tabla_5.info()
tabla_5 = tabla_5.set_index(['center_id', 'inspection_date'])
tabla_5.info()
# NOTE(review): dropping by column POSITION is brittle — it silently breaks if
# any upstream step adds or removes a column; prefer dropping by name.
tabla_5 = tabla_5.drop(tabla_5.columns[[0,1,14,15,31,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,49,51,53,55]], axis=1) #Drop unneeded columns
tabla_5.info()
# Remaining NaNs (e.g. ratios whose denominator was 0) are set to 0.
tabla_5 = tabla_5.fillna(0)
```
Modelo Random Forest
- Para el entrenamiento se usaron todos los datos del 2017-2019 y para validación los datos correspondientes a lo que va del año 2020.
- Mediante una gráfica de barras se verifica si la muestra está balanceada o no y se observa que no está balanceada.
```
# Bar chart of the target's class balance (shows the sample is imbalanced).
sns.countplot(x='public_hazard', data=tabla_5, palette="Set3")
```
- Así que se utiliza _over-sampling_ para balancear la muestra.
```
# value_counts() sorts by frequency, so the first value is the majority class
# (no hazard) and the second the minority class (hazard).
count_class_0, count_class_1 = tabla_5.public_hazard.value_counts()
df_class_0 = tabla_5[tabla_5['public_hazard'] == 0]
df_class_1 = tabla_5[tabla_5['public_hazard'] == 1]
count_class_0
count_class_1
# NOTE(review): despite the "over-sampling" label, this draws count_class_1
# rows from the MAJORITY class, i.e. it random-UNDER-samples class 0 down to
# the minority size — confirm which resampling was actually intended.
df_class_0_over = df_class_0.sample(count_class_1, replace=True)
df_test_over = pd.concat([df_class_1, df_class_0_over], axis=0)
print('Random over-sampling:')
print(df_test_over.public_hazard.value_counts())
df_test_over.public_hazard.value_counts().plot(kind='bar', title='Count (public_hazard)');
# Temporal split: train on pre-2020 rows, validate on 2020 rows.
# NOTE(review): the resampling above happened BEFORE this split, so duplicated
# rows can leak across the boundary — confirm this is acceptable.
df_train = df_test_over.loc[df_test_over['inspection_year'] != 2020]
df_test = df_test_over.loc[df_test_over['inspection_year'] == 2020]
Y_train = df_train[['public_hazard']]
Y_test = df_test[['public_hazard']]
# `i not in Y_train` tests membership in Y_train's COLUMNS, i.e. this keeps
# every feature except the target 'public_hazard'.
X_train = df_train[[i for i in df_train.keys() if i not in Y_train]]
X_test = df_test[[i for i in df_test.keys() if i not in Y_test]]
import sklearn as sk
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import mean_squared_error
# Fix the RNG so the forest is reproducible.
np.random.seed(0)
rforest = RandomForestClassifier(n_estimators=600, class_weight="balanced", max_depth=8, criterion='gini')
rforest.fit(X_train,Y_train.values.ravel())
Y_pred = rforest.predict(X_test)
print("Accuracy:",metrics.accuracy_score(Y_test, Y_pred))
print("Precision:",metrics.precision_score(Y_test, Y_pred, average='macro'))
print("Recall:",metrics.recall_score(Y_test, Y_pred, average='macro'))
rforest_matrix=metrics.confusion_matrix(Y_test,Y_pred)
pd.DataFrame(rforest_matrix)
# Render the confusion matrix as an annotated heatmap.
class_names=[0,1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
sns.heatmap(pd.DataFrame(rforest_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
# Rank features by the forest's impurity-based importances, largest first.
feature_importance_frame = pd.DataFrame()
feature_importance_frame['features'] = list(X_train.keys())
feature_importance_frame['importance'] = list(rforest.feature_importances_)
feature_importance_frame = feature_importance_frame.sort_values(
    'importance', ascending=False)
feature_importance_frame
```
Modelo XGBoost
```
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
from collections import Counter
from sklearn.datasets import make_classification
import multiprocessing
# Gradient-boosted trees on the same train/test split as the random forest.
xg_clas = xgb.XGBClassifier(n_estimators=500, max_depth=3, learning_rate=0.01, subsample=1, objective='binary:logistic', booster='gbtree', n_jobs=1, nthread=multiprocessing.cpu_count())
xg_clas.fit(X_train, Y_train)
Y_p = xg_clas.predict(X_test)
cnf_matrix = metrics.confusion_matrix(Y_test, Y_p)
pd.DataFrame(cnf_matrix)
# Annotated heatmap of the confusion matrix.
class_names=[0,1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
# XGBoost feature importances, largest first.
feature_importance_frame = pd.DataFrame()
feature_importance_frame['features'] = list(X_train.keys())
feature_importance_frame['importance'] = list(xg_clas.feature_importances_)
feature_importance_frame = feature_importance_frame.sort_values(
    'importance', ascending=False)
feature_importance_frame
```
| github_jupyter |
```
import glob
import numpy as np
from keras.layers import LSTM,Dense,Embedding,Dropout
from keras.models import Sequential
from keras.utils import np_utils
from music21 import converter,instrument,note,chord
from pickle import dump #used to save file
def get_notes():
    """Extract note/chord tokens from every MIDI file in the training set.

    Each Note becomes its pitch string; each Chord becomes the dot-joined
    string of its normal-order pitch classes. The resulting token list is
    pickled to the file "notes" and also returned.
    """
    notes = []
    for file in glob.glob("Piano-midi.de/train/*.mid"):
        midi = converter.parse(file)
        print(file)
        # Prefer the first instrument part when one exists; otherwise fall
        # back to the flat note stream of the whole score.
        parts = instrument.partitionByInstrument(midi)
        if parts:
            source = parts.parts[0].recurse()
        else:
            source = midi.flat.notes
        for element in source:
            if isinstance(element, note.Note):
                notes.append(str(element.pitch))
            elif isinstance(element, chord.Chord):
                notes.append('.'.join(str(n) for n in element.normalOrder))
    # Persist the token list so later runs can skip re-parsing the MIDI files.
    with open("notes", "wb") as filepath:
        dump(notes, filepath)
    return notes
```
# What I have done above
We start by loading each file into a Music21 stream object using the converter.parse(file) function. Using that stream object we get a list of all the notes and chords in the file. We append the pitch of every note object using its string notation, since the most significant parts of the note can be recreated from the string notation of the pitch. And we append every chord by encoding the id of every note in the chord together into a single string, with each note separated by a dot. These encodings allow us to easily decode the output generated by the network into the correct notes and chords.
# Next
We have to create input sequences for the network and their respective outputs. The output for each input sequence will be the first note or chord that comes after the sequence of notes in the input sequence in our list of notes.
```
def create_seq(notes, n_vocab):
    """Build (input, output) training sequences for the LSTM.

    Every window of 50 consecutive tokens maps to the single token that
    follows it. Inputs are integer-encoded, reshaped to
    (samples, timesteps, 1) and normalised by the vocabulary size *n_vocab*;
    outputs are one-hot encoded.
    """
    length = 50
    # Deterministic token -> integer mapping over the sorted vocabulary.
    pitchname = sorted(set(notes))
    note_to_int = {token: idx for idx, token in enumerate(pitchname)}
    input_seq = []
    output_seq = []
    for start in range(len(notes) - length):
        window = notes[start:start + length]
        input_seq.append([note_to_int[token] for token in window])
        output_seq.append(note_to_int[notes[start + length]])
    n_pattern = len(input_seq)
    # The LSTM expects input shaped (samples, timesteps, features).
    input_seq = np.reshape(input_seq, (n_pattern, length, 1))
    # Normalise the integer codes into [0, 1).
    input_seq = input_seq / float(n_vocab)
    output_seq = np_utils.to_categorical(output_seq)
    return (input_seq, output_seq)
notes=get_notes()
# Vocabulary size = number of distinct note/chord tokens.
n_vocab=len(set(notes))
input_seq,output_seq=create_seq(notes,n_vocab)
# Stacked-LSTM model: 3 recurrent layers with dropout, then a dense head that
# predicts a distribution over the vocabulary.
model=Sequential()
model.add(LSTM(256,input_shape=(input_seq.shape[1],input_seq.shape[2]),return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(512,return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(256))
model.add(Dense(256,activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(n_vocab,activation="softmax"))
model.compile(loss="categorical_crossentropy",optimizer="rmsprop")
model.summary()
from keras.callbacks import ModelCheckpoint
# Checkpoint the weights whenever the training loss improves.
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss',verbose=0,save_best_only=True,mode='min')
callbacks_list = [checkpoint]
model.fit(input_seq,output_seq, epochs=200, batch_size=64, callbacks=callbacks_list)
```
| github_jupyter |
# JAX As Accelerated NumPy
[](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/01-jax-basics.ipynb)
*Authors: Rosalia Schneider & Vladimir Mikulik*
In this first section you will learn the very fundamentals of JAX.
## Getting started with JAX numpy
Fundamentally, JAX is a library that enables transformations of array-manipulating programs written with a NumPy-like API.
Over the course of this series of guides, we will unpack exactly what that means. For now, you can think of JAX as *differentiable NumPy that runs on accelerators*.
The code below shows how to import JAX and create a vector.
```
import jax
import jax.numpy as jnp
x = jnp.arange(10)
print(x)
```
So far, everything is just like NumPy. A big appeal of JAX is that you don't need to learn a new API. Many common NumPy programs would run just as well in JAX if you substitute `np` for `jnp`. However, there are some important differences which we touch on at the end of this section.
You can notice the first difference if you check the type of `x`. It is a variable of type `DeviceArray`, which is the way JAX represents arrays.
```
x
```
One useful feature of JAX is that the same code can be run on different backends -- CPU, GPU and TPU.
We will now perform a dot product to demonstrate that it can be done in different devices without changing the code. We use `%timeit` to check the performance.
(Technical detail: when a JAX function is called, the corresponding operation is dispatched to an accelerator to be computed asynchronously when possible. The returned array is therefore not necessarily 'filled in' as soon as the function returns. Thus, if we don't require the result immediately, the computation won't block Python execution. Therefore, unless we `block_until_ready`, we will only time the dispatch, not the actual computation. See [Asynchronous dispatch](https://jax.readthedocs.io/en/latest/async_dispatch.html#asynchronous-dispatch) in the JAX docs.)
```
long_vector = jnp.arange(int(1e7))
%timeit jnp.dot(long_vector, long_vector).block_until_ready()
```
**Tip**: Try running the code above twice, once without an accelerator, and once with a GPU runtime (while in Colab, click *Runtime* โ *Change Runtime Type* and choose `GPU`). Notice how much faster it runs on a GPU.
## JAX first transformation: `grad`
A fundamental feature of JAX is that it allows you to transform functions.
One of the most commonly used transformations is `jax.grad`, which takes a numerical function written in Python and returns you a new Python function that computes the gradient of the original function.
To use it, let's first define a function that takes an array and returns the sum of squares.
```
def sum_of_squares(x):
    """Return the scalar sum of the squared entries of *x*."""
    return jnp.square(x).sum()
```
Applying `jax.grad` to `sum_of_squares` will return a different function, namely the gradient of `sum_of_squares` with respect to its first parameter `x`.
Then, you can use that function on an array to return the derivatives with respect to each element of the array.
```
sum_of_squares_dx = jax.grad(sum_of_squares)
x = jnp.asarray([1.0, 2.0, 3.0, 4.0])
print(sum_of_squares(x))
print(sum_of_squares_dx(x))
```
You can think of `jax.grad` by analogy to the $\nabla$ operator from vector calculus. Given a function $f(x)$, $\nabla f$ represents the function that computes $f$'s gradient, i.e.
$$
(\nabla f)(x)_i = \frac{\partial f}{\partial x_i}(x).
$$
Analogously, `jax.grad(f)` is the function that computes the gradient, so `jax.grad(f)(x)` is the gradient of `f` at `x`.
(Like $\nabla$, `jax.grad` will only work on functions with a scalar output -- it will raise an error otherwise.)
This makes the JAX API quite different from other autodiff libraries like Tensorflow and PyTorch, where to compute the gradient we use the loss tensor itself (e.g. by calling `loss.backward()`). The JAX API works directly with functions, staying closer to the underlying math. Once you become accustomed to this way of doing things, it feels natural: your loss function in code really is a function of parameters and data, and you find its gradient just like you would in the math.
This way of doing things makes it straightforward to control things like which variables to differentiate with respect to. By default, `jax.grad` will find the gradient with respect to the first argument. In the example below, the result of `sum_squared_error_dx` will be the gradient of `sum_squared_error` with respect to `x`.
```
def sum_squared_error(x, y):
    """Return the scalar sum of squared differences between *x* and *y*."""
    residual = x - y
    return jnp.square(residual).sum()
sum_squared_error_dx = jax.grad(sum_squared_error)
y = jnp.asarray([1.1, 2.1, 3.1, 4.1])
print(sum_squared_error_dx(x, y))
```
To find the gradient with respect to a different argument (or several), you can set `argnums`:
```
jax.grad(sum_squared_error, argnums=(0, 1))(x, y) # Find gradient wrt both x & y
```
Does this mean that when doing machine learning, we need to write functions with gigantic argument lists, with an argument for each model parameter array? No. JAX comes equipped with machinery for bundling arrays together in data structures called 'pytrees', on which more in a [later guide](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/05.1-pytrees.ipynb). So, most often, use of `jax.grad` looks like this:
```
def loss_fn(params, data):
...
grads = jax.grad(loss_fn)(params, data_batch)
```
where `params` is, for example, a nested dict of arrays, and the returned `grads` is another nested dict of arrays with the same structure.
## Value and Grad
Often, you need to find both the value and the gradient of a function, e.g. if you want to log the training loss. JAX has a handy sister transformation for efficiently doing that:
```
jax.value_and_grad(sum_squared_error)(x, y)
```
which returns a tuple of, you guessed it, (value, grad). To be precise, for any `f`,
```
jax.value_and_grad(f)(*xs) == (f(*xs), jax.grad(f)(*xs))
```
## Auxiliary data
In addition to wanting to log the value, we often want to report some intermediate results obtained in computing the loss function. But if we try doing that with regular `jax.grad`, we run into trouble:
```
def squared_error_with_aux(x, y):
    """Return (loss, residuals): the (out, aux) pair expected by
    jax.grad(..., has_aux=True)."""
    residual = x - y
    return sum_squared_error(x, y), residual
jax.grad(squared_error_with_aux)(x, y)
```
This is because `jax.grad` is only defined on scalar functions, and our new function returns a tuple. But we need to return a tuple to return our intermediate results! This is where `has_aux` comes in:
```
jax.grad(squared_error_with_aux, has_aux=True)(x, y)
```
`has_aux` signifies that the function returns a pair, `(out, aux)`. It makes `jax.grad` ignore `aux`, passing it through to the user, while differentiating the function as if only `out` was returned.
## Differences from NumPy
The `jax.numpy` API closely follows that of NumPy. However, there are some important differences. We cover many of these in future guides, but it's worth pointing some out now.
The most important difference, and in some sense the root of all the rest, is that JAX is designed to be _functional_, as in _functional programming_. The reason behind this is that the kinds of program transformations that JAX enables are much more feasible in functional-style programs.
An introduction to functional programming (FP) is out of scope of this guide. If you already are familiar with FP, you will find your FP intuition helpful while learning JAX. If not, don't worry! The important feature of functional programming to grok when working with JAX is very simple: don't write code with side-effects.
A side-effect is any effect of a function that doesn't appear in its output. One example is modifying an array in place:
```
import numpy as np
x = np.array([1, 2, 3])
def in_place_modify(x):
    """Overwrite the first element of *x* with 123 (a side effect).

    Deliberately returns None: the caller observes the change only through
    the mutated argument — the NumPy behaviour JAX forbids.
    """
    x[0] = 123
in_place_modify(x)
x
```
The side-effectful function modifies its argument, but returns a completely unrelated value. The modification is a side-effect.
The code below will run in NumPy. However, JAX arrays won't allow themselves to be modified in-place:
```
in_place_modify(jnp.array(x)) # Raises error when we cast input to jnp.ndarray
```
Helpfully, the error points us to JAX's side-effect-free way of doing the same thing via the [`jax.numpy.ndarray.at`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.at.html) index update operators (be careful [`jax.ops.index_*`](https://jax.readthedocs.io/en/latest/jax.ops.html#indexed-update-functions-deprecated) functions are deprecated). They are analogous to in-place modification by index, but create a new array with the corresponding modifications made:
```
def jax_in_place_modify(x):
    """Return a new array equal to *x* but with index 0 set to 123.

    Pure counterpart of in-place assignment: the input array is untouched.
    """
    updated = x.at[0].set(123)
    return updated
y = jnp.array([1, 2, 3])
jax_in_place_modify(y)
```
Note that the old array was untouched, so there is no side-effect:
```
y
```
Side-effect-free code is sometimes called *functionally pure*, or just *pure*.
Isn't the pure version less efficient? Strictly, yes; we are creating a new array. However, as we will explain in the next guide, JAX computations are often compiled before being run using another program transformation, `jax.jit`. If we don't use the old array after modifying it 'in place' using indexed update operators, the compiler can recognise that it can in fact compile to an in-place modify, resulting in efficient code in the end.
Of course, it's possible to mix side-effectful Python code and functionally pure JAX code, and we will touch on this more later. As you get more familiar with JAX, you will learn how and when this can work. As a rule of thumb, however, any functions intended to be transformed by JAX should avoid side-effects, and the JAX primitives themselves will try to help you do that.
We will explain other places where the JAX idiosyncracies become relevant as they come up. There is even a section that focuses entirely on getting used to the functional programming style of handling state: [Part 7: Problem of State](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/07-state.ipynb). However, if you're impatient, you can find a [summary of JAX's sharp edges](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html) in the JAX docs.
## Your first JAX training loop
We still have much to learn about JAX, but you already know enough to understand how we can use JAX to build a simple training loop.
To keep things simple, we'll start with a linear regression.
Our data is sampled according to $y = w_{true} x + b_{true} + \epsilon$.
```
import numpy as np
import matplotlib.pyplot as plt
xs = np.random.normal(size=(100,))
noise = np.random.normal(scale=0.1, size=(100,))
ys = xs * 3 - 1 + noise
plt.scatter(xs, ys);
```
Therefore, our model is $\hat y(x; \theta) = wx + b$.
We will use a single array, `theta = [w, b]` to house both parameters:
```
def model(theta, x):
    """Evaluate the linear model w*x + b elementwise, with theta = [w, b]."""
    w, b = theta
    return x * w + b
```
The loss function is $J(x, y; \theta) = (\hat y - y)^2$.
```
def loss_fn(theta, x, y):
    """Mean squared error between the model's predictions and targets *y*."""
    residual = model(theta, x) - y
    return jnp.mean(residual ** 2)
```
How do we optimize a loss function? Using gradient descent. At each update step, we will find the gradient of the loss w.r.t. the parameters, and take a small step in the direction of steepest descent:
$\theta_{new} = \theta - 0.1 (\nabla_\theta J) (x, y; \theta)$
```
def update(theta, x, y, lr=0.1):
    """One gradient-descent step on loss_fn; returns the new parameters."""
    grads = jax.grad(loss_fn)(theta, x, y)
    return theta - lr * grads
```
In JAX, it's common to define an `update()` function that is called every step, taking the current parameters as input and returning the new parameters. This is a natural consequence of JAX's functional nature, and is explained in more detail in [The Problem of State](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/07-state.ipynb).
This function can then be JIT-compiled in its entirety for maximum efficiency. The next guide will explain exactly how `jax.jit` works, but if you want to, you can try adding `@jax.jit` before the `update()` definition, and see how the training loop below runs much faster.
```
theta = jnp.array([1., 1.])
for _ in range(1000):
theta = update(theta, xs, ys)
plt.scatter(xs, ys)
plt.plot(xs, model(theta, xs))
w, b = theta
print(f"w: {w:<.2f}, b: {b:<.2f}")
```
As you will see going through these guides, this basic recipe underlies almost all training loops you'll see implemented in JAX. The main difference between this example and real training loops is the simplicity of our model: that allows us to use a single array to house all our parameters. We cover managing more parameters in the later [pytree guide](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/05.1-pytrees.ipynb). Feel free to skip forward to that guide now to see how to manually define and train a simple MLP in JAX.
| github_jupyter |
# 2.4 ネットワークモデルの実装・2.5 順伝搬関数の実装
本ファイルでは、SSDのネットワークモデルと順伝搬forward関数を作成します。
# 2.4 学習目標
1. SSDのネットワークモデルを構築している4つのモジュールを把握する
2. SSDのネットワークモデルを作成できるようになる
3. SSDで使用する様々な大きさのデフォルトボックスの実装方法を理解する
# 2.5 学習目標
1. Non-Maximum Suppressionを理解する
2. SSDの推論時に使用するDetectクラスの順伝搬を理解する
3. SSDの順伝搬を実装できるようになる
# 事前準備
特になし
```
# ใใใฑใผใธใฎimport
from math import sqrt
from itertools import product
import pandas as pd
import torch
from torch.autograd import Function
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
```
# vggใขใธใฅใผใซใๅฎ่ฃ
```
# Build the VGG backbone used by SSD (conv/pool layers through conv7).
def make_vgg():
    """Assemble the VGG-based feature extractor as an nn.ModuleList."""
    # Channel plan: ints are conv output channels, 'M' is a floor-mode
    # max-pool and 'MC' a ceil-mode max-pool.
    cfg = [64, 64, 'M', 128, 128, 'M', 256, 256,
           256, 'MC', 512, 512, 512, 'M', 512, 512, 512]
    layers = []
    channels = 3  # RGB input
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif v == 'MC':
            # ceil_mode=True rounds the output size up instead of down.
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            layers.append(nn.Conv2d(channels, v, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            channels = v
    # pool5 + dilated conv6 + conv7 replace VGG's fully connected layers.
    layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
    layers.append(nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
    layers.append(nn.ReLU(inplace=True))
    layers.append(nn.Conv2d(1024, 1024, kernel_size=1))
    layers.append(nn.ReLU(inplace=True))
    return nn.ModuleList(layers)
# ๅไฝ็ขบ่ช
vgg_test = make_vgg()
print(vgg_test)
```
# extrasใขใธใฅใผใซใๅฎ่ฃ
```
# Build the 8 extra SSD feature layers (sources 3-6).
def make_extras():
    """Return the extras module (8 convolutions) as an nn.ModuleList.

    The input is the 1024-channel feature map produced by the VGG module.
    ReLU activations are deliberately NOT included here; they are applied
    inside the SSD forward pass.
    """
    cfg = [256, 512, 128, 256, 128, 256, 128, 256]  # output channels
    channels = [1024] + cfg
    # Alternating 1x1 "squeeze" and 3x3 convs; the first two 3x3 convs
    # downsample with stride 2.
    specs = [
        dict(kernel_size=1),
        dict(kernel_size=3, stride=2, padding=1),
        dict(kernel_size=1),
        dict(kernel_size=3, stride=2, padding=1),
        dict(kernel_size=1),
        dict(kernel_size=3),
        dict(kernel_size=1),
        dict(kernel_size=3),
    ]
    layers = [nn.Conv2d(c_in, c_out, **spec)
              for c_in, c_out, spec in zip(channels[:-1], channels[1:], specs)]
    return nn.ModuleList(layers)

# Smoke test
extras_test = make_extras()
print(extras_test)
```
# locใขใธใฅใผใซใจconfใขใธใฅใผใซใๅฎ่ฃ
```
# loc_layers predict the 4 box offsets per default box; conf_layers predict
# the per-class confidence per default box.
def make_loc_conf(num_classes=21, bbox_aspect_num=(4, 6, 6, 6, 4, 4)):
    """Build the localisation and confidence heads for the 6 SSD sources.

    The original version repeated the same two-conv pattern six times by
    hand; the per-source data now lives in one table and a single loop.
    (The default for ``bbox_aspect_num`` is a tuple rather than a mutable
    list; it is only indexed, so callers are unaffected.)

    Parameters
    ----------
    num_classes : int
        Total number of classes, including background (default 21).
    bbox_aspect_num : sequence of int
        Number of default boxes per feature-map cell for each source.

    Returns
    -------
    (nn.ModuleList, nn.ModuleList)
        loc heads (4 offsets per box) and conf heads (num_classes per box).
    """
    # Input channels of the six feature sources:
    # source1 = conv4_3 after L2Norm (512), source2 = VGG final layer (1024),
    # sources 3-6 come from the extras module.
    source_channels = (512, 1024, 512, 256, 256, 256)
    loc_layers = []
    conf_layers = []
    for in_ch, n_box in zip(source_channels, bbox_aspect_num):
        loc_layers.append(
            nn.Conv2d(in_ch, n_box * 4, kernel_size=3, padding=1))
        conf_layers.append(
            nn.Conv2d(in_ch, n_box * num_classes, kernel_size=3, padding=1))
    return nn.ModuleList(loc_layers), nn.ModuleList(conf_layers)

# Smoke test
loc_test, conf_test = make_loc_conf()
print(loc_test)
print(conf_test)
```
# L2Normๅฑคใๅฎ่ฃ
```
# Normalises the conv4_3 output channel-wise to unit L2 norm, then rescales
# every channel by a learnable weight (all initialised to `scale`).
class L2Norm(nn.Module):
    def __init__(self, input_channels=512, scale=20):
        """One learnable scale per channel; every weight starts at `scale`."""
        super(L2Norm, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(input_channels))
        self.scale = scale  # initial value of every channel weight
        self.reset_parameters()
        self.eps = 1e-10  # guards the division in forward()

    def reset_parameters(self):
        """Set every channel weight to the configured scale (default 20)."""
        init.constant_(self.weight, self.scale)

    def forward(self, x):
        """L2-normalise `x` across channels and rescale per channel.

        `x` is (batch, C, H, W); at each spatial position the C-vector is
        divided by its L2 norm, then multiplied by the channel weights.
        """
        # norm has shape (batch, 1, H, W) and broadcasts over channels.
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        normalised = x / norm
        # Lift the (C,) weight vector to (1, C, 1, 1) and expand to x's shape.
        weights = (self.weight
                   .unsqueeze(0).unsqueeze(2).unsqueeze(3)
                   .expand_as(normalised))
        return weights * normalised
```
# ใใใฉใซใใใใฏในใๅฎ่ฃ
```
# Generates the fixed grid of SSD default (prior) boxes.
class DBox(object):
    def __init__(self, cfg):
        """Store the geometry settings used to lay out the default boxes.

        cfg keys used: 'input_size', 'feature_maps', 'steps', 'min_sizes',
        'max_sizes', 'aspect_ratios'.
        """
        super(DBox, self).__init__()
        self.image_size = cfg['input_size']         # e.g. 300
        self.feature_maps = cfg['feature_maps']     # per-source map sizes [38, 19, ...]
        self.num_priors = len(cfg["feature_maps"])  # number of sources (6)
        self.steps = cfg['steps']                   # pixel stride of each source
        self.min_sizes = cfg['min_sizes']           # small square box sizes (px)
        self.max_sizes = cfg['max_sizes']           # large square box sizes (px)
        self.aspect_ratios = cfg['aspect_ratios']   # extra rectangular ratios

    def make_dbox_list(self):
        """Return every default box as a clamped [N, 4] tensor of
        [cx, cy, width, height], all normalised to [0, 1]."""
        flat = []
        for k, f in enumerate(self.feature_maps):
            # Per-source constants (independent of the cell position).
            f_k = self.image_size / self.steps[k]           # effective grid size
            s_k = self.min_sizes[k] / self.image_size       # small box side
            s_k_prime = sqrt(s_k * (self.max_sizes[k] / self.image_size))
            for i, j in product(range(f), repeat=2):
                # Cell centre, normalised to [0, 1].
                cx = (j + 0.5) / f_k
                cy = (i + 0.5) / f_k
                # Small square box (aspect ratio 1).
                flat += [cx, cy, s_k, s_k]
                # Large square box (geometric mean of min and max size).
                flat += [cx, cy, s_k_prime, s_k_prime]
                # Rectangular boxes for each configured aspect ratio.
                for ar in self.aspect_ratios[k]:
                    root = sqrt(ar)
                    flat += [cx, cy, s_k * root, s_k / root]
                    flat += [cx, cy, s_k / root, s_k * root]
        boxes = torch.Tensor(flat).view(-1, 4)
        # Clip boxes that stick out of the image.
        boxes.clamp_(max=1, min=0)
        return boxes
# Sanity-check the default-box generation.
# SSD300 configuration
ssd_cfg = {
    'num_classes': 21,  # total number of classes, including background
    'input_size': 300,  # input image size
    'bbox_aspect_num': [4, 6, 6, 6, 4, 4],  # default boxes per cell, per source
    'feature_maps': [38, 19, 10, 5, 3, 1],  # feature-map size of each source
    'steps': [8, 16, 32, 64, 100, 300],  # pixel stride of each source
    'min_sizes': [30, 60, 111, 162, 213, 264],  # small square box sizes
    'max_sizes': [60, 111, 162, 213, 264, 315],  # large square box sizes
    'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
}
# Build the default boxes
dbox = DBox(ssd_cfg)
dbox_list = dbox.make_dbox_list()
# Inspect the generated default boxes (expected shape [8732, 4])
pd.DataFrame(dbox_list.numpy())
```
# SSDใฏใฉในใๅฎ่ฃ
```
# SSD network: VGG backbone + extras + L2Norm + loc/conf heads.
class SSD(nn.Module):
    def __init__(self, phase, cfg):
        """Assemble the SSD sub-modules.

        phase: 'train' or 'inference'.
        cfg:   dict with at least 'num_classes' and 'bbox_aspect_num'
               (see ssd_cfg above).
        """
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = cfg["num_classes"]  # 21 classes incl. background
        # Sub-networks.
        self.vgg = make_vgg()
        self.extras = make_extras()
        self.L2Norm = L2Norm()
        self.loc, self.conf = make_loc_conf(cfg["num_classes"],
                                            cfg["bbox_aspect_num"])
        # Pre-compute the default boxes once.
        self.dbox_list = DBox(cfg).make_dbox_list()
        # Inference additionally needs the Detect post-processing stage.
        if phase == 'inference':
            self.detect = Detect()

# Smoke test
ssd_test = SSD(phase="train", cfg=ssd_cfg)
print(ssd_test)
```
# ใใใใ2.5็ฏ ้ ไผๆฌใฎๅฎ่ฃ
ใงใ
# ้ขๆฐdecodeใๅฎ่ฃ
ใใ
```
# Convert SSD offset predictions plus default boxes into bounding boxes.
def decode(loc, dbox_list):
    """Decode offset predictions into [xmin, ymin, xmax, ymax] boxes.

    Parameters
    ----------
    loc : torch.Tensor [8732, 4]
        Offsets predicted by the SSD model, [dcx, dcy, dwidth, dheight].
    dbox_list : torch.Tensor [8732, 4]
        Default boxes, [cx, cy, width, height].

    Returns
    -------
    torch.Tensor [8732, 4]
        Decoded boxes as [xmin, ymin, xmax, ymax].
    """
    # Centre offsets are scaled by 0.1 and size offsets by 0.2
    # (the standard SSD variances).
    centres = dbox_list[:, :2] + loc[:, :2] * 0.1 * dbox_list[:, 2:]
    sizes = dbox_list[:, 2:] * torch.exp(loc[:, 2:] * 0.2)
    boxes = torch.cat((centres, sizes), dim=1)
    # Convert [cx, cy, w, h] -> [xmin, ymin, xmax, ymax] in place.
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
```
# Non-Maximum Suppressionใ่กใ้ขๆฐใๅฎ่ฃ
ใใ
```
# Non-Maximum Suppression: greedily keep the highest-confidence boxes and
# discard any box whose IoU with an already-kept box exceeds `overlap`.
def nm_suppression(boxes, scores, overlap=0.45, top_k=200):
    """
    Perform Non-Maximum Suppression.
    Removes boxes that overlap an already-kept box by more than `overlap`.

    Parameters
    ----------
    boxes : torch.Tensor [num boxes above the 0.01 conf threshold, 4]
        Bounding-box coordinates [xmin, ymin, xmax, ymax].
    scores : torch.Tensor [num boxes above the 0.01 conf threshold]
        Confidence scores for those boxes.

    Returns
    -------
    keep : torch.LongTensor
        Indices that survived NMS, in descending confidence order
        (only the first `count` entries are meaningful).
    count : int
        Number of boxes that survived NMS.
    """
    # Return skeleton: same length as scores, all zeros.
    count = 0
    keep = scores.new(scores.size(0)).zero_().long()
    # Area of every box, used in the IoU denominator below.
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    # Scratch tensors reused each iteration for the clipped coordinates.
    tmp_x1 = boxes.new()
    tmp_y1 = boxes.new()
    tmp_x2 = boxes.new()
    tmp_y2 = boxes.new()
    tmp_w = boxes.new()
    tmp_h = boxes.new()
    # Sort scores ascending and keep only the indices of the top_k
    # highest-confidence boxes (there may be fewer than top_k).
    v, idx = scores.sort(0)
    idx = idx[-top_k:]
    # Loop while candidate indices remain.
    while idx.numel() > 0:
        i = idx[-1]  # index of the current highest-confidence box
        # Record it, then remove every remaining box that overlaps it
        # too strongly.
        keep[count] = i
        count += 1
        # Last remaining box: nothing left to suppress.
        if idx.size(0) == 1:
            break
        # Drop the just-kept index from the candidate list.
        idx = idx[:-1]
        # -------------------
        # Compute IoU of every remaining box with the kept box i.
        # -------------------
        # Gather the coordinates of the remaining candidates.
        torch.index_select(x1, 0, idx, out=tmp_x1)
        torch.index_select(y1, 0, idx, out=tmp_y1)
        torch.index_select(x2, 0, idx, out=tmp_x2)
        torch.index_select(y2, 0, idx, out=tmp_y2)
        # Clip each candidate to its intersection with box i.
        tmp_x1 = torch.clamp(tmp_x1, min=x1[i])
        tmp_y1 = torch.clamp(tmp_y1, min=y1[i])
        tmp_x2 = torch.clamp(tmp_x2, max=x2[i])
        tmp_y2 = torch.clamp(tmp_y2, max=y2[i])
        # Resize the width/height scratch tensors to the candidate count.
        # (These are immediately rebound below, so the resize is redundant
        # but harmless — kept as-is.)
        tmp_w.resize_as_(tmp_x2)
        tmp_h.resize_as_(tmp_y2)
        # Width and height of each clipped (intersection) region.
        tmp_w = tmp_x2 - tmp_x1
        tmp_h = tmp_y2 - tmp_y1
        # Non-overlapping boxes produce negative extents: clamp to 0.
        tmp_w = torch.clamp(tmp_w, min=0.0)
        tmp_h = torch.clamp(tmp_h, min=0.0)
        # Intersection area.
        inter = tmp_w*tmp_h
        # IoU = intersection / (area(a) + area(b) - intersection)
        rem_areas = torch.index_select(area, 0, idx)  # original candidate areas
        union = (rem_areas - inter) + area[i]
        IoU = inter/union
        # Keep only candidates whose IoU with box i is <= overlap
        # (le = less than or equal). Boxes above the threshold are assumed
        # to cover the same object as box i and are discarded.
        idx = idx[IoU.le(overlap)]
    return keep, count
```
# Detectใฏใฉในใๅฎ่ฃ
ใใ
```
# Inference post-processing: turn loc/conf outputs into final, de-duplicated
# bounding boxes.
# NOTE(review): instantiating an autograd.Function subclass with __init__ and
# an instance-level forward (self.detect = Detect()) is the legacy Function
# API; newer PyTorch versions require a static forward/apply — confirm the
# targeted torch version.
class Detect(Function):
    def __init__(self, conf_thresh=0.01, top_k=200, nms_thresh=0.45):
        self.softmax = nn.Softmax(dim=-1)  # normalises conf scores per box
        self.conf_thresh = conf_thresh  # keep only boxes with conf > 0.01
        self.top_k = top_k  # NMS considers only the top_k highest-conf boxes
        self.nms_thresh = nms_thresh  # IoU above this counts as the same object

    def forward(self, loc_data, conf_data, dbox_list):
        """
        Run the inference forward pass.

        Parameters
        ----------
        loc_data : torch.Tensor [batch_num, 8732, 4]
            Offset predictions.
        conf_data : torch.Tensor [batch_num, 8732, num_classes]
            Detection confidences.
        dbox_list : torch.Tensor [8732, 4]
            Default boxes.

        Returns
        -------
        output : torch.Size([batch_num, 21, 200, 5])
            Per batch item and class: the top-200 boxes by confidence,
            each row being [score, xmin, ymin, xmax, ymax].
        """
        # Sizes of each dimension.
        num_batch = loc_data.size(0)  # minibatch size
        num_dbox = loc_data.size(1)  # number of default boxes = 8732
        num_classes = conf_data.size(2)  # number of classes = 21
        # Normalise confidences with softmax.
        conf_data = self.softmax(conf_data)
        # Output skeleton: [batch, 21, 200, 5].
        output = torch.zeros(num_batch, num_classes, self.top_k, 5)
        # Reorder conf from [batch, 8732, classes] to [batch, classes, 8732].
        conf_preds = conf_data.transpose(2, 1)
        # Per minibatch item.
        for i in range(num_batch):
            # 1. Decode offsets + default boxes into [xmin, ymin, xmax, ymax].
            decoded_boxes = decode(loc_data[i], dbox_list)
            # Work on a copy of the confidences.
            conf_scores = conf_preds[i].clone()
            # Per class; index 0 is the background class and is skipped.
            for cl in range(1, num_classes):
                # 2. Select boxes whose confidence exceeds the threshold.
                # gt ("greater than") yields a boolean mask over the 8732
                # boxes; conf_scores is [21, 8732], c_mask is [8732].
                c_mask = conf_scores[cl].gt(self.conf_thresh)
                # scores is [num boxes above the threshold].
                scores = conf_scores[cl][c_mask]
                # Nothing above the threshold for this class: skip.
                if scores.nelement() == 0:  # nelement = total element count
                    continue
                # Expand the mask so it can index decoded_boxes: [8732, 4].
                l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
                # Masked indexing flattens to 1-D; view restores
                # [num boxes above the threshold, 4].
                boxes = decoded_boxes[l_mask].view(-1, 4)
                # 3. Non-Maximum Suppression removes overlapping duplicates.
                # ids: surviving indices in descending confidence order;
                # count: how many survived.
                ids, count = nm_suppression(
                    boxes, scores, self.nms_thresh, self.top_k)
                # Store [score, box] rows for the survivors.
                output[i, cl, :count] = torch.cat((scores[ids[:count]].unsqueeze(1),
                                                   boxes[ids[:count]]), 1)
        return output  # torch.Size([batch_num, 21, 200, 5])
```
# SSDใฏใฉในใๅฎ่ฃ
ใใ
```
# SSD network class (construction + the forward pass for train/inference).
class SSD(nn.Module):
    def __init__(self, phase, cfg):
        super(SSD, self).__init__()
        self.phase = phase  # 'train' or 'inference'
        self.num_classes = cfg["num_classes"]  # 21 classes incl. background
        # Build the SSD sub-networks.
        self.vgg = make_vgg()
        self.extras = make_extras()
        self.L2Norm = L2Norm()
        self.loc, self.conf = make_loc_conf(
            cfg["num_classes"], cfg["bbox_aspect_num"])
        # Pre-compute the default boxes.
        dbox = DBox(cfg)
        self.dbox_list = dbox.make_dbox_list()
        # Inference additionally needs the Detect post-processing stage.
        if phase == 'inference':
            self.detect = Detect()

    def forward(self, x):
        sources = list()  # feature maps source1-6 fed to the loc/conf heads
        loc = list()  # per-source loc head outputs
        conf = list()  # per-source conf head outputs
        # Run VGG up to conv4_3 (module index 22).
        for k in range(23):
            x = self.vgg[k](x)
        # L2-normalise the conv4_3 output -> source1.
        source1 = self.L2Norm(x)
        sources.append(source1)
        # Run the rest of VGG -> source2.
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)
        # Extras: conv followed by ReLU; the output of every second layer
        # (i.e. after each conv->ReLU->conv->ReLU pair) becomes source3-6.
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)
        # Apply the matching loc/conf convolution to each of the 6 sources.
        for (x, l, c) in zip(sources, self.loc, self.conf):
            # l(x) / c(x) produce [batch, boxes*4 (or *classes), H, W].
            # permute reorders to [batch, H, W, boxes*...] so that sources
            # with different box counts can be flattened uniformly;
            # contiguous() re-lays memory so view() below is legal.
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        # Flatten and concatenate across sources:
        # loc becomes [batch, 34928], conf becomes [batch, 183372].
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        # Reshape to per-box layout:
        # loc  -> [batch, 8732, 4]
        # conf -> [batch, 8732, num_classes]
        loc = loc.view(loc.size(0), -1, 4)
        conf = conf.view(conf.size(0), -1, self.num_classes)
        # Final output tuple.
        output = (loc, conf, self.dbox_list)
        if self.phase == "inference":  # inference
            # Run Detect's forward; result is [batch, 21, 200, 5].
            return self.detect(output[0], output[1], output[2])
        else:  # training
            return output
            # the returned value is the (loc, conf, dbox_list) tuple
```
ไปฅไธ
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Pointwise-Local-Reconstruction-Error" data-toc-modified-id="Pointwise-Local-Reconstruction-Error-1"><span class="toc-item-num">1 </span>Pointwise Local Reconstruction Error</a></span></li></ul></div>
Pointwise Local Reconstruction Error
====================================
Example for the usage of the `skcosmo.metrics.pointwise_local_reconstruction_error` as pointwise local reconstruction error (LFRE) on the degenerate CH4 manifold. We apply the local reconstruction measure on the degenerate CH4 manifold dataset. This dataset was specifically constructed to be representable by 4-body features (bispectrum) but not by 3-body features (power spectrum). In other words, the dataset contains environments which are different, but have the same 3-body features. For more details about the dataset please refer to [Pozdnyakov 2020](https://doi.org/10.1103/PhysRevLett.125.166001).
The skcosmo dataset already contains the 3 and 4-body features computed with [librascal](https://github.com/lab-cosmo/librascal) so we can load it and compare it with the LFRE.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('font', size=20)
from skcosmo.datasets import load_degenerate_CH4_manifold
from skcosmo.metrics import pointwise_local_reconstruction_error
# Load the degenerate CH4 manifold features: 3-body (SOAP power spectrum)
# and 4-body (SOAP bispectrum), precomputed with librascal.
degenerate_manifold = load_degenerate_CH4_manifold()
power_spectrum_features = degenerate_manifold.data.SOAP_power_spectrum
bispectrum_features = degenerate_manifold.data.SOAP_bispectrum
print(degenerate_manifold.DESCR)
n_local_points = 20  # neighbourhood size used by the local reconstruction
print("Computing pointwise LFRE...")
# LFRE(3-body -> 4-body): how well power-spectrum features locally
# reconstruct bispectrum features. Train on every 2nd point, test on all.
power_spectrum_to_bispectrum_pointwise_lfre = pointwise_local_reconstruction_error(
    power_spectrum_features,
    bispectrum_features,
    n_local_points,
    train_idx = np.arange(0, len(power_spectrum_features), 2),
    test_idx = np.arange(0, len(power_spectrum_features)),
    estimator=None,
    n_jobs=4,
)
# LFRE(4-body -> 3-body): the reverse direction.
bispectrum_to_power_spectrum_pointwise_lfre = pointwise_local_reconstruction_error(
    bispectrum_features,
    power_spectrum_features,
    n_local_points,
    train_idx = np.arange(0, len(power_spectrum_features), 2),
    test_idx = np.arange(0, len(power_spectrum_features)),
    estimator=None,
    n_jobs=4,
)
print("Computing pointwise LFRE finished.")
# Global (root-mean-square) LFRE for both directions.
print(
    "LFRE(3-body, 4-body) = ",
    np.linalg.norm(power_spectrum_to_bispectrum_pointwise_lfre)/np.sqrt(len(power_spectrum_to_bispectrum_pointwise_lfre))
)
print(
    "LFRE(4-body, 3-body) = ",
    np.linalg.norm(bispectrum_to_power_spectrum_pointwise_lfre)/np.sqrt(len(power_spectrum_to_bispectrum_pointwise_lfre))
)
# Contour plots of the pointwise LFRE over the (u, v) manifold coordinates.
fig, (ax34, ax43) = plt.subplots(
    1, 2, constrained_layout=True, figsize=(16, 7.5), sharey="row", sharex=True
)
vmax = 0.5
X, Y = np.meshgrid(np.linspace(0.7, 0.9, 9), np.linspace(-0.1, 0.1, 9))
# Points from index 81 onward form the 9x9 (u, v) grid being plotted.
pcm = ax34.contourf(
    X,
    Y,
    power_spectrum_to_bispectrum_pointwise_lfre[81:].reshape(9, 9).T,
    vmin=0,
    vmax=vmax,
)
ax43.contourf(
    X,
    Y,
    bispectrum_to_power_spectrum_pointwise_lfre[81:].reshape(9, 9).T,
    vmin=0,
    vmax=vmax,
)
# The red horizontal line marks the degenerate line (v = 0).
ax34.axhline(y=0, color="red", linewidth=5)
ax43.axhline(y=0, color="red", linewidth=5)
ax34.set_ylabel(r"v/$\pi$")
ax34.set_xlabel(r"u/$\pi$")
ax43.set_xlabel(r"u/$\pi$")
ax34.set_title(r"$X^-$ LFRE(3-body, 4-body)")
ax43.set_title(r"$X^-$ LFRE(4-body, 3-body)")
cbar = fig.colorbar(pcm, ax=[ax34, ax43], label="LFRE", location="bottom")
plt.show()
```
The environments span a manifold which is described by the coordinates $v/\pi$ and $u/\pi$ (please refer to [Pozdnyakov 2020](https://doi.org/10.1103/PhysRevLett.125.166001) for a concrete understanding of the manifold). The LFRE is presented for each environment in the manifold in the two contour plots. It can be seen that the reconstruction error of 4-body features using 3-body features (the left plot) is most significant along the degenerate line (the horizontal red line). This agrees with the fact that the 3-body features remain the same on the degenerate line and can therefore not reconstruct the 4-body features. On the other hand the 4-body features can perfectly reconstruct the 3-body features as seen in the right plot.
| github_jupyter |
```
%%init_spark
// Register the spark-sql-perf benchmark jar and point the SQL warehouse at HDFS.
launcher.jars = ["file:///opt/benchmark-tools/spark-sql-perf/target/scala-2.12/spark-sql-perf_2.12-0.5.1-SNAPSHOT.jar"]
launcher.conf.set("spark.sql.warehouse.dir", "hdfs:///user/livy")
!hadoop fs -mkdir /user/livy
// --- Benchmark configuration ---
val scaleFactor = "1" // data scale 1GB
val iterations = 1 // how many times to run the whole set of queries.
val format = "parquet" // support parquet or orc
val storage = "hdfs" // choose HDFS
val bucket_name = "/user/livy" // scala notebook only has the write permission of "hdfs:///user/livy" directory
val partitionTables = true // create partition tables
val query_filter = Seq() // Seq() == all queries
//val query_filter = Seq("q1-v2.4", "q2-v2.4") // run subset of queries
val randomizeQueries = false // run queries in a random order. Recommended for parallel runs.
// detailed results will be written as JSON to this location.
var resultLocation = s"${storage}://${bucket_name}/results/tpcds_${format}/${scaleFactor}/"
var databaseName = s"tpcds_${format}_scale_${scaleFactor}_db"
val use_arrow = false // when you want to use gazella_plugin to run TPC-DS, you need to set it true.
// When Arrow is enabled: switch the result location / database name to the
// arrow variants, create the database if needed, register every TPC-DS table
// from the pre-generated data as an "arrow" table, and recover partitions.
if (use_arrow){
  val data_path= s"${storage}://${bucket_name}/datagen/tpcds_${format}/${scaleFactor}"
  resultLocation = s"${storage}://${bucket_name}/results/tpcds_arrow/${scaleFactor}/"
  databaseName = s"tpcds_arrow_scale_${scaleFactor}_db"
  val tables = Seq("call_center", "catalog_page", "catalog_returns", "catalog_sales", "customer", "customer_address", "customer_demographics", "date_dim", "household_demographics", "income_band", "inventory", "item", "promotion", "reason", "ship_mode", "store", "store_returns", "store_sales", "time_dim", "warehouse", "web_page", "web_returns", "web_sales", "web_site")
  if (spark.catalog.databaseExists(s"$databaseName")) {
    println(s"$databaseName has exists!")
  }else{
    spark.sql(s"create database if not exists $databaseName").show
    spark.sql(s"use $databaseName").show
    // Register each table against its data directory unless already present.
    for (table <- tables) {
      if (spark.catalog.tableExists(s"$table")){
        println(s"$table has exists!")
      }else{
        spark.catalog.createTable(s"$table", s"$data_path/$table", "arrow")
      }
    }
    if (partitionTables) {
      for (table <- tables) {
        try{
          spark.sql(s"ALTER TABLE $table RECOVER PARTITIONS").show
        }catch{
          case e: Exception => println(e)
        }
      }
    }
  }
}
val timeout = 60 // timeout in hours
// COMMAND ----------
// Spark configuration
spark.conf.set("spark.sql.broadcastTimeout", "10000") // good idea for Q14, Q88.
// ... + any other configuration tuning
// COMMAND ----------
sql(s"use $databaseName")
import com.databricks.spark.sql.perf.tpcds.TPCDS
val tpcds = new TPCDS (sqlContext = spark.sqlContext)
// Select which TPC-DS v2.4 queries to run, optionally shuffled.
def queries = {
  val filtered_queries = query_filter match {
    case Seq() => tpcds.tpcds2_4Queries
    case _ => tpcds.tpcds2_4Queries.filter(q => query_filter.contains(q.name))
  }
  if (randomizeQueries) scala.util.Random.shuffle(filtered_queries) else filtered_queries
}
// Run the experiment and block until it finishes (or the timeout elapses).
val experiment = tpcds.runExperiment(
  queries,
  iterations = iterations,
  resultLocation = resultLocation,
  tags = Map("runtype" -> "benchmark", "database" -> databaseName, "scale_factor" -> scaleFactor))
println(experiment.toString)
experiment.waitForFinish(timeout*60*60)
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Slice-specified-nodes-in-dimspec" data-toc-modified-id="Slice-specified-nodes-in-dimspec-1"><span class="toc-item-num">1 </span>Slice specified nodes in dimspec</a></span></li><li><span><a href="#Test-parallelism" data-toc-modified-id="Test-parallelism-2"><span class="toc-item-num">2 </span>Test parallelism</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Example-task" data-toc-modified-id="Example-task-2.0.1"><span class="toc-item-num">2.0.1 </span>Example task</a></span></li></ul></li><li><span><a href="#Serial-invocation" data-toc-modified-id="Serial-invocation-2.1"><span class="toc-item-num">2.1 </span>Serial invocation</a></span><ul class="toc-item"><li><span><a href="#Maybe-sqash-dimensions-to-fit-into-einsum?" data-toc-modified-id="Maybe-sqash-dimensions-to-fit-into-einsum?-2.1.1"><span class="toc-item-num">2.1.1 </span>Maybe sqash dimensions to fit into einsum?</a></span></li><li><span><a href="#Many-var-parallelisation" data-toc-modified-id="Many-var-parallelisation-2.1.2"><span class="toc-item-num">2.1.2 </span>Many var parallelisation</a></span></li></ul></li><li><span><a href="#Plot-parallelisation-theoretical-speedup" data-toc-modified-id="Plot-parallelisation-theoretical-speedup-2.2"><span class="toc-item-num">2.2 </span>Plot parallelisation theoretical speedup</a></span></li><li><span><a href="#Use-unix-tools" data-toc-modified-id="Use-unix-tools-2.3"><span class="toc-item-num">2.3 </span>Use unix tools</a></span><ul class="toc-item"><li><span><a href="#Threading" data-toc-modified-id="Threading-2.3.1"><span class="toc-item-num">2.3.1 </span>Threading</a></span></li><li><span><a href="#Multiprocessing" data-toc-modified-id="Multiprocessing-2.3.2"><span class="toc-item-num">2.3.2 </span>Multiprocessing</a></span></li></ul></li></ul></li></ul></div>
```
#import ray
import pyrofiler as pyrof
from pyrofiler.pyrofiler import Profiler
from pyrofiler import callbacks
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import sys
from multiprocessing import Pool, Array
from multiprocessing.dummy import Pool as ThreadPool
import os
sns.set_style('whitegrid')
np.random.seed(42)
def work(arg):
    """Pool worker: contract one patch of the sliced task.

    NOTE(review): `patch` and `sl` are computed but never stored —
    presumably this should write `os.global_C[sl[0]] = patch`, since the
    threading benchmark below allocates `os.global_C` as the shared output
    buffer (and its correctness assert is commented out). Confirm intent.
    """
    i,x,y, par_vars, result_idx= arg
    patch = sliced_contract(x, y, par_vars, i)
    sl = target_slice(result_idx, par_vars, i)

# Shared thread pool used by the benchmarks below (2**7 = 128 workers).
pool = ThreadPool(processes=2**7)
```
# Slice specified nodes in dimspec
```
def _none_slice():
    """Return the all-elements slice (equivalent to `:`)."""
    return slice(None, None, None)
def _get_idx(x, idxs, slice_idx, shapes=None):
    """Slice for index label `x` when slicing along `idxs`.

    `slice_idx` is a flat position that is unravelled over `shapes`
    (default: dimension 2 for every sliced index). Labels in `idxs` get a
    single-element slice at their coordinate; other labels get `:`.
    """
    if shapes is None:
        shapes = [2] * len(idxs)
    point = np.unravel_index(slice_idx, shapes)
    coords = dict(zip(idxs, point))
    if x not in coords:
        return slice(None)
    p = coords[x]
    return slice(p, p + 1)
def _slices_for_idxs(idxs, *args, shapes=None, slice_idx=0):
    """For each index list in `args`, build a slice tuple that pins every
    index in `idxs` to the coordinates of flat point `slice_idx`."""
    return [
        tuple(_get_idx(label, idxs, slice_idx, shapes) for label in indexes)
        for indexes in args
    ]
def log_log_scale():
    """Put both axes of the current matplotlib figure on a log scale."""
    for axis_setter in (plt.yscale, plt.xscale):
        axis_setter('log')
def minorticks():
    """Enable minor ticks and draw a light minor grid on both axes."""
    plt.minorticks_on()
    plt.grid(axis='both', which='minor', linestyle='-', alpha=0.5)
```
# Test parallelism
### Example task
```
def get_example_task(A=8, B=10, C=7, dim1=0):
    """Create a random tensor-contraction task with shared index labels.

    Parameters
    ----------
    A : int
        Number of index labels shared by both tensors.
    B, C : int
        Number of free index labels of the first / second tensor.
    dim1 : int
        Number of *trailing* dimensions of each tensor to collapse to
        size 1 (shrinks the task while keeping its index structure).

    Returns
    -------
    ((T1, idxs1), (T2, idxs2))
        Random tensors with their integer index labels.
    """
    shape1 = [2] * (A + B)
    shape2 = [2] * (A + C)
    # Collapse the last `dim1` axes to singletons. The previous
    # `shape[-i]` loop started at i=0, and -0 == 0, so the FIRST axis
    # (a shared index) was collapsed instead of the last one.
    for i in range(1, dim1 + 1):
        shape1[-i] = 1
        shape2[-i] = 1
    T1 = np.random.randn(*shape1)
    T2 = np.random.randn(*shape2)
    # Labels: [0, A) shared, [A, A+B) free in T1, [A+B, A+B+C) free in T2.
    common = list(range(A))
    idxs1 = common + list(range(A, A + B))
    idxs2 = common + list(range(A + B, A + B + C))
    return (T1, idxs1), (T2, idxs2)

x, y = get_example_task(A=9)
x[1], y[1]
```
## Serial invocation
```
def contract(A, B):
    """Einsum-contract two (tensor, index-list) pairs.

    The output keeps the union of both index sets, so shared indices are
    matched element-wise rather than summed over.
    """
    tensor_a, idx_a = A
    tensor_b, idx_b = B
    contract_idx = set(idx_a) & set(idx_b)  # shared index labels
    result_idx = set(idx_a + idx_b)         # all labels appear in the output
    print('contract result idx', result_idx)
    return np.einsum(tensor_a, idx_a, tensor_b, idx_b, result_idx)
def sliced_contract(x, y, idxs, num):
    """Contract patch `num` of the task.

    Both operands are sliced along the indices in `idxs` (each pinned to one
    coordinate of flat patch `num`), then contracted. The slicing keeps the
    singleton axes, so the original index lists remain valid.
    """
    slices = _slices_for_idxs(idxs, x[1], y[1], slice_idx=num)
    a = x[0][slices[0]]
    b = y[0][slices[1]]
    # Time each patch contraction individually.
    with pyrof.timing(f'\tcontract sliced {num}'):
        C = contract((a, x[1]), (b, y[1]))
    return C
def target_slice(result_idx, idxs, num):
    """Return the slice tuple selecting patch `num` of the full result."""
    return _slices_for_idxs(idxs, result_idx, slice_idx=num)
# Time the full (unsliced) contraction once for reference.
with pyrof.timing('contract'):
    C = contract(x, y)
### Maybe sqash dimensions to fit into einsum?
```
def __contract_bound(A, B):
    """Experimental contract() variant that glues leading dimensions together
    when the result would exceed einsum's index limit (~26 labels), then
    reshapes the result back to the full target shape.

    NOTE(review): heavily print-instrumented and apparently unfinished —
    `contract_idx` is unused, and the reshape of `a`/`b` after gluing
    assumes the glued labels line up with the original shapes; verify
    before relying on it.
    """
    a, idxa = A
    b, idxb = B
    contract_idx = set(idxa) & set(idxb)
    def glue_first(shape):
        # Merge the first two axes into one.
        sh = [shape[0] * shape[1]] + list(shape[2:])
        return sh
    result_idx = set(idxa + idxb)
    # Map each index label to its dimension size.
    _map_a = {k:v for k,v in zip(idxa, a.shape)}
    _map_b = {k:v for k,v in zip(idxb, b.shape)}
    _map = {**_map_a, **_map_b}
    print(_map)
    result_idx = sorted(tuple(_map.keys()))
    target_shape = tuple([_map[i] for i in result_idx])
    _dimlen = len(result_idx)
    _maxdims = 22  # einsum label budget assumed safe
    print('dimlen',_dimlen)
    new_a, new_b = a.shape, b.shape
    if _dimlen>_maxdims:
        # Too many labels: repeatedly drop the first label of each operand
        # and glue the corresponding leading axes together.
        _contr_dim = _dimlen - _maxdims
        print(len(new_a), len(new_b))
        for i in range(_contr_dim):
            idxa = idxa[1:]
            idxb = idxb[1:]
            new_a = glue_first(new_a)
            new_b = glue_first(new_b)
        # Rebuild the label -> size map with the reduced label lists.
        _map_a = {k:v for k,v in zip(idxa, a.shape)}
        _map_b = {k:v for k,v in zip(idxb, b.shape)}
        _map = {**_map_a, **_map_b}
        print(_map)
        result_idx = sorted(tuple(_map.keys()))
        print(len(new_a), len(new_b))
    a = a.reshape(new_a)
    b = b.reshape(new_b)
    print(a.shape, b.shape)
    print(idxa, idxb)
    print('btsh',result_idx, target_shape)
    C = np.einsum(a,idxa, b,idxb, result_idx)
    return C.reshape(*target_shape)
def __add_dims(x, dims, ofs):
    """Append `dims` singleton axes to the tensor in `x` and extend its
    index list with fresh labels starting at max(existing) + ofs."""
    arr, idxs = x
    expanded = arr.reshape(arr.shape + (1,) * dims)
    start = max(idxs) + ofs
    return expanded, idxs + list(range(start, start + dims))
```
### Many var parallelisation
```
# Sequential baseline: for an increasing number of sliced variables (rank),
# time the full contraction and the compute/assign cost of a single patch.
prof_seq = Profiler()
prof_seq.use_append()
contract_idx = set(x[1]) & set(y[1])
result_idx = set(x[1] + y[1])
# Warm-up run.
for i in range(1):
    _ = contract(x,y)
for rank in range(1,7):
    # Reference: full contraction on a single thread.
    with prof_seq.timing('Single thread'):
        C = contract(x,y)
    par_vars = list(range(rank))  # sliced indices -> 2**rank patches total
    target_shape = C.shape
    # Cost of producing and writing back exactly one patch.
    with prof_seq.timing('One patch: total'):
        i = 0
        with prof_seq.timing('One patch: compute'):
            patch = sliced_contract(x, y, par_vars, i)
        C_par = np.empty(target_shape)
        with prof_seq.timing('One patch: assign'):
            _slice = target_slice(result_idx, par_vars, i)
            C_par[_slice[0]] = patch
```
## Plot parallelisation theoretical speedup
```
# Plot the recorded sequential timings against the equivalent thread count.
prof_seq.data
threads = 2**np.arange(1,7)
C_size = sys.getsizeof(C)
for k in prof_seq.data:
    plt.plot(threads, prof_seq.data[k], label=k)
plt.loglog(basex=2, basey=2)
from matplotlib.ticker import FormatStrFormatter
plt.title(f'Single node parallelization one batch test. Task size: {C_size:e}')
plt.xlabel('Thread count')
plt.ylabel('Time')
minorticks()
plt.legend()
plt.savefig('figures/node_par_seqtest.pdf')
plt.close()
```
## Use unix tools
### Threading
```
x,y = get_example_task(A=20, B=9, C=8, dim1=2)
contract_idx = set(x[1]) & set(y[1])
result_idx = set(x[1] + y[1])
prof_thread = Profiler()
prof_thread.use_append()
for i in range(1):
C = contract(x,y)
C_size = sys.getsizeof(C)
target_shape = C.shape
C = None
for rank in range(1,7):
if rank==1:
with prof_thread.timing('Single thread'):
C = contract(x,y)
C = None
with prof_thread.timing('Multithread: total'):
par_vars = list(range(rank))
threads = 2**len(par_vars)
os.global_C = np.empty(target_shape)
with prof_thread.timing('Multithread: work'):
_ = pool.map(work, ((i,x,y,par_vars,result_idx)for i in range(threads)))
#assert np.array_equal(C, os.global_C)
_data = prof_thread.data
print(_data)
_data_knl = {'Single thread': [1.3409993648529053, 1.3587844371795654, 1.3243846893310547, 1.336273193359375, 1.3332529067993164, 1.3412296772003174], 'Multithread: work': [0.7453043460845947, 0.5046432018280029, 0.39226293563842773, 0.40014123916625977, 0.5875647068023682, 1.0763416290283203], 'Multithread: total': [0.7459092140197754, 0.5054154396057129, 0.3927571773529053, 0.4007418155670166, 0.588019847869873, 1.0771734714508057]}
_data_biggest = {'Single thread': [27.42847204208374, 26.855594873428345, 26.628530979156494, 26.862286806106567, 26.71247911453247, 27.049968957901], 'Multithread: work': [14.236661434173584, 7.511402368545532, 4.950175762176514, 3.012814521789551, 2.351712703704834, 1.994131088256836], 'Multithread: total': [14.23719048500061, 7.512014150619507, 4.950707912445068, 3.0133090019226074, 2.3522441387176514, 1.9946098327636719]}
#_data = _data_biggest
# Plot the recorded (or pasted) thread-scaling data for this node.
threads = 2**np.arange(1,7)
for k in _data:
    plt.plot(threads, _data[k], label=k)
# NOTE(review): basex/basey removed in matplotlib 3.5 (use base=2) -- confirm version.
plt.loglog(basex=2, basey=2)
plt.yscale('linear')  # revert y to linear -> effectively a semilog-x plot
from matplotlib.ticker import FormatStrFormatter
plt.title(f'Single node parallelization test. Task size: {C_size:e}')
plt.xlabel('Thread count')
plt.ylabel('Time')
minorticks()  # helper defined elsewhere in the notebook
plt.legend()
plt.savefig('figures/node_par_threadtest_biggest.pdf')
#plt.rcParams.update({"xtick.bottom" : True, "ytick.left" : True})
sns.set_style('whitegrid')
#sns.set()
_data_block = {
'28':{'Single thread': [4.890172481536865], 'Multithread: work': [5.31355881690979, 2.839036464691162, 1.6587004661560059, 1.4607517719268799, 1.1708364486694336, 1.3796212673187256], 'Multithread: total': [5.31405234336853, 2.839534282684326, 1.659132957458496, 1.4612171649932861, 1.1718018054962158, 1.380187749862671]}
,'29': {'Single thread': [12.708141088485718], 'Multithread: work': [12.543375015258789, 6.445459604263306, 3.702291250228882, 2.225062131881714, 1.7111496925354004, 1.9049854278564453], 'Multithread: total': [12.543986320495605, 6.445924997329712, 3.7027952671051025, 2.2256860733032227, 1.7118234634399414, 1.905548095703125]}
, '30': {'Single thread': [26.65827775001526], 'Multithread: work': [26.532104015350342, 13.471351146697998, 7.361323356628418, 4.6045496463775635, 2.9114484786987305, 2.138317108154297], 'Multithread: total': [26.532758712768555, 13.471930980682373, 7.363482475280762, 4.605044364929199, 2.91215181350708, 2.1388139724731445]}
, '31': {'Single thread': [54.215914249420166], 'Multithread: work': [53.743674755096436, 27.541589498519897, 15.45585584640503, 8.812772750854492, 5.398884296417236, 4.5649192333221436], 'Multithread: total': [53.74607563018799, 27.542162895202637, 15.456344604492188, 8.814988851547241, 5.399648427963257, 4.5654377937316895]}
, '32': {'Single thread': [107.05718398094177], 'Multithread: work': [106.85966396331787, 55.66744685173035, 31.097278356552124, 18.133748292922974, 10.42065167427063, 9.078657865524292], 'Multithread: total': [106.86018991470337, 55.669677734375, 31.099481344223022, 18.13595175743103, 10.421445369720459, 9.080750703811646]}
}
# Scaling plot over several tensor sizes (one curve per size, darker = larger),
# for the JLSE machine.
threads = 2**np.arange(1,7)
fig, axs = plt.subplots(1,1, figsize=(6,6))
colors = (plt.cm.gnuplot2(x) for x in np.linspace(.8,.2,len(_data_block)))  # generator: one color per size
for size, _data in _data_block.items():
    singl = _data['Single thread']
    total = _data['Multithread: total']
    c = next(colors)
    # Dict key is log2 of the element count; 2**(4+size) presumably converts to
    # bytes (16 bytes/element) -- TODO confirm element dtype.
    plt.plot(threads, total, '-D',color=c, label=f'Tensor size {2**(4+int(size))/1e9:.2f}Gb')
    # Dashed baseline: the single-thread time repeated at every thread count
    # (list repetition, not arithmetic scaling).
    plt.plot(threads, singl*len(threads), '--', alpha=.3, color=c )
#from matplotlib.ticker import FormatStrFormatter
# NOTE(review): basex/basey removed in matplotlib 3.5 (use base=2) -- confirm version.
plt.loglog(basex=2, basey=2)
#plt.yscale('linear')
plt.grid()
#minorticks()
ax = plt.gca()
#ax.yaxis.set_minor_locator(plt.ticker.LogLocator(base=10.0, subs='all'))
#ax.yaxis.set_minor_formatter(plt.ticker.NullFormatter())
plt.title(f'Single node contraction parallelization for different sizes')
plt.xlabel('Thread count')
plt.ylabel('Time')
plt.grid(True,which="both")
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles[::-1], labels[::-1], loc='upper right')  # reversed so the largest size is listed first
plt.savefig('figures/node_par_threadtest_gener_jlse.pdf')
#plt.rcParams.update({"xtick.bottom" : True, "ytick.left" : True})
sns.set_style('whitegrid')
#sns.set()
_data_block = {
'27':{'Single thread': [4.890172481536865], 'Multithread: work': [5.31355881690979, 2.839036464691162, 1.6587004661560059, 1.4607517719268799, 1.1708364486694336, 1.3796212673187256], 'Multithread: total': [5.31405234336853, 2.839534282684326, 1.659132957458496, 1.4612171649932861, 1.1718018054962158, 1.380187749862671]}
,'30': {'Single thread': [37.403658866882324], 'Multithread: work': [39.51915979385376, 21.37852430343628, 11.835341453552246, 7.165068864822388, 4.922534942626953, 4.410918235778809], 'Multithread: total': [39.519590854644775, 21.378950595855713, 11.83582329750061, 7.1655051708221436, 4.923001050949097, 4.411387205123901
]}
}
# Same scaling plot as above, for the Theta machine data.
threads = 2**np.arange(1,7)
fig, axs = plt.subplots(1,1, figsize=(6,6))
colors = (plt.cm.gnuplot2(x) for x in np.linspace(.8,.2,len(_data_block)))  # generator: one color per size
for size, _data in _data_block.items():
    singl = _data['Single thread']
    total = _data['Multithread: total']
    c = next(colors)
    # Dict key is log2 of the element count; 2**(4+size) presumably bytes -- TODO confirm.
    plt.plot(threads, total, '-D',color=c, label=f'Tensor size {2**(4+int(size))/1e9:.2f}Gb')
    # Dashed baseline: single-thread time repeated at every thread count.
    plt.plot(threads, singl*len(threads), '--', alpha=.3, color=c )
#from matplotlib.ticker import FormatStrFormatter
# NOTE(review): basex/basey removed in matplotlib 3.5 (use base=2) -- confirm version.
plt.loglog(basex=2, basey=2)
#plt.yscale('linear')
plt.grid()
#minorticks()
ax = plt.gca()
#ax.yaxis.set_minor_locator(plt.ticker.LogLocator(base=10.0, subs='all'))
#ax.yaxis.set_minor_formatter(plt.ticker.NullFormatter())
plt.title(f'Single node contraction parallelization for different sizes')
plt.xlabel('Thread count')
plt.ylabel('Time')
plt.grid(True,which="both")
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles[::-1], labels[::-1], loc='upper right')  # reversed so the largest size is listed first
plt.savefig('figures/node_par_threadtest_gener_theta.pdf')
```
### Multiprocessing
```
# Multiprocessing variant: allocate the shared output array, then fan the
# contraction work out over a process pool and verify against the serial result.
flat_size = len(C.flatten())
with pyrof.timing('init array'):
    os.global_C = np.empty(target_shape)  # output buffer stashed on the os module so workers can reach it
    #os.global_C = tonumpyarray(Array('d', flat_size))
    #us.global_C = os.global_C.reshape(target_shape)
pool = Pool(processes=threads)
print('inited pool')
with pyrof.timing('parallel work'):
    print('started work')
    _ = pool.map(work, range(threads))  # `work` is defined in an earlier cell
C_size = sys.getsizeof(os.global_C)
print(f'result size: {C_size:e}')
assert np.array_equal(C, os.global_C)  # parallel result must match the serial reference
del os.global_C
```
| github_jupyter |
##### Copyright 2021 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TensorFlow Lite Model Analyzer
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/guide/model_analyzer"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/guide/model_analyzer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/guide/model_analyzer.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/guide/model_analyzer.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
TensorFlow Lite Model Analyzer API helps you analyze models in TensorFlow Lite format by listing a model's structure.
## Model Analyzer API
The following API is available for the TensorFlow Lite Model Analyzer.
```
tf.lite.experimental.Analyzer.analyze(model_path=None,
model_content=None,
gpu_compatibility=False)
```
You can find the API details from https://www.tensorflow.org/api_docs/python/tf/lite/experimental/Analyzer or run `help(tf.lite.experimental.Analyzer.analyze)` from a Python terminal.
## Basic usage with simple Keras model
The following code shows basic usage of Model Analyzer. It shows contents of the converted Keras model in TFLite model content, formatted as a flatbuffer object.
```
import tensorflow as tf
# Small example classifier (128x128 input flattened through one hidden layer).
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(128, 128)),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])
# Convert to a TFLite flatbuffer in memory and list its structure.
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
```
## Basic usage with MobileNetV3Large Keras model
This API works with large models such as MobileNetV3Large. Since the output is large, you might want to browse it with your favorite text editor.
```
# Same analysis on a large pretrained model; the report is correspondingly long.
model = tf.keras.applications.MobileNetV3Large()
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
```
## Check GPU delegate compatibility
The ModelAnalyzer API provides a way to check the [GPU delegate](https://www.tensorflow.org/lite/performance/gpu) compatibility of the given model by providing `gpu_compatibility=True` option.
### Case 1: When the model is incompatible
The following code shows a way to use `gpu_compatibility=True` option for simple tf.function which uses `tf.slice` with a 2D tensor and `tf.cosh` which are not compatible with GPU delegate.
You will see `GPU COMPATIBILITY WARNING` per every node which has compatibility issue(s).
```
import tensorflow as tf
@tf.function(input_signature=[
    tf.TensorSpec(shape=[4, 4], dtype=tf.float32)
])
def func(x):
    # tf.cosh and 2-D tf.slice are deliberately chosen as ops the GPU delegate
    # does not support, so the analyzer should warn on each corresponding node.
    return tf.cosh(x) + tf.slice(x, [1, 1], [1, 1])
converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [func.get_concrete_function()], func)
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,
    tf.lite.OpsSet.SELECT_TF_OPS,  # allow select TF ops so conversion succeeds
]
fb_model = converter.convert()
# gpu_compatibility=True adds per-node GPU delegate compatibility warnings.
tf.lite.experimental.Analyzer.analyze(model_content=fb_model, gpu_compatibility=True)
```
### Case 2: When the model is compatible
In this example, the given model is compatible with the GPU delegate.
**Note:** Even though the tool doesn't find any compatibility issue, it doesn't guarantee that your model works well with the GPU delegate on every device. There could still be runtime incompatibility, such as the target OpenGL backend missing the `CL_DEVICE_IMAGE_SUPPORT` feature.
```
# Same simple classifier as before; this time the GPU compatibility check
# should report no warnings.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(128, 128)),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])
fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
tf.lite.experimental.Analyzer.analyze(model_content=fb_model, gpu_compatibility=True)
```
| github_jupyter |
```
import warnings
warnings.simplefilter('ignore', FutureWarning)
import matplotlib
matplotlib.rcParams['axes.grid'] = True # show gridlines by default
%matplotlib inline
import pandas as pd
```
## Getting Comtrade data into your notebook
In this exercise, you will practice loading data from Comtrade into a pandas dataframe and getting it into a form where you can start to work with it.
The following steps and code are an example. Your task for this exercise is stated at the end, after the example.
The data is obtained from the [United Nations Comtrade](http://comtrade.un.org/data/) website, by selecting the following configuration:
- Type of Product: goods
- Frequency: monthly
- Periods: all of 2020
- Reporter: Kenya
- Partners: all
- Flows: imports and exports
- HS (as reported) commodity codes: 0401 (Milk and cream, neither concentrated nor sweetened) and 0402 (Milk and cream, concentrated or sweetened)
Clicking on 'Preview' results in a message that the data exceeds 500 rows. Data was downloaded using the *Download CSV* button and the download file renamed appropriately.
```
# Path to the Comtrade CSV downloaded with the configuration described above.
LOCATION ='comtrade_milk_kenya_monthly_2020.csv'
```
Load the data in from the specified location, ensuring that the various codes are read as strings. Preview the first few rows of the dataset.
```
# Read the Comtrade extract; keep the codes as strings so leading zeros
# (e.g. commodity code '0401') survive.
milk = pd.read_csv(LOCATION, dtype={'Commodity Code':str, 'Reporter Code':str})
milk.head(5)
milk.tail(5)
#limit the columns
COLUMNS = ['Year', 'Period','Trade Flow','Reporter', 'Partner', 'Commodity','Commodity Code','Trade Value (US$)']
milk = milk[COLUMNS]
milk
```
Derive two new dataframes that separate out the 'World' partner data and the data for individual partner countries.
```
# 'World' rows are aggregate totals; keep them apart from the per-country rows
# so aggregates and countries are never mixed in one analysis.
milk_world = milk[milk['Partner'] == 'World']
milk_countries = milk[milk['Partner'] != 'World']
#store as csv
milk_countries.to_csv('kenyamilk.csv',index=False)
```
To load the data back in:
```
# Round-trip check: reload the saved CSV with the same string dtypes.
load_test= pd.read_csv('kenyamilk.csv', dtype={'Commodity Code':str,'Reporter Code':str})
load_test.head(3)
```
### Subsetting Your Data
For large or heterogeneous datasets, it is often convenient to create subsets of the data. To further separate out the imports:
```
# Import-only views of each of the three dataframes.
milk_imports = milk[milk['Trade Flow'] == 'Imports']
milk_countries_imports = milk_countries[milk_countries['Trade Flow'] == 'Imports']
milk_world_imports=milk_world[milk_world['Trade Flow'] == 'Imports']
```
### Sorting the data
Having loaded in the data, find the most valuable partners in terms of import trade flow during a particular month by sorting the data by *decreasing* trade value and then selecting the top few rows.
```
# Top import partners in January 2020 (Period is encoded as YYYYMM).
milkImportsInJanuary2020 = milk_countries_imports[milk_countries_imports['Period'] == 202001]
milkImportsInJanuary2020.sort_values('Trade Value (US$)',ascending=False).head(10)
```
### Grouping the data
Split the data into two different subsets of data (imports and exports), by grouping on trade flow.
```
# Partition the country rows by trade flow; preview the imports group.
groups = milk_countries.groupby('Trade Flow')
groups.get_group('Imports').head()
```
As well as grouping on a single term, you can create groups based on multiple columns by passing in several column names as a list. For example, generate groups based on commodity code and trade flow, and then preview the keys used to define the groups
```
# Group on (commodity code, trade flow) and list the resulting group keys.
GROUPING_COMMFLOW = ['Commodity Code','Trade Flow']
groups = milk_countries.groupby(GROUPING_COMMFLOW)
groups.groups.keys()
```
Retrieve a group based on multiple group levels by passing in a tuple that specifies a value for each index column. For example, if a grouping is based on the 'Partner' and 'Trade Flow' columns, the argument of get_group has to be a partner/flow pair, like ('Uganda', 'Import') to get all rows associated with imports from Uganda.
```
# Multi-level grouping: a group is addressed by a (partner, flow) tuple.
GROUPING_PARTNERFLOW = ['Partner','Trade Flow']
groups = milk_countries.groupby(GROUPING_PARTNERFLOW)
GROUP_PARTNERFLOW= ('Uganda','Imports')
groups.get_group( GROUP_PARTNERFLOW )
```
To find the leading partner for a particular commodity, group by commodity, get the desired group, and then sort the result.
```
# Leading partners for concentrated/sweetened milk (HS code 0402).
groups = milk_countries.groupby(['Commodity Code'])
groups.get_group('0402').sort_values("Trade Value (US$)", ascending=False).head()
```
| github_jupyter |
```
import requests
import json
url = "https://microsoft-computer-vision3.p.rapidapi.com/analyze"
querystring = {"language":"en","descriptionExclude":"Celebrities","visualFeatures":"ImageType,Categories,Description","details":"Celebrities"}
payload = "{\r\n \"url\": \"https://neilpatel.com/wp-content/uploads/2017/09/image-editing-tools.jpg\"\r\n}"
headers = {
'content-type': "application/json",
'x-rapidapi-key': "dacaae5850mshbcab4ca9a7b2a4dp13a15bjsnd9aaa23a2ee4",
'x-rapidapi-host': "microsoft-computer-vision3.p.rapidapi.com"
}
response = requests.request("POST", url, data=payload, headers=headers, params=querystring)
result = response.text
print(result)
type(response)
type(result)
data = json.loads(result)
print(data)
type(data)
data['description']['captions'][0]['text']
# Hand-built nested dict mirroring a JSON document, for indexing practice.
test = {
    "book": [
        {
            "id":"01",
            "language": "Java",
            "edition": "third",
            "author": "Herbert Schildt"
        },
        {
            "id":"07",
            "language": "C++",
            "edition": "second",
            "author": "E.Balagurusamy"
        }
    ]
}
print(test)
type(test)
winner_record = {'marks':97,'name':'Winner','distinction':True}
winner_record['distinction']
# Dict of lists of dicts: subject -> list of student records.
records = {
    'Maths':[
        {'marks':97,'name':'Winner','distinction':True},
        {'marks':99,'name':'Emeto','distinction':False}
    ],
    'English':[
        {'marks':97,'name':'Winner','distinction':True},
        {'marks':99,'name':'Emeto','distinction':False}
    ]
}
age = [22,'Winner',2,76,12,16]
records['Maths'][1]['distinction']
# Linear scan over the Maths records for one student's distinction flag.
for element in records['Maths']:
    if element['name'] == 'Emeto':
        print(element['distinction'])
    else:
        print('wrong record!')
# NOTE(review): the rest of this cell is pasted JSON (a JSON Schema plus sample
# documents), NOT valid Python -- `true` is undefined in Python and the bare
# 'Maths': fragment below is a syntax error. Kept verbatim for reference only.
{
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Product",
    "description": "A product from Acme's catalog",
    "type": "object",
    "properties": {
        "id": {
            "description": "The unique identifier for a product",
            "type": "integer"
        },
        "name": {
            "description": "Name of the product",
            "type": "string"
        },
        "price": {
            "type": "number",
            "minimum": 0,
            "exclusiveMinimum": true
        }
    },
    "required": ["id", "name", "price"]
}
'Maths':[
    {'marks':97,'name':'Winner','distinction':True},
    {'marks':99,'name':'Emeto','distinction':False}
[
    {
        "id": 2,
        "name": "An ice sculpture",
        "price": 12.50,
    },
    {
        "id": 3,
        "name": "A blue mouse",
        "price": 25.50,
    }
]
# Serialize a small dict to a JSON string; ensure_ascii=False keeps any
# non-ASCII characters unescaped in the output.
d = {}
d['Name'] = 'Winner Emeto'
d['Country'] = 'Nigeria'
var = json.dumps(d,ensure_ascii=False)
print(var)
type(var)
```
| github_jupyter |
```
import feather
import os
import re
import pickle
import time
import datetime
import numpy as np
import pandas as pd
from numba import jit
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import matthews_corrcoef
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix, hstack
from ml_toolbox.xgboostmonitor_utils import *
import ml_toolbox.xgboostmonitor_utils as xgbm
%matplotlib inline
import xgboost as xgb
import subprocess
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
# Custom modules
import const
import func
```
## Load data
```
# Python 2 cell (bare `print` statements below).
# Target: last column ('Response' label) of the first training file.
y = func.read_last_column(os.path.join(const.BASE_PATH,const.TRAIN_FILES[0]+'.csv'))
print y.head(3)
y = y.Response.values
# Load columns name
num_cols = func.get_columns_csv(os.path.join(const.BASE_PATH, const.TRAIN_FILES[0]))[:200]
train_stack = feather.read_dataframe('divers/tr_stack1.feather')
#test_stack = feather.read_dataframe('divers/te_stack1.feather')
#tr_lauren = feather.read_dataframe('../input/tr_lauren.feather')
#te_lauren = feather.read_dataframe('../input/te_lauren.feather')
#leak = pd.read_csv('../input/leak_feature.csv')
tr_feather_set1 = feather.read_dataframe('divers/train.feather')
#te_feather_set1 = pd.read_csv('divers/test_eng.csv')
# Suffix the second feature set to avoid column-name collisions in the concat.
tr_feather_set1.columns = [x + '_v2' for x in tr_feather_set1.columns]
train = pd.concat([train_stack,tr_feather_set1],axis = 1)
set(train_stack.columns) & set(tr_feather_set1.columns)  # sanity check: any overlapping names?
features = list(train.columns)
features.remove("Y")
#features.remove("Id")
#features.remove("Id")
features.remove("Response")
#features.remove("tdeltadevrel_block1a")
# Drop features excluded from this run (identifiers/engineered columns).
features.remove("cluster_n500")
features.remove("unique_path")
features.remove('magic3')
features.remove('magic4')
X = train[features]
# Free the intermediate frames; X holds everything that is needed.
del train_stack,tr_feather_set1,train
import gc
gc.collect()
print('X_num_raw: {}'.format(X.shape))
print const.CV
# Pre-computed CV folds: a pickled list of (train_idx, val_idx) pairs.
with open(const.CV, 'rb') as f:
    cv = pickle.load(f)
n_cv = len(cv)
n_cv
x_train = xgb.DMatrix(X,
                      label=y)
```
## Train simple model
```
def score_xgboost_full(params):
    """Out-of-fold XGBoost evaluation over all CV folds.

    Trains one booster per (train, val) split in the notebook-global `cv`,
    fills an out-of-fold prediction vector, then scans a threshold grid for
    the best Matthews correlation coefficient. Returns the OOF predictions.

    Relies on notebook globals: `x_train` (xgb.DMatrix), `y`, `cv`, `counter`.
    (Python 2 cell: bare `print` statements.)
    """
    global counter
    #print ('Params testing %d: %s' % (counter, params))
    counter += 1
    print('Predicting XGBoost score with ({}):'.format(counter))
    print('\t {} samples'.format(x_train.num_row()))
    print('\t {} features'.format(x_train.num_col()))
    print('\t {} parameters'.format(params))
    preds_val = np.zeros(y.shape)
    for (itrain, ival) in cv:
        x_tr = x_train.slice(itrain)
        x_va = x_train.slice(ival)
        watchlist = [ (x_tr, 'train'), (x_va, 'eval')]
        eval_result = {}
        bst = xgb.train(params,
                        x_tr,
                        num_boost_round=params['num_round'],
                        evals=watchlist,
                        evals_result=eval_result,
                        early_stopping_rounds=params['early_stopping'],
                        verbose_eval=5)
        #print('\t score: {}'.format(roc_auc_score(y_val, y_pred_val)))
        train_score = eval_result['train']['auc'][bst.best_iteration]
        val_score = eval_result['eval']['auc'][bst.best_iteration]
        # pick the best threshold based on oof predictions
        preds_val[ival] = bst.predict(x_va, ntree_limit=bst.best_ntree_limit)
    # NOTE(review): `ival` here is the LAST fold's validation index, so the
    # threshold/MCC are evaluated on one fold only, not on all OOF predictions
    # -- confirm whether that is intended.
    thresholds = np.linspace(0.01, 0.99, 50)
    mcc = np.array([matthews_corrcoef(y[ival], preds_val[ival]>thr) for thr in thresholds])
    th_val = thresholds[mcc.argmax()]
    mcc_val = mcc.max()
    print train_score
    print val_score
    print th_val
    print mcc_val
    return preds_val
def score_xgboost(params):
    """Hyperopt objective: train and score XGBoost on a single CV fold (cv[3]).

    Returns a hyperopt result dict with loss = 1 - validation AUC, plus
    diagnostics (train AUC, best boosting iteration, best-threshold MCC).

    Relies on notebook globals: `x_train` (xgb.DMatrix), `y`, `cv`, `counter`.
    (Python 2 cell: bare `print` statements.)
    """
    global counter
    #print ('Params testing %d: %s' % (counter, params))
    counter += 1
    print('Predicting XGBoost score with ({}):'.format(counter))
    print('\t {} samples'.format(x_train.num_row()))
    print('\t {} features'.format(x_train.num_col()))
    print('\t {} parameters'.format(params))
    (itrain, ival) = cv[3]
    x_tr = x_train.slice(itrain)
    x_va = x_train.slice(ival)
    watchlist = [ (x_tr, 'train'), (x_va, 'eval')]
    eval_result = {}
    bst = xgb.train(params,
                    x_tr,
                    num_boost_round=params['num_round'],
                    evals=watchlist,
                    evals_result=eval_result,
                    early_stopping_rounds=params['early_stopping'],
                    verbose_eval=5)
    #print('\t score: {}'.format(roc_auc_score(y_val, y_pred_val)))
    train_score = eval_result['train']['auc'][bst.best_iteration]
    val_score = eval_result['eval']['auc'][bst.best_iteration]
    # pick the best threshold based on oof predictions
    preds_val = bst.predict(x_va, ntree_limit=bst.best_ntree_limit)
    # Scan a coarse threshold grid for the best Matthews corr. coefficient.
    thresholds = np.linspace(0.01, 0.99, 50)
    mcc = np.array([matthews_corrcoef(y[ival], preds_val>thr) for thr in thresholds])
    th_val = thresholds[mcc.argmax()]
    mcc_val = mcc.max()
    print train_score
    print val_score
    print th_val
    print mcc_val
    return {'loss': 1-val_score,
            'status': STATUS_OK,
            'train_score': train_score,
            'best_iter': bst.best_iteration,
            'mcc': mcc_val,
            'threshold': th_val}
# Baseline parameter set; run the full-CV out-of-fold evaluation once.
params = {'max_depth': 7, 'eta':0.1, 'silent':1, 'objective':'binary:logistic' }
#param['nthread'] = 1
params['eval_metric'] = 'auc'
params['subsample'] = 0.9
params['colsample_bytree']= 0.8
params['min_child_weight'] = 12
params['booster'] = "gbtree"
params['seed'] = 1712
params['num_round'] = 200
params['early_stopping'] = 100
df = score_xgboost_full(params)
# Hyperopt search space: tune subsample / colsample_bytree / min_child_weight
# around the baseline; single-fold scoring keeps each of the 200 trials cheap.
params = {'max_depth': 7, 'eta':0.1, 'silent':1, 'objective':'binary:logistic' }
#param['nthread'] = 1
params['eval_metric'] = 'auc'
params['subsample'] = hp.uniform('subsample', 0.7, 0.9) #,0.86
params['colsample_bytree']= hp.uniform('colsample_bytree', 0.7, 0.9) #0.92
params['min_child_weight'] = hp.choice('min_child_weight', range(50))
params['booster'] = "gbtree"
params['seed'] = 1712
params['num_round'] = 200
params['early_stopping'] = 30
# Hyperopt
trials = Trials()
counter = 0  # global trial counter consumed by score_xgboost
best = fmin(score_xgboost,
            params,
            algo=tpe.suggest,
            max_evals=200,
            trials=trials)
par_values = {'max_depth': range(8,21)}  # leftover from an earlier grid; unused below
parameters = trials.trials[0]['misc']['vals'].keys()
f, axes = plt.subplots(nrows=2, ncols=2, figsize=(16,16))
cmap = plt.cm.Dark2
par_best_score = {}
# Flatten hyperopt's trial records into one row per trial.
df = pd.DataFrame(columns=parameters + ['train_auc','val_auc'])
for i, val in enumerate(parameters):
    xs = np.array([t['misc']['vals'][val] for t in trials.trials if 'loss' in t['result']]).ravel()
    val_auc = [1-t['result']['loss'] for t in trials.trials if 'loss' in t['result']]
    train_auc = [t['result']['train_score'] for t in trials.trials if 'train_score' in t['result']]
    best_iter = [t['result']['best_iter'] for t in trials.trials if 'best_iter' in t['result']]
    mcc = [t['result']['mcc'] for t in trials.trials if 'mcc' in t['result']]
    tr = [t['result']['threshold'] for t in trials.trials if 'threshold' in t['result']]
    df[val] = xs
    df['val_auc'] = val_auc
    df['train_auc'] = train_auc
    df['best_iter'] = best_iter
    df['threshold'] = tr
    df['mcc'] = mcc
    # NOTE(review): min(val_auc) selects the trial with the WORST validation
    # AUC -- probably intended max(); confirm before trusting par_best_score.
    par_best_score[val] = xs[val_auc.index(min(val_auc))]
    #print trials.trials[ys.index(max(ys))]
    #print i, val, max(ys)
    #xs, ys = zip(sorted(xs), sorted(ys))
    #ys = np.array(ys)
    # Python 2: i/2 is integer division, picking the subplot row (i//2 in Py3).
    axes[i/2,i%2].scatter(xs, mcc, s=20, linewidth=0.01, alpha=0.5, c=cmap(float(i)/len(parameters)))
    axes[i/2,i%2].set_title(val)
print par_best_score
df['diffs'] = df['train_auc'] - df['val_auc']  # train/validation gap (overfitting indicator)
ax = df.plot.scatter('threshold','mcc')
#ax.set_xlim([0.921, 0.926])
ax = df.plot.scatter('val_auc','mcc')
ax.set_xlim([0.924, 0.928])
ax = df.plot.scatter('subsample','diffs')
#ax.set_xlim([0.924, 0.928])
ax = df.plot.scatter('colsample_bytree','diffs')
ax = df.plot.scatter('min_child_weight','diffs')
df.sort_values('mcc', ascending=False)
#df.drop(['gamma'], axis=1, inplace=True)
#df.to_csv('./data/xgboost_hyperopt_1fold_100iter.csv', index=False)
# Round the continuous parameters so grouping/plotting below forms bins.
df['colsample_bytree'] = df['colsample_bytree'].round(2)
df.sort_values('val_auc', ascending=False)
df.head()
df['subsample'] = df['subsample'].round(2)
df['colsample_bytree'] = df['colsample_bytree'].round(2)
def plot_scores_for_pars(par):
    """Plot mean validation AUC, train AUC, and their gap as a function of *par*.

    Uses the notebook-global hyperopt results frame ``df``; draws three panels
    sharing the x axis, one per metric.
    """
    fig, panels = plt.subplots(1, 3, figsize=(16, 6), sharex=True)
    grouped = df.groupby(par)
    metrics = [("val_auc", "Test auc"), ("train_auc", "Train auc"), ("diffs", "Difference")]
    for panel, (column, ylabel) in zip(panels, metrics):
        grouped[column].mean().plot(ax=panel)
        panel.set_ylabel(ylabel)
        panel.set_xlabel(par)
plot_scores_for_pars('subsample')
plot_scores_for_pars('colsample_bytree')
plot_scores_for_pars('min_child_weight')
# NOTE(review): the calls/columns below ('gamma', 'sub_r', 'colt_r', 'coll_r')
# are not created in this run's df -- leftovers from an earlier search space;
# re-executing them would raise KeyError.
plot_scores_for_pars('gamma')
plot_scores_for_pars('gamma')
df.groupby('sub_r')['val_auc'].mean().plot()
df.groupby('sub_r')['train_auc'].mean().plot()
df.groupby('colt_r')['val_auc'].mean().plot()
df.groupby('colt_r')['train_auc'].mean().plot()
df.groupby('coll_r')['val_auc'].mean().plot()
df.groupby('coll_r')['train_auc'].mean().plot()
df.plot('train_auc', 'val_auc',kind='scatter', ylim=[0.918, 0.922])
df.plot('val_auc', 'diffs', kind='scatter', xlim=[0.918, 0.922])
df.plot('gamma', 'diffs',kind='scatter')
df.plot.scatter('colsample_bytree', 'val_auc', by='max_depth')
| github_jupyter |
# Can fingerprint distances discriminate DFG conformations?
The `kissim` fingerprint encodes the pocket residues' spatial distance to four centers—the pocket centroid, hinge region, DFG region and front pocket—and should therefore discriminate between two structures in different conformations; when we compare two structures in *different* conformations the fingerprint distance should be higher than for two structures in *similar* conformations.
Let's check if this is true using DFG conformations from KLIFS. Plot distribution of fingerprint distances grouped by in/in, out/out, and in/out pairs.
- Use fingerprint distances for structure pairs between all kinases
- Use fingerprint distances for structure pairs within the same kinase
```
%load_ext autoreload
%autoreload 2
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display, Markdown
from opencadd.databases.klifs import setup_remote
from kissim.comparison import FingerprintDistanceGenerator
from src.definitions import COVERAGE_CUTOFF
from src.paths import PATH_RESULTS
HERE = Path(_dh[-1]) # noqa: F821
DATA = PATH_RESULTS / "all"
plt.style.use("seaborn")
```
## Load fingerprint distances with sufficient coverage
Choose fingerprint distances that are based on spatial distances only (**weighting scheme: 010**) and that are based on a sufficient pairwise fingerprint bit coverage (default: `COVERAGE_CUTOFF`).
```
COVERAGE_CUTOFF
# Set path
fingerprint_distance_file = DATA / "fingerprint_distances_010.csv.bz2"
# Load data
fingerprint_distance_generator = FingerprintDistanceGenerator.from_csv(fingerprint_distance_file)
print(f"Number of kinases: {len(fingerprint_distance_generator.kinase_ids)}")
print(f"Number of structures: {len(fingerprint_distance_generator.structure_ids)}")
structure_distances = fingerprint_distance_generator.data
print(f"Number of structure pairs: {structure_distances.shape[0]}")
# Keep only pairs whose fingerprints overlap on enough bits to compare reliably.
structure_distances = structure_distances[
    structure_distances["bit_coverage"] >= COVERAGE_CUTOFF
].reset_index(drop=True)
print(f"Number of structure pairs: {structure_distances.shape[0]}")
structure_distances.head()
```
## Add DFG conformation
Add DFG conformation from KLIFS to each structure pair.
```
def get_dfg(dfg, structure_klifs_id):
    """Return the DFG conformation annotated for *structure_klifs_id*.

    Parameters
    ----------
    dfg : mapping (e.g. pandas.Series indexed by structure KLIFS ID)
        Lookup table of DFG conformation labels.
    structure_klifs_id : int
        Structure KLIFS ID to look up.

    Returns
    -------
    str or None
        The conformation label, or None when the ID is not annotated.
    """
    if structure_klifs_id in dfg:
        return dfg[structure_klifs_id]
    return None
%%time
# Fetch KLIFS metadata once, then annotate each structure in every pair with
# its DFG conformation label.
klifs_session = setup_remote()
structures = klifs_session.structures.all_structures()
# Series: structure KLIFS ID -> DFG label (presumably "in"/"out"/other KLIFS
# labels -- confirm against the KLIFS schema).
dfg = structures.set_index("structure.klifs_id")["structure.dfg"]
structure_distances["dfg.1"] = structure_distances["structure.1"].apply(lambda x: get_dfg(dfg, x))
structure_distances["dfg.2"] = structure_distances["structure.2"].apply(lambda x: get_dfg(dfg, x))
structure_distances.head()
```
## Plot DFG conformation pairs
Group the structure pairs by DFG conformation pairs—in/in, out/out, in/out—and plot their fingerprint distance distributions.
```
def structure_distances_by_dfg_conformation_pairs(structure_distances):
    """Distances for in/in, out/out, and in/out structure pairs.

    Parameters
    ----------
    structure_distances : pandas.DataFrame
        Must contain the columns "dfg.1" and "dfg.2" (DFG conformation of each
        structure in a pair) and "distance" (fingerprint distance).

    Returns
    -------
    pandas.DataFrame
        Columns "in/in", "out/out", and "in/out"; each column holds the
        distances of the corresponding conformation pairs. Rows keep their
        original indices, so cells outside a column's selection are NaN.
    """
    dfg_1 = structure_distances["dfg.1"]
    dfg_2 = structure_distances["dfg.2"]
    distances = structure_distances["distance"]
    dfg_in_in = distances[(dfg_1 == "in") & (dfg_2 == "in")]
    dfg_out_out = distances[(dfg_1 == "out") & (dfg_2 == "out")]
    # A mixed pair can list the two conformations in either order.
    dfg_in_out = distances[
        ((dfg_1 == "in") & (dfg_2 == "out")) | ((dfg_1 == "out") & (dfg_2 == "in"))
    ]
    # Fixed: dropped the redundant second pd.DataFrame(...) wrapper that
    # re-wrapped an object that was already a DataFrame.
    return pd.DataFrame({"in/in": dfg_in_in, "out/out": dfg_out_out, "in/out": dfg_in_out})
def plot_structure_distances_by_dfg_conformation_pairs(structure_distances, kinase):
    """Plot distribution of structure distances per DFG conformation pair.

    Draws a boxplot of fingerprint distances for the in/in, out/out and in/out
    pair groups, saves the figure under ``figures/`` (with a kinase-specific
    suffix when *kinase* is given), and prints/displays summary statistics.
    """
    # Data
    distances_by_pair = structure_distances_by_dfg_conformation_pairs(structure_distances)
    print("Number of structure pairs per conformation pair:")
    print(distances_by_pair.notna().sum())
    # Boxplot
    fig, ax = plt.subplots(1, 1, figsize=(3.33, 3.33))
    distances_by_pair.plot(kind="box", ax=ax)
    xlabel = "Type of DFG conformation pairs"
    if kinase is not None:
        xlabel = f"{xlabel} ({kinase})"
    ax.set_xlabel(xlabel)
    ax.set_ylabel("Fingerprint distances (spatial features only)")
    # One save path for both cases; only the file-name suffix differs.
    suffix = "" if kinase is None else f"_{kinase}"
    fig.savefig(
        HERE / f"../figures/kissim_discriminates_dfg{suffix}.png",
        dpi=300,
        bbox_inches="tight",
    )
    plt.show()
    # Stats
    display(distances_by_pair.describe())
```
### All structures
Use fingerprint distances for structure pairs between all kinases.
```
plot_structure_distances_by_dfg_conformation_pairs(structure_distances, kinase=None)
```
<div class="alert alert-block alert-info">
When including all kinases at the same time, the distribution of fingerprint distances is similar for structure pairs with the same DFG conformations (in/in and out/out) and different DFG conformations (in/out).
The fingerprint seems not to discriminate DFG conformations on a kinome-wide level, maybe because the encoded spatial information is not restricted to only DFG conformation features. We may see a discriminative effect when comparing structures for a single kinase.
</div>
### Structures for one kinase
Use fingerprint distances for structure pairs within the same kinase; use only kinases that have a sufficient number of structures in DFG-in and DFG-out conformations (default: 10).
```
def kinases_with_high_dfg_in_out_coverage(structure_distances, dfg_structure_coverage_cutoff=10):
    """Given a dataset, get kinases with a threshold DFG in/out coverage.

    Parameters
    ----------
    structure_distances : pandas.DataFrame
        Structure pair data; must contain "structure.1" and "structure.2"
        columns holding structure KLIFS IDs.
    dfg_structure_coverage_cutoff : int
        Minimum number of structures a kinase must have in *each* of the
        DFG-in and DFG-out conformations (default: 10).

    Returns
    -------
    pandas.DataFrame
        Structure counts indexed by ("kinase", "dfg"); only kinases passing
        the cutoff in both conformations are kept.
    """
    # Get structure KLIFS IDs in our dataset (either side of a pair counts);
    # a single drop_duplicates on the concatenated series suffices — the
    # per-column deduplication before concatenation was redundant.
    structure_klifs_ids = (
        pd.concat(
            [
                structure_distances["structure.1"],
                structure_distances["structure.2"],
            ]
        )
        .drop_duplicates()
        .to_list()
    )
    print(f"Number of structures: {len(structure_klifs_ids)}")
    # Get structural metadata from KLIFS (remote session; needs network access)
    klifs_session = setup_remote()
    structures = klifs_session.structures.all_structures()
    structures = structures[structures["structure.klifs_id"].isin(structure_klifs_ids)]
    # Count number of structures per kinase and conformation
    dfg_by_kinase = structures.groupby("kinase.klifs_name").apply(
        lambda x: x["structure.dfg"].value_counts()
    )
    dfg_by_kinase = dfg_by_kinase.reset_index()
    dfg_by_kinase.columns = ["kinase", "dfg", "n_structures"]
    # Keep only in/out rows (drops intermediate/other conformation labels)
    dfg_by_kinase = dfg_by_kinase[dfg_by_kinase["dfg"].isin(["in", "out"])]
    # Keep only rows with at least the cutoff number of structures
    dfg_by_kinase = dfg_by_kinase[dfg_by_kinase["n_structures"] >= dfg_structure_coverage_cutoff]
    # Keep only kinases that pass the cutoff in BOTH conformations
    n_conformations_by_kinase = dfg_by_kinase.groupby("kinase").size()
    dfg_by_kinase = dfg_by_kinase[
        dfg_by_kinase["kinase"].isin(
            n_conformations_by_kinase[n_conformations_by_kinase == 2].index
        )
    ]
    return dfg_by_kinase.set_index(["kinase", "dfg"])
# Kinases with at least 10 structures in each of DFG-in and DFG-out
dfg_by_kinase = kinases_with_high_dfg_in_out_coverage(
    structure_distances, dfg_structure_coverage_cutoff=10
)
dfg_by_kinase
# Per-kinase report: structure counts per conformation, DFG-in percentage,
# and the distance-distribution boxplot restricted to that kinase.
for kinase, dfg in dfg_by_kinase.reset_index().groupby("kinase"):
    display(Markdown(f"#### {kinase}"))
    dfg = dfg.set_index("dfg")
    n_dfg_in = dfg.loc["in", "n_structures"]
    n_dfg_out = dfg.loc["out", "n_structures"]
    print(f"Number of DFG-in structures: {n_dfg_in}")
    print(f"Number of DFG-out structures: {n_dfg_out}")
    dfg_in_percentage = round(n_dfg_in / (n_dfg_in + n_dfg_out) * 100, 2)
    print(f"Percentage of DFG-in: {dfg_in_percentage}%")
    # Keep only pairs where BOTH structures belong to the current kinase
    structure_distances_by_kinase = structure_distances[
        (structure_distances["kinase.1"] == kinase) & (structure_distances["kinase.2"] == kinase)
    ].reset_index(drop=True)
    plot_structure_distances_by_dfg_conformation_pairs(structure_distances_by_kinase, kinase)
```
<div class="alert alert-block alert-info">
We compare here only fingerprint distances for pairs of structures that describe the same kinase. We observe two interesting shifts:
1. The distribution for out/out pairs is overall lower than for in/in pairs. Potential explanations: definitions for DFG-out are stricter than for DFG-in; "real" diversity of DFG-out structures could be still unknown due to the lower number of structures for DFG-out than for DFG-in.
2. The distribution of different DFG conformations (in/out) is overall higher than for equal DFG conformations (in/in and out/out). The fingerprint can discriminate DFG conformations of the same kinase.
</div>
| github_jupyter |
```
import datetime as dt
import numpy as np
import pandas as pd
import panel as pn
pn.extension('tabulator')
```
The ``Tabulator`` widget allows displaying and editing a pandas DataFrame. The `Tabulator` is a largely backward compatible replacement for the [`DataFrame`](./DataFrame.ipynb) widget and will eventually replace it. It is built on the [Tabulator](http://tabulator.info/) library, which provides for a wide range of features.
For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Links.ipynb).
#### Parameters:
For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
##### Core
* **``aggregators``** (``dict``): A dictionary mapping from index name to an aggregator to be used for `hierarchical` multi-indexes (valid aggregators include 'min', 'max', 'mean' and 'sum'). If separate aggregators for different columns are required the dictionary may be nested as `{index_name: {column_name: aggregator}}`
* **``configuration``** (``dict``): A dictionary mapping used to specify tabulator options not explicitly exposed by panel.
* **``editors``** (``dict``): A dictionary mapping from column name to a bokeh `CellEditor` instance or tabulator editor specification.
* **``embed_content``** (``boolean``): Whether to embed the `row_content` or to dynamically fetch it when a row is expanded.
* **``expanded``** (``list``): The currently expanded rows as a list of integer indexes.
* **``filters``** (``list``): A list of client-side filter definitions that are applied to the table.
* **``formatters``** (``dict``): A dictionary mapping from column name to a bokeh `CellFormatter` instance or tabulator formatter specification.
* **``groupby``** (`list`): Groups rows in the table by one or more columns.
* **``header_align``** (``dict`` or ``str``): A mapping from column name to header alignment or a fixed header alignment, which should be one of `'left'`, `'center'`, `'right'`.
* **``header_filters``** (``boolean``/``dict``): A boolean enabling filters in the column headers or a dictionary providing filter definitions for specific columns.
* **``hierarchical``** (boolean, default=False): Whether to render multi-indexes as hierarchical index (note hierarchical must be enabled during instantiation and cannot be modified later)
* **``hidden_columns``** (`list`): List of columns to hide.
* **``layout``** (``str``, `default='fit_data_table'`): Describes the column layout mode with one of the following options `'fit_columns'`, `'fit_data'`, `'fit_data_stretch'`, `'fit_data_fill'`, `'fit_data_table'`.
* **``frozen_columns``** (`list`): List of columns to freeze, preventing them from scrolling out of frame. Column can be specified by name or index.
* **``frozen_rows``**: (`list`): List of rows to freeze, preventing them from scrolling out of frame. Rows can be specified by positive or negative index.
* **``page``** (``int``, `default=1`): Current page, if pagination is enabled.
* **``page_size``** (``int``, `default=20`): Number of rows on each page, if pagination is enabled.
* **``pagination``** (`str`, `default=None`): Set to `'local` or `'remote'` to enable pagination; by default pagination is disabled with the value set to `None`.
* **``row_content``** (``callable``): A function that receives the expanded row as input and should return a Panel object to render into the expanded region below the row.
* **``row_height``** (``int``, `default=30`): The height of each table row.
* **``selection``** (``list``): The currently selected rows as a list of integer indexes.
* **``selectable``** (`boolean` or `str` or `int`, `default=True`): Defines the selection mode:
* `True`
Selects rows on click. To select multiple use Ctrl-select, to select a range use Shift-select
* `False`
Disables selection
* `'checkbox'`
Adds a column of checkboxes to toggle selections
* `'checkbox-single'`
Same as 'checkbox' but header does not allow select/deselect all
* `'toggle'`
Selection toggles when clicked
* `int`
The maximum number of selectable rows.
* **``selectable_rows``** (`callable`): A function that should return a list of integer indexes given a DataFrame indicating which rows may be selected.
* **``show_index``** (``boolean``, `default=True`): Whether to show the index column.
* **``text_align``** (``dict`` or ``str``): A mapping from column name to alignment or a fixed column alignment, which should be one of `'left'`, `'center'`, `'right'`.
* **`theme`** (``str``, `default='simple'`): The CSS theme to apply (note that changing the theme will restyle all tables on the page), which should be one of `'default'`, `'site'`, `'simple'`, `'midnight'`, `'modern'`, `'bootstrap'`, `'bootstrap4'`, `'materialize'`, `'bulma'`, `'semantic-ui'`, or `'fast'`.
* **``titles``** (``dict``): A mapping from column name to a title to override the name with.
* **``value``** (``pd.DataFrame``): The pandas DataFrame to display and edit
* **``widths``** (``dict``): A dictionary mapping from column name to column width in the rendered table.
##### Display
* **``disabled``** (``boolean``): Whether the widget is editable
* **``name``** (``str``): The title of the widget
##### Properties
* **``current_view``** (``DataFrame``): The current view of the table that is displayed, i.e. after sorting and filtering are applied
* **``selected_dataframe``** (``DataFrame``): A DataFrame reflecting the currently selected rows.
___
The ``Tabulator`` widget renders a DataFrame using an interactive grid, which allows directly editing the contents of the dataframe in place, with any changes being synced with Python. The `Tabulator` will usually determine the appropriate formatter automatically based on the type of the data:
```
# Demo DataFrame covering the dtypes Tabulator auto-detects formatters for
# (int, float, str, bool, date, datetime)
df = pd.DataFrame({
    'int': [1, 2, 3],
    'float': [3.14, 6.28, 9.42],
    'str': ['A', 'B', 'C'],
    'bool': [True, False, True],
    'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10)],
    'datetime': [dt.datetime(2019, 1, 1, 10), dt.datetime(2020, 1, 1, 12), dt.datetime(2020, 1, 10, 13)]
}, index=[1, 2, 3])
df_widget = pn.widgets.Tabulator(df)
df_widget
```
## Formatters
By default the widget will pick bokeh ``CellFormatter`` and ``CellEditor`` types appropriate to the dtype of the column. These may be overridden by explicit dictionaries mapping from the column name to the editor or formatter instance. For example below we create a ``SelectEditor`` instance to pick from four options in the ``str`` column and a ``NumberFormatter`` to customize the formatting of the float values:
```
from bokeh.models.widgets.tables import NumberFormatter, BooleanFormatter
# Map column name -> Bokeh CellFormatter instance to override the defaults
bokeh_formatters = {
    'float': NumberFormatter(format='0.00000'),
    'bool': BooleanFormatter(),
}
pn.widgets.Tabulator(df, formatters=bokeh_formatters)
```
The list of valid Bokeh formatters includes:
* [BooleanFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.BooleanFormatter)
* [DateFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.DateFormatter)
* [NumberFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.NumberFormatter)
* [HTMLTemplateFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.HTMLTemplateFormatter)
* [StringFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.StringFormatter)
* [ScientificFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.ScientificFormatter)
However in addition to the formatters exposed by Bokeh it is also possible to provide valid formatters built into the Tabulator library. These may be defined either as a string or as a dictionary declaring the 'type' and other arguments, which are passed to Tabulator as the `formatterParams`:
```
# Tabulator-native formatter specs: a dict with 'type' plus any extra keys,
# which Panel forwards to Tabulator as formatterParams
tabulator_formatters = {
    'float': {'type': 'progress', 'max': 10},
    'bool': {'type': 'tickCross'}
}
pn.widgets.Tabulator(df, formatters=tabulator_formatters)
```
The list of valid Tabulator formatters can be found in the [Tabulator documentation](http://tabulator.info/docs/4.9/format#format-builtin).
## Editors
Just like the formatters, the `Tabulator` will natively understand the Bokeh `Editor` types. However, in the background it will replace most of them with equivalent editors natively supported by the tabulator library:
```
from bokeh.models.widgets.tables import CheckboxEditor, NumberEditor, SelectEditor, DateEditor, TimeEditor
# Map column name -> Bokeh CellEditor instance (Panel swaps most of these for
# native Tabulator editors). NOTE(review): DateEditor/TimeEditor are imported
# but unused in this cell.
bokeh_editors = {
    'float': NumberEditor(),
    'bool': CheckboxEditor(),
    'str': SelectEditor(options=['A', 'B', 'C', 'D']),
}
pn.widgets.Tabulator(df[['float', 'bool', 'str']], editors=bokeh_editors)
```
Therefore it is often preferable to use one of the [Tabulator editors](http://tabulator.info/docs/5.0/edit#edit) directly. Note that in addition to the standard Tabulator editors the Tabulator widget also supports `'date'` and `'datetime'` editors:
```
from bokeh.models.widgets.tables import CheckboxEditor, NumberEditor, SelectEditor
# Despite the variable name, these are Tabulator editor *specifications*
# (dicts/strings), not Bokeh editor instances; the Bokeh imports above are
# unused in this cell. 'date'/'datetime' are Panel-specific editor types.
bokeh_editors = {
    'float': {'type': 'number', 'max': 10, 'step': 0.1},
    'bool': {'type': 'tickCross', 'tristate': True, 'indeterminateValue': None},
    'str': {'type': 'autocomplete', 'values': True},
    'date': 'date',
    'datetime': 'datetime'
}
edit_table = pn.widgets.Tabulator(df, editors=bokeh_editors)
edit_table
```
When editing a cell the data stored on the `Tabulator.value` is updated and you can listen to any changes using the usual `.param.watch(callback, 'value')` mechanism. However if you need to know precisely which cell was changed you may also attach an `on_edit` callback which will be passed a `TableEditEvent` containing the:
- `column`: Name of the edited column
- `row`: Integer index of the edited row
- `value`: The updated value
```
edit_table.on_edit(lambda e: print(e.column, e.row, e.value))
```
### Column layouts
By default the DataFrame widget will adjust the sizes of both the columns and the table based on the contents, reflecting the default value of the parameter: `layout="fit_data_table"`. Alternative modes allow manually specifying the widths of the columns, giving each column equal widths, or adjusting just the size of the columns.
#### Manual column widths
To manually adjust column widths provide explicit `widths` for each of the columns:
```
# Build the mixed-dtype demo frame explicitly: pd._testing.makeMixedDataFrame()
# is a private pandas helper (removed in recent pandas releases). The values
# below are exactly what that helper produced.
custom_df = pd.DataFrame({
    'A': [0.0, 1.0, 2.0, 3.0, 4.0],
    'B': [0.0, 1.0, 0.0, 1.0, 0.0],
    'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
    'D': pd.bdate_range('2009-01-01', periods=5),
})
pn.widgets.Tabulator(custom_df, widths={'index': 70, 'A': 50, 'B': 50, 'C': 70, 'D': 130})
```
You can also declare a single width for all columns this way:
```
pn.widgets.Tabulator(custom_df, widths=130)
```
#### Autosize columns
To automatically adjust the columns depending on their content set `layout='fit_data'`:
```
pn.widgets.Tabulator(custom_df, layout='fit_data', width=400)
```
To ensure that the table fits all the data but also stretches to fill all the available space, set `layout='fit_data_stretch'`:
```
pn.widgets.Tabulator(custom_df, layout='fit_data_stretch', width=400)
```
The `'fit_data_fill'` option on the other hand won't stretch the last column but still fill the space:
```
pn.widgets.Tabulator(custom_df, layout='fit_data_fill', width=400)
```
Perhaps the most useful of these options is `layout='fit_data_table'` (and therefore the default) since this will automatically size both the columns and the table:
```
pn.widgets.Tabulator(custom_df, layout='fit_data_table')
```
#### Equal size
The simplest option is simply to allocate each column equal amount of size:
```
pn.widgets.Tabulator(custom_df, layout='fit_columns', width=650)
```
## Alignment
The content of a column or its header can be horizontally aligned with `text_align` and `header_align`. These two parameters accept either a string that globally defines the alignment or a dictionary that declares which particular columns are meant to be aligned and how.
```
pn.widgets.Tabulator(df, header_align='center', text_align={'str': 'right', 'bool': 'center'}, widths=200)
```
## Styling
The ability to style the contents of a table based on its content and other considerations is very important. Thankfully pandas provides a powerful [styling API](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html), which can be used in conjunction with the `Tabulator` widget. Specifically the `Tabulator` widget exposes a `.style` attribute just like a `pandas.DataFrame` which lets the user apply custom styling using methods like `.apply` and `.applymap`. For a detailed guide to styling see the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html).
Here we will demonstrate with a simple example, starting with a basic table:
```
style_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
styled = pn.widgets.Tabulator(style_df)
```
Next we define two functions which apply styling cell-wise (`color_negative_red`) and column-wise (`highlight_max`), which we then apply to the `Tabulator` using the `.style` API and then display the `styled` table:
```
def color_negative_red(val):
    """Cell-wise styler: red text for negative scalars, black otherwise."""
    return 'color: %s' % ('red' if val < 0 else 'black')
def highlight_max(s):
    """Column-wise styler: yellow background on the Series' maximum value(s)."""
    return ['background-color: yellow' if flag else '' for flag in s == s.max()]
# Register both stylers on the Tabulator's pandas-like .style accessor
styled.style.applymap(color_negative_red).apply(highlight_max)
styled
```
## Theming
The Tabulator library ships with a number of themes, which are defined as CSS stylesheets. For that reason changing the theme on one table will affect all tables on the page and it will usually be preferable to set the theme once at the class level like this:
```python
pn.widgets.Tabulator.theme = 'default'
```
For a full list of themes see the [Tabulator documentation](http://tabulator.info/docs/4.9/theme), however the default themes include:
- `'simple'`
- `'default'`
- `'midnight'`
- `'site'`
- `'modern'`
- `'bootstrap'`
- `'bootstrap4'`
- `'materialize'`
- `'semantic-ui'`
- `'bulma'`
## Selection
The `selection` parameter controls which rows in the table are selected and can be set from Python and updated by selecting rows on the frontend:
```
# Pre-select rows 0, 3 and 7; `selection` stays in sync with the frontend
sel_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
select_table = pn.widgets.Tabulator(sel_df, selection=[0, 3, 7])
select_table
```
Once initialized, the ``selection`` parameter will return the integer indexes of the selected rows, while the ``selected_dataframe`` property will return a new DataFrame containing just the selected rows:
```
select_table.selection = [1, 4, 9]
select_table.selected_dataframe
```
The `selectable` parameter declares how the selections work.
- `True`: Selects rows on click. To select multiple use Ctrl-select, to select a range use Shift-select
- `False`: Disables selection
- `'checkbox'`: Adds a column of checkboxes to toggle selections
- `'checkbox-single'`: Same as `'checkbox'` but disables (de)select-all in the header
- `'toggle'`: Selection toggles when clicked
- Any positive `int`: A number that sets the maximum number of selectable rows
```
pn.widgets.Tabulator(sel_df, selection=[0, 3, 7], selectable='checkbox')
```
Additionally we can also disable selection for specific rows by providing a `selectable_rows` function. The function must accept a DataFrame and return a list of integer indexes indicating which rows are selectable, e.g. here we disable selection for every second row:
```
pn.widgets.Tabulator(sel_df, selectable_rows=lambda df: list(range(0, len(df), 2)))
```
### Freezing rows and columns
Sometimes your table will be larger than can be displayed in a single viewport, in which case scroll bars will be enabled. In such cases, you might want to make sure that certain information is always visible. This is where the `frozen_columns` and `frozen_rows` options come in.
#### Frozen columns
When you have a large number of columns and can't fit them all on the screen you might still want to make sure that certain columns do not scroll out of view. The `frozen_columns` option makes this possible by specifying a list of columns that should be frozen, e.g. `frozen_columns=['index']` will freeze the index column:
```
# NOTE(review): pd._testing.makeCustomDataframe is a private pandas helper
# (removed in recent releases) — consider constructing the frame explicitly.
wide_df = pd._testing.makeCustomDataframe(10, 10, r_idx_names=['index'])
pn.widgets.Tabulator(wide_df, frozen_columns=['index'], width=400)
```
#### Frozen rows
Another common scenario is when you have certain rows with special meaning, e.g. aggregates that summarize the information in the rest of the table. In this case you may want to freeze those rows so they do not scroll out of view. You can achieve this by setting a list of `frozen_rows` by integer index (which can be positive or negative, where negative values are relative to the end of the table):
```
# Append Median/Mean summary rows, then freeze the last two rows in the view.
# NOTE(review): pd._testing.makeTimeDataFrame is a private pandas helper.
date_df = pd._testing.makeTimeDataFrame().iloc[:10]
agg_df = pd.concat([date_df, date_df.median().to_frame('Median').T, date_df.mean().to_frame('Mean').T])
# Stringify the index so dates and the 'Median'/'Mean' labels coexist
agg_df.index = agg_df.index.map(str)
pn.widgets.Tabulator(agg_df, frozen_rows=[-2, -1], width=400)
```
## Row contents
A table can only display so much information without becoming difficult to scan. We may want to render additional information to a table row to provide additional context. To make this possible you can provide a `row_content` function which is given the table row as an argument and should return a panel object that will be rendered into an expanding region below the row. By default the contents are fetched dynamically whenever a row is expanded, however using the `embed_content` parameter we can embed all the content.
Below we create a periodic table of elements where the Wikipedia page for each element will be rendered into the expanded region:
```
from bokeh.sampledata.periodic_table import elements
periodic_df = elements[['atomic number', 'name', 'atomic mass', 'metal', 'year discovered']].set_index('atomic number')
# Expanded-row renderer: embeds the element's Wikipedia page in an iframe.
# NOTE(review): the iframe URL uses plain http — consider https.
content_fn = lambda row: pn.pane.HTML(
    f'<iframe src="http://en.wikipedia.org/wiki/{row["name"]}?printable=yes" width="100%" height="300px"></iframe>',
    sizing_mode='stretch_width'
)
periodic_table = pn.widgets.Tabulator(
    periodic_df, height=500, layout='fit_columns', sizing_mode='stretch_width',
    row_content=content_fn, embed_content=True
)
periodic_table
```
The currently expanded rows can be accessed (and set) on the `expanded` parameter:
```
periodic_table.expanded
```
## Grouping
Another useful option is the ability to group specific rows together, which can be achieved using `groups` parameter. The `groups` parameter should be composed of a dictionary mapping from the group titles to the column names:
```
pn.widgets.Tabulator(date_df, groups={'Group 1': ['A', 'B'], 'Group 2': ['C', 'D']})
```
## Groupby
In addition to grouping columns we can also group rows by the values along one or more columns:
```
from bokeh.sampledata.autompg import autompg
pn.widgets.Tabulator(autompg, groupby=['yr', 'origin'], height=240)
```
### Hierarchical Multi-index
The `Tabulator` widget can also render a hierarchical multi-index and aggregate over specific categories. If a DataFrame with a hierarchical multi-index is supplied and the `hierarchical` is enabled the widget will group data by the categories in the order they are defined in. Additionally for each group in the multi-index an aggregator may be provided which will aggregate over the values in that category.
For example we may load population data for locations around the world broken down by sex and age-group. If we specify aggregators over the 'AgeGrp' and 'Sex' indexes we can see the aggregated values for each of those groups (note that we do not have to specify an aggregator for the outer index since we specify the aggregators over the subgroups in this case the 'Sex'):
```
from bokeh.sampledata.population import data as population_data
pop_df = population_data[population_data.Year == 2020].set_index(['Location', 'AgeGrp', 'Sex'])[['Value']]
pn.widgets.Tabulator(value=pop_df, hierarchical=True, aggregators={'Sex': 'sum', 'AgeGrp': 'sum'}, height=400)
```
## Pagination
When working with large tables we sometimes can't send all the data to the browser at once. In these scenarios we can enable pagination, which will fetch only the currently viewed data from the server backend. This may be enabled by setting `pagination='remote'` and the size of each page can be set using the `page_size` option:
```
large_df = pd._testing.makeCustomDataframe(100000, 5)
%%time
paginated_table = pn.widgets.Tabulator(large_df, pagination='remote', page_size=10)
paginated_table
```
Contrary to the `'remote'` option, `'local'` pagination entirely loads the data but still allows to display it on multiple pages.
```
%%time
medium_df = pd._testing.makeCustomDataframe(10000, 5)
paginated_table = pn.widgets.Tabulator(large_df, pagination='local', page_size=10)
paginated_table
```
## Filtering
A very common scenario is that you want to attach a number of filters to a table in order to view just a subset of the data. You can achieve this through callbacks or other reactive approaches but the `.add_filter` method makes it much easier.
#### Constant and Widget filters
The simplest approach to filtering is to select along a column with a constant or dynamic value. The `.add_filter` method allows passing in constant values, widgets and parameters. If a widget or parameter is provided the table will watch the object for changes in the value and update the data in response. The filtering will depend on the type of the constant or dynamic value:
- scalar: Filters by checking for equality
- `tuple`: A tuple will be interpreted as range.
- `list`/`set`: A list or set will be interpreted as a set of discrete scalars and the filter will check if the values in the column match any of the items in the list.
As an example we will create a DataFrame with some data of mixed types:
```
mixed_df = pd._testing.makeMixedDataFrame()
filter_table = pn.widgets.Tabulator(mixed_df)
filter_table
```
Now we will start adding filters one-by-one, e.g. to start with we add a filter for the `'A'` column, selecting a range from 0 to 3:
```
filter_table.add_filter((0, 3), 'A')
```
Next we add dynamic widget based filter, a `RangeSlider` which allows us to further narrow down the data along the `'A'` column:
```
slider = pn.widgets.RangeSlider(start=0, end=3, name='A Filter')
filter_table.add_filter(slider, 'A')
```
Lastly we will add a `MultiSelect` filter along the `'C'` column:
```
select = pn.widgets.MultiSelect(options=['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], name='C Filter')
filter_table.add_filter(select, 'C')
```
Now let's display the table alongside the widget based filters:
```
pn.Row(
pn.Column(slider, select),
filter_table
)
```
After filtering you can inspect the current view with the `current_view` property:
```
filter_table.current_view
```
#### Function based filtering
For more complex filtering tasks you can supply a function that should accept the DataFrame to be filtered as the first argument and must return a filtered copy of the data. Let's start by loading some data.
```
import sqlite3
from bokeh.sampledata.movies_data import movie_path
# Use the documented factory function sqlite3.connect() rather than
# instantiating sqlite3.Connection directly, and close the connection
# once the query has been read into the DataFrame.
con = sqlite3.connect(movie_path)
try:
    movies_df = pd.read_sql('SELECT Title, Year, Genre, Director, Writer, imdbRating from omdb', con)
finally:
    con.close()
# Drop rows without a director so string filters on that column work
movies_df = movies_df[~movies_df.Director.isna()]
movies_table = pn.widgets.Tabulator(movies_df, pagination='remote', layout='fit_columns', width=800)
```
By using the `pn.bind` function, which binds widget and parameter values to a function, complex filtering can be achieved. E.g. here we will add a filter function that tests whether the string or regex is contained in the 'Director' column of a listing of thousands of movies:
```
director_filter = pn.widgets.TextInput(name='Director filter', value='Chaplin')
def contains_filter(df, pattern, column):
    """Return the rows of *df* whose *column* contains *pattern* (regex allowed).

    An empty/falsy pattern disables filtering and returns *df* unchanged.
    Missing values are treated as non-matching (``na=False``) instead of
    propagating NaN into the boolean mask, which would raise on indexing.
    """
    if not pattern:
        return df
    return df[df[column].str.contains(pattern, na=False)]
movies_table.add_filter(pn.bind(contains_filter, pattern=director_filter, column='Director'))
pn.Row(director_filter, movies_table)
```
### Client-side filtering
In addition to the Python API the Tabulator widget also offers a client-side filtering API, which can be exposed through `header_filters` or by manually adding filters to the rendered Bokeh model. The API for declaring header filters is almost identical to the API for defining [Editors](#Editors). The `header_filters` can either be enabled by setting it to `True` or by manually supplying filter types for each column. The types of filters supports all the same options as the editors, in fact if you do not declare explicit `header_filters` the tabulator will simply use the defined `editors` to determine the correct filter type:
```
bokeh_editors = {
'float': {'type': 'number', 'max': 10, 'step': 0.1},
'bool': {'type': 'tickCross', 'tristate': True, 'indeterminateValue': None},
'str': {'type': 'autocomplete', 'values': True}
}
header_filter_table = pn.widgets.Tabulator(
df[['float', 'bool', 'str']], height=140, width=400, layout='fit_columns',
editors=bokeh_editors, header_filters=True
)
header_filter_table
```
When a filter is applied client-side the `filters` parameter is synced with Python. The definition of `filters` looks something like this:
```
[{'field': 'Director', 'type': '=', 'value': 'Steven Spielberg'}]
```
Try applying a filter and then inspect the `filters` parameter:
```
header_filter_table.filters
```
For all supported filtering types see the [Tabulator Filtering documentation](http://tabulator.info/docs/4.9/filter).
If we want to change the filter type for the `header_filters` we can do so in the definition by supplying a dictionary indexed by the column names and then either providing a dictionary which may define the `'type'`, a comparison `'func'`, a `'placeholder'` and any additional keywords supported by the particular filter type.
```
# Per-column header-filter specs: 'type', comparison 'func', 'placeholder',
# plus any extra keywords supported by the chosen Tabulator filter type
movie_filters = {
    'Title': {'type': 'input', 'func': 'like', 'placeholder': 'Enter title'},
    'Year': {'placeholder': 'Enter year'},
    'Genre': {'type': 'input', 'func': 'like', 'placeholder': 'Enter genre'},
    'Director': {'type': 'input', 'func': 'like', 'placeholder': 'Enter director'},
    'Writer': {'type': 'input', 'func': 'like', 'placeholder': 'Enter writer'},
    'imdbRating': {'type': 'number', 'func': '>=', 'placeholder': 'Enter minimum rating'}
}
filter_table = pn.widgets.Tabulator(
    movies_df, pagination='remote', layout='fit_columns', page_size=10, sizing_mode='stretch_width',
    header_filters=movie_filters
)
filter_table
```
## Downloading
The `Tabulator` also supports triggering a download of the data as a CSV or JSON file depending on the filename. The download can be triggered with the `.download()` method, which optionally accepts the filename as the first argument.
To trigger the download client-side (i.e. without involving the server) you can use the `.download_menu` method which creates a `TextInput` and `Button` widget, which allow setting the filename and triggering the download respectively:
```
download_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
download_table = pn.widgets.Tabulator(download_df)
# download_menu returns a TextInput (filename) and Button (trigger) pair
# wired up for a client-side download of the table contents
filename, button = download_table.download_menu(
    text_kwargs={'name': 'Enter filename', 'value': 'default.csv'},
    button_kwargs={'name': 'Download table'}
)
pn.Row(
    pn.Column(filename, button),
    download_table
)
```
## Streaming
When we are monitoring some source of data that updates over time, we may want to update the table with the newly arriving data. However, we do not want to transmit the entire dataset each time. To handle efficient transfer of just the latest data, we can use the `.stream` method on the `Tabulator` object:
```
stream_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
stream_table = pn.widgets.Tabulator(stream_df, layout='fit_columns', width=450)
stream_table
```
As example, we will schedule a periodic callback that streams new data every 1000ms (i.e. 1s) five times in a row:
```
def stream_data(follow=True):
    """Append a fresh batch of random rows to ``stream_table``.

    When *follow* is True the table scrolls to keep the newest rows visible.
    """
    new_rows = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
    stream_table.stream(new_rows, follow=follow)

# Push one new batch per second, five times in total.
pn.state.add_periodic_callback(stream_data, period=1000, count=5)
```
If you are viewing this example with a live Python kernel you will be able to watch the table update and scroll along. If we want to disable the scrolling behavior, we can set `follow=False`:
```
stream_data(follow=False)
```
## Patching
In certain cases we don't want to update the table with new data but just patch existing data.
```
patch_table = pn.widgets.Tabulator(df[['int', 'float', 'str', 'bool']])
patch_table
```
The easiest way to patch the data is by supplying a dictionary as the patch value. The dictionary should have the following structure:
```python
{
column: [
(index: int or slice, value),
...
],
...
}
```
As an example, below we will patch the 'bool' and 'int' columns. On the `'bool'` column we will replace the 0th and 2nd row and on the `'int'` column we replace the first two rows:
```
# Patch values in place: rows 0 and 2 of the 'bool' column, and (via a
# slice) the first two rows of the 'int' column.
patch_table.patch({
    'bool': [
        (0, False),
        (2, False)
    ],
    'int': [
        (slice(0, 2), [3, 2])
    ]
})
```
## Static Configuration
Panel does not expose all options available from Tabulator, if a desired option is not natively supported, it can be set via the `configuration` argument.
This dictionary can be seen as a base dictionary which the tabulator object fills and passes to the Tabulator javascript-library.
As an example, we can turn off sorting and resizing of columns by disabling the `headerSort` and `resizable` options.
```
df = pd.DataFrame({
    'int': [1, 2, 3],
    'float': [3.14, 6.28, 9.42],
    'str': ['A', 'B', 'C'],
    'bool': [True, False, True],
    'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10)]
}, index=[1, 2, 3])
# Pass raw Tabulator options through `configuration`. As described above, we
# disable BOTH column resizing and header sorting (the original cell left
# headerSort at True, contradicting the surrounding text).
df_widget = pn.widgets.Tabulator(df, configuration={'columnDefaults': {
    'resizable': False,
    'headerSort': False
}})
df_widget.servable()
```
These and other available tabulator options are listed at http://tabulator.info/docs/4.9/options.
Obviously not all options will work though, especially any settable callbacks and options which are set by the internal panel tabulator module (for example the `columns` option).
Additionally it should be noted that the configuration parameter is not responsive so it can only be set at instantiation time.
| github_jupyter |
```
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/main/vertex-ai-samples/notebooks/community/feature_store/mobile_gaming_feature_store.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/inardini/vertex-ai-samples/blob/main/vertex-ai-samples/notebooks/community/feature_store/mobile_gaming_feature_store.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
## Overview
Imagine you are a member of the Data Science team working on the same Mobile Gaming application reported in the [Churn prediction for game developers using Google Analytics 4 (GA4) and BigQuery ML](https://cloud.google.com/blog/topics/developers-practitioners/churn-prediction-game-developers-using-google-analytics-4-ga4-and-bigquery-ml) blog post.
Your team successfully implemented a model that determines the likelihood of specific users returning to your app and consumes that insight to drive marketing incentives. As a result, the company consolidates its user base.
Now, businesses want to use that information in real-time to monetize it by implementing a conditional ads system. In particular, each time a user plays with the app, they want to display ads depending on the customer demographic, behavioral information and the resulting propensity of return. Of course, the new application should work with a minimum impact on the user experience.
Given the business challenge, the team is required to design and build a possible serving system which needs to minimize real-time prediction serving latency.
The assumptions are:
1. Predictions would be delivered synchronously.
2. Scalability, support for multiple ML frameworks and security are essential.
3. Only demographic features (country, operating system and language) are passed in real time.
4. The system would be able to handle behavioral features as static reference features calculated every 24h (offline batch feature engineering job).
5. It has to mitigate training/serving skew by means of a timestamp data model, point-in-time lookups to avoid data leakage and a feature distribution monitoring service.
Based on those assumptions, a low read-latency lookup data store and a performant serving engine are needed. Indeed, regarding the data store, even if you can implement governance on BigQuery, it is still not optimized for singleton lookup operations. Also, the solution needs a low-overhead serving system that can seamlessly scale up and down based on requests.
Last year, Google Cloud announced Vertex AI, a managed machine learning (ML) platform that allows data science teams to accelerate the deployment and maintenance of ML models. The platform is composed of several building blocks and two of them are Vertex AI Feature store and Vertex AI prediction.
With Vertex AI Feature store, you have a managed service for low latency scalable feature serving. It also provides a centralized feature repository with easy APIs to search & discover features and feature monitoring capabilities to track drift and other quality issues. With Vertex AI Prediction, you will deploy models into production more easily with online serving via HTTP or batch prediction for bulk scoring. It offers a unified scalable framework to deploy custom models trained in TensorFlow, scikit or XGB, as well as BigQuery ML and AutoML models, and on a broad range of machine types and GPUs.
Below the high level picture puts together once the team decides to go with Google Cloud:
<img src="./assets/solution_overview_final.png"/>
In order:
1. Once you create historical features, they are ingested into Vertex AI Feature store
2. Then you can train and deploy the model using BigQuery (or AutoML)
3. Once the model is deployed, the ML serving engine will receive a prediction request passing entity ID and demographic attributes.
4. Features related to a specific entity will be retrieved from the Vertex AI Feature store and passed them as inputs to the model for online prediction.
5. The predictions will be returned back to the activation layer.
### Dataset
The dataset is the public sample export data from an actual mobile game app called "Flood It!" (Android, iOS)
### Objective
In the following notebook, you will learn the role of Vertex AI Feature Store in a scenario when the user's activities within the first 24 hours of first user engagement and the gaming platform would consume in order to offer conditional ads.
**Notice that we assume you already know how to set up a Vertex AI Feature store. In case you are not familiar with it, please check out [this detailed notebook](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/official/feature_store/gapic-feature-store.ipynb).**
By the end, you will be more confident about how Vertex AI Feature store can
1. Provide a centralized feature repository with easy APIs to search & discover features and fetch them for training/serving.
2. Simplify deployments of models for Online Prediction, via low latency scalable feature serving.
3. Mitigate training serving skew and data leakage by performing point in time lookups to fetch historical data for training.
### Costs
This tutorial uses billable components of Google Cloud:
* Vertex AI
* BigQuery
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
### Set up your local development environment
**If you are using Colab or Google Cloud Notebooks**, your environment already meets
all the requirements to run this notebook. You can skip this step.
**Otherwise**, make sure your environment meets this notebook's requirements.
You need the following:
* The Google Cloud SDK
* Git
* Python 3
* virtualenv
* Jupyter notebook running in a virtual environment with Python 3
The Google Cloud guide to [Setting up a Python development
environment](https://cloud.google.com/python/setup) and the [Jupyter
installation guide](https://jupyter.org/install) provide detailed instructions
for meeting these requirements. The following steps provide a condensed set of
instructions:
1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
1. [Install
virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
and create a virtual environment that uses Python 3. Activate the virtual environment.
1. To install Jupyter, run `pip3 install jupyter` on the
command-line in a terminal shell.
1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
1. Open this notebook in the Jupyter Notebook Dashboard.
### Install additional packages
Install additional package dependencies not installed in your notebook environment, such as the Vertex AI SDK, pandas, the BigQuery client library and TensorFlow. Use the latest major GA version of each package.
```
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
    USER_FLAG = "--user"
# The Vertex AI SDK is installed from the main branch on GitHub; the other
# packages are pinned to the versions this tutorial was written against.
! pip3 install --upgrade pip
! pip3 install {USER_FLAG} --upgrade git+https://github.com/googleapis/python-aiplatform.git@main -q --no-warn-conflicts
! pip3 install {USER_FLAG} --upgrade pandas==1.3.5 -q --no-warn-conflicts
! pip3 install {USER_FLAG} --upgrade google-cloud-bigquery==2.24.0 -q --no-warn-conflicts
! pip3 install {USER_FLAG} --upgrade tensorflow==2.8.0 -q --no-warn-conflicts
```
### Restart the kernel
After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
```
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    # do_shutdown(True) restarts (rather than just stops) the kernel
    app.kernel.do_shutdown(True)
```
## Before you begin
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. [Enable the Vertex AI API and Compute Engine API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component).
1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
1. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
#### Set your project ID
**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
```
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
    # `!` captures the shell output as a list of lines; take the first one
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID: ", PROJECT_ID)
```
Otherwise, set your project ID here.
```
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "" # @param {type:"string"}
!gcloud config set project '' #change it
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
```
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebooks**, your environment is already
authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions
when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
1. In the Cloud Console, go to the [**Create service account key**
page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
2. Click **Create service account**.
3. In the **Service account name** field, enter a name, and
click **Create**.
4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"
into the filter box, and select
**Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
5. Click *Create*. A JSON file that contains your key downloads to your
local environment.
6. Enter the path to your service account key as the
`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
```
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
Set the name of your Cloud Storage bucket below. It must be unique across all
Cloud Storage buckets.
You may also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are
available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may
not use a Multi-Regional Storage bucket for training with Vertex AI.
```
BUCKET_URI = ""  # @param {type:"string"}
REGION = "[your-region]"  # @param {type:"string"}

# Fall back to an auto-generated, timestamped bucket name and the default
# region when the placeholders above were left untouched.
if BUCKET_URI in (None, "", "gs://[your-bucket-name]"):
    BUCKET_URI = f"gs://{PROJECT_ID}-aip-{TIMESTAMP}"
if REGION == "[your-region]":
    REGION = "us-central1"
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION -p $PROJECT_ID $BUCKET_URI
```
Run the following cell to grant access to your Cloud Storage resources from Vertex AI Feature store
```
! gsutil uniformbucketlevelaccess set on $BUCKET_URI
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_URI
```
### Create a Bigquery dataset
```
BQ_DATASET = "Mobile_Gaming" # @param {type:"string"}
LOCATION = "US"
!bq mk --location=$LOCATION --dataset $PROJECT_ID:$BQ_DATASET
```
### Import libraries
```
# General
import os
import sys
import time
# Data Engineering
import pandas as pd
# Vertex AI and its Feature Store
from google.cloud import aiplatform as vertex_ai
from google.cloud import bigquery
# EntityType
from google.cloud.aiplatform import Feature, Featurestore
```
### Define constants
```
# Data Engineering and Feature Engineering
FEATURES_TABLE = "wide_features_table"  # @param {type:"string"}
# The two simulated ingestion days the raw events are split into
MIN_DATE = "2018-10-03"
MAX_DATE = "2018-10-04"
FEATURES_TABLE_DAY_ONE = f"wide_features_table_{MIN_DATE}"
FEATURES_TABLE_DAY_TWO = f"wide_features_table_{MAX_DATE}"
FEATURESTORE_ID = "mobile_gaming"  # @param {type:"string"}
ENTITY_TYPE_ID = "user"
# BQ Model Training and Deployment
MODEL_NAME = f"churn_logit_classifier_{TIMESTAMP}"
MODEL_TYPE = "LOGISTIC_REG"
AUTO_CLASS_WEIGHTS = "TRUE"
MAX_ITERATIONS = "50"
INPUT_LABEL_COLS = "churned"
JOB_ID = f"extract_{MODEL_NAME}_{TIMESTAMP}"
# Reference to the BQML model to export as a TF SavedModel
MODEL_SOURCE = bigquery.model.ModelReference.from_api_repr(
    {"projectId": PROJECT_ID, "datasetId": BQ_DATASET, "modelId": MODEL_NAME}
)
SERVING_DIR = "serving_dir"
DESTINATION_URI = f"{BUCKET_URI}/model"
EXTRACT_JOB_CONFIG = bigquery.ExtractJobConfig(destination_format="ML_TF_SAVED_MODEL")
VERSION = "v1"
SERVING_CONTAINER_IMAGE_URI = (
    "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-7:latest"
)
ENDPOINT_NAME = "mobile_gaming_churn"
DEPLOYED_MODEL_NAME = f"churn_logistic_classifier_{VERSION}"
# Vertex AI Feature store
ONLINE_STORE_NODES_COUNT = 3
ENTITY_ID = "user"
API_ENDPOINT = f"{REGION}-aiplatform.googleapis.com"
# Column used as the feature timestamp for point-in-time lookups
FEATURE_TIME = "user_first_engagement"
ENTITY_ID_FIELD = "user_pseudo_id"
BQ_SOURCE_URI_DAY_ONE = f"bq://{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE_DAY_ONE}"
BQ_SOURCE_URI_DAY_TWO = f"bq://{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE_DAY_TWO}"
BQ_DESTINATION_OUTPUT_URI = f"bq://{PROJECT_ID}.{BQ_DATASET}.train_snapshot_{TIMESTAMP}"
# NOTE(review): this keys on a "customer" entity type, but the entity type
# created in this notebook is "user" (ENTITY_TYPE_ID / ENTITY_ID) — confirm
# which identifier is intended before batch-serving features.
SERVING_FEATURE_IDS = {"customer": ["*"]}
READ_INSTANCES_TABLE = f"ground_truth_{TIMESTAMP}"
READ_INSTANCES_URI = f"bq://{PROJECT_ID}.{BQ_DATASET}.{READ_INSTANCES_TABLE}"
# Vertex AI AutoML model
DATASET_NAME = f"churn_mobile_gaming_{TIMESTAMP}"
AUTOML_TRAIN_JOB_NAME = f"automl_classifier_training_{TIMESTAMP}"
AUTOML_MODEL_NAME = f"churn_automl_classifier_{TIMESTAMP}"
MODEL_DEPLOYED_NAME = "churn_automl_classifier_v1"
SERVING_MACHINE_TYPE = "n1-highcpu-4"
MIN_NODES = 1
MAX_NODES = 1
```
### Helpers
```
def run_bq_query(query: str) -> None:
    """
    A helper function to run a BigQuery job and wait for it to finish.
    Args:
        query: a formatted SQL query
    Returns:
        None
    """
    try:
        job = bq_client.query(query)
        _ = job.result()  # block until the job completes; result set discarded
    # NOTE(review): BigQuery client errors derive from
    # google.api_core.exceptions.GoogleAPIError, not RuntimeError, so this
    # handler likely never fires — confirm the intended exception type.
    except RuntimeError as error:
        print(error)
def upload_model(
    display_name: str,
    serving_container_image_uri: str,
    artifact_uri: str,
    sync: bool = True,
) -> vertex_ai.Model:
    """
    Upload a model artifact to the Vertex AI Model registry.
    Args:
        display_name: The name of the Vertex AI Model artifact
        serving_container_image_uri: The uri of the serving image
        artifact_uri: The uri of the artifact to import
        sync: Whether to execute the upload synchronously
    Returns: Vertex AI Model
    """
    model = vertex_ai.Model.upload(
        display_name=display_name,
        artifact_uri=artifact_uri,
        serving_container_image_uri=serving_container_image_uri,
        sync=sync,
    )
    model.wait()  # ensure completion even when sync=False
    print(model.display_name)
    print(model.resource_name)
    return model
def create_endpoint(display_name: str) -> vertex_ai.Endpoint:
    """
    Create a Vertex AI Endpoint and print its identifiers.
    Args:
        display_name: The name of the Endpoint
    Returns: Vertex AI Endpoint
    """
    new_endpoint = vertex_ai.Endpoint.create(display_name=display_name)
    for identifier in (new_endpoint.display_name, new_endpoint.resource_name):
        print(identifier)
    return new_endpoint
def deploy_model(
    model: vertex_ai.Model,
    machine_type: str,
    endpoint: vertex_ai.Endpoint = None,
    deployed_model_display_name: str = None,
    min_replica_count: int = 1,
    max_replica_count: int = 1,
    sync: bool = True,
) -> vertex_ai.Model:
    """
    A helper function to deploy a model to a Vertex AI Endpoint.
    Args:
        model: A Vertex AI Model
        machine_type: The type of machine to serve the model
        endpoint: An existing Vertex AI Endpoint (optional)
        deployed_model_display_name: The name of the deployed model
        min_replica_count: Minimum number of serving replicas
        max_replica_count: Max number of serving replicas
        sync: Whether to execute method synchronously
    Returns: vertex_ai.Model
    """
    model_deployed = model.deploy(
        endpoint=endpoint,
        deployed_model_display_name=deployed_model_display_name,
        machine_type=machine_type,
        min_replica_count=min_replica_count,
        max_replica_count=max_replica_count,
        sync=sync,
    )
    model_deployed.wait()  # ensure completion even when sync=False
    print(model_deployed.display_name)
    print(model_deployed.resource_name)
    return model_deployed
def endpoint_predict_sample(
    instances: list, endpoint: vertex_ai.Endpoint
) -> vertex_ai.models.Prediction:
    """
    Score a batch of instances against a Vertex AI Endpoint and echo the result.
    Args:
        instances: The list of instances to score
        endpoint: A Vertex AI Endpoint
    Returns:
        vertex_ai.models.Prediction
    """
    result = endpoint.predict(instances=instances)
    print(result)
    return result
def simulate_prediction(
    endpoint: vertex_ai.Endpoint, online_sample: dict
) -> vertex_ai.models.Prediction:
    """
    A helper function to simulate online prediction with the customer entity type:
    - format entities for prediction
    - retrieve static features with a singleton lookup operation from Vertex AI Feature store
    - run the prediction request and get back the result
    Args:
        endpoint: The Vertex AI Endpoint serving the churn model
        online_sample: dict of columns -> values; must include an "entity_id"
            column plus the demographic features passed in real time
    Returns:
        vertex_ai.models.Prediction
    """
    online_features = pd.DataFrame.from_dict(online_sample)
    entity_ids = online_features["entity_id"].tolist()
    # NOTE(review): reads the module-level `customer_entity_type`, which is
    # defined elsewhere in the notebook — confirm it matches the entity type
    # actually created (named "user" above).
    customer_aggregated_features = customer_entity_type.read(
        entity_ids=entity_ids,
        feature_ids=[
            "cnt_user_engagement",
            "cnt_level_start_quickplay",
            "cnt_level_end_quickplay",
            "cnt_level_complete_quickplay",
            "cnt_level_reset_quickplay",
            "cnt_post_score",
            "cnt_spend_virtual_currency",
            "cnt_ad_reward",
            "cnt_challenge_a_friend",
            "cnt_completed_5_levels",
            "cnt_use_extra_steps",
        ],
    )
    # Join the looked-up behavioral features with the real-time demographic
    # features on entity_id, then drop the id before sending to the model.
    prediction_sample_df = pd.merge(
        customer_aggregated_features.set_index("entity_id"),
        online_features.set_index("entity_id"),
        left_index=True,
        right_index=True,
    ).reset_index(drop=True)
    prediction_sample = prediction_sample_df.to_dict("records")
    prediction = endpoint.predict(prediction_sample)
    return prediction
```
# Setting the Online (real-time) prediction scenario
As we mentioned at the beginning, this section simulates the original scenario, but this time we introduce Vertex AI for online (real-time) serving. In particular, we will
1. Create static features including demographic and behavioral attributes
2. Train a simple BQML model
3. Export and deploy the model to a Vertex AI endpoint
<img src="./assets/data_processing.png"/>
## Initiate clients
```
bq_client = bigquery.Client(project=PROJECT_ID, location=LOCATION)
vertex_ai.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)
```
## Data and Feature Engineering
The original dataset contains raw event data we cannot ingest in the feature store as they are.
In this section, we will pre-process the raw data into an appropriate format.
**Notice we simulate those transformations in different point of time (day one and day two).**
### Label, Demographic and Behavioral Transformations
This section is based on the [Churn prediction for game developers using Google Analytics 4 (GA4) and BigQuery ML](https://cloud.google.com/blog/topics/developers-practitioners/churn-prediction-game-developers-using-google-analytics-4-ga4-and-bigquery-ml?utm_source=linkedin&utm_medium=unpaidsoc&utm_campaign=FY21-Q2-Google-Cloud-Tech-Blog&utm_content=google-analytics-4&utm_term=-) blog article by Minhaz Kazi and Polong Lin
```
preprocess_sql_query = f"""
CREATE OR REPLACE TABLE
`{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE}` AS
WITH
# query to create label --------------------------------------------------------------------------------
get_label AS (
SELECT
user_pseudo_id,
user_first_engagement,
user_last_engagement,
# EXTRACT(MONTH from TIMESTAMP_MICROS(user_first_engagement)) as month,
# EXTRACT(DAYOFYEAR from TIMESTAMP_MICROS(user_first_engagement)) as julianday,
# EXTRACT(DAYOFWEEK from TIMESTAMP_MICROS(user_first_engagement)) as dayofweek,
#add 24 hr to user's first touch
(user_first_engagement + 86400000000) AS ts_24hr_after_first_engagement,
#churned = 1 if last_touch within 24 hr of app installation, else 0
IF (user_last_engagement < (user_first_engagement + 86400000000),
1,
0 ) AS churned,
#bounced = 1 if last_touch within 10 min, else 0
IF (user_last_engagement <= (user_first_engagement + 600000000),
1,
0 ) AS bounced,
FROM
(
SELECT
user_pseudo_id,
MIN(event_timestamp) AS user_first_engagement,
MAX(event_timestamp) AS user_last_engagement
FROM
`firebase-public-project.analytics_153293282.events_*`
WHERE event_name="user_engagement"
GROUP BY
user_pseudo_id
)
GROUP BY 1,2,3),
# query to create class weights --------------------------------------------------------------------------------
get_class_weights AS (
SELECT
CAST(COUNT(*) / (2*(COUNT(*) - SUM(churned))) AS STRING) AS class_weight_zero,
CAST(COUNT(*) / (2*SUM(churned)) AS STRING) AS class_weight_one,
FROM
get_label
),
# query to extract demographic data for each user ---------------------------------------------------------
get_demographic_data AS (
SELECT * EXCEPT (row_num)
FROM (
SELECT
user_pseudo_id,
geo.country as country,
device.operating_system as operating_system,
device.language as language,
ROW_NUMBER() OVER (PARTITION BY user_pseudo_id ORDER BY event_timestamp DESC) AS row_num
FROM `firebase-public-project.analytics_153293282.events_*`
WHERE event_name="user_engagement")
WHERE row_num = 1),
# query to extract behavioral data for each user ----------------------------------------------------------
get_behavioral_data AS (
SELECT
user_pseudo_id,
SUM(IF(event_name = 'user_engagement', 1, 0)) AS cnt_user_engagement,
SUM(IF(event_name = 'level_start_quickplay', 1, 0)) AS cnt_level_start_quickplay,
SUM(IF(event_name = 'level_end_quickplay', 1, 0)) AS cnt_level_end_quickplay,
SUM(IF(event_name = 'level_complete_quickplay', 1, 0)) AS cnt_level_complete_quickplay,
SUM(IF(event_name = 'level_reset_quickplay', 1, 0)) AS cnt_level_reset_quickplay,
SUM(IF(event_name = 'post_score', 1, 0)) AS cnt_post_score,
SUM(IF(event_name = 'spend_virtual_currency', 1, 0)) AS cnt_spend_virtual_currency,
SUM(IF(event_name = 'ad_reward', 1, 0)) AS cnt_ad_reward,
SUM(IF(event_name = 'challenge_a_friend', 1, 0)) AS cnt_challenge_a_friend,
SUM(IF(event_name = 'completed_5_levels', 1, 0)) AS cnt_completed_5_levels,
SUM(IF(event_name = 'use_extra_steps', 1, 0)) AS cnt_use_extra_steps,
FROM (
SELECT
e.*
FROM
`firebase-public-project.analytics_153293282.events_*` e
JOIN
get_label r
ON
e.user_pseudo_id = r.user_pseudo_id
WHERE
e.event_timestamp <= r.ts_24hr_after_first_engagement
)
GROUP BY 1)
SELECT
PARSE_TIMESTAMP('%Y-%m-%d %H:%M:%S', FORMAT_TIMESTAMP('%Y-%m-%d %H:%M:%S', TIMESTAMP_MICROS(ret.user_first_engagement))) AS user_first_engagement,
# ret.month,
# ret.julianday,
# ret.dayofweek,
dem.*,
CAST(IFNULL(beh.cnt_user_engagement, 0) AS FLOAT64) AS cnt_user_engagement,
CAST(IFNULL(beh.cnt_level_start_quickplay, 0) AS FLOAT64) AS cnt_level_start_quickplay,
CAST(IFNULL(beh.cnt_level_end_quickplay, 0) AS FLOAT64) AS cnt_level_end_quickplay,
CAST(IFNULL(beh.cnt_level_complete_quickplay, 0) AS FLOAT64) AS cnt_level_complete_quickplay,
CAST(IFNULL(beh.cnt_level_reset_quickplay, 0) AS FLOAT64) AS cnt_level_reset_quickplay,
CAST(IFNULL(beh.cnt_post_score, 0) AS FLOAT64) AS cnt_post_score,
CAST(IFNULL(beh.cnt_spend_virtual_currency, 0) AS FLOAT64) AS cnt_spend_virtual_currency,
CAST(IFNULL(beh.cnt_ad_reward, 0) AS FLOAT64) AS cnt_ad_reward,
CAST(IFNULL(beh.cnt_challenge_a_friend, 0) AS FLOAT64) AS cnt_challenge_a_friend,
CAST(IFNULL(beh.cnt_completed_5_levels, 0) AS FLOAT64) AS cnt_completed_5_levels,
CAST(IFNULL(beh.cnt_use_extra_steps, 0) AS FLOAT64) AS cnt_use_extra_steps,
ret.churned as churned,
CASE
WHEN churned = 0 THEN ( SELECT class_weight_zero FROM get_class_weights)
ELSE ( SELECT class_weight_one
FROM get_class_weights)
END AS class_weights
FROM
get_label ret
LEFT OUTER JOIN
get_demographic_data dem
ON
ret.user_pseudo_id = dem.user_pseudo_id
LEFT OUTER JOIN
get_behavioral_data beh
ON
ret.user_pseudo_id = beh.user_pseudo_id
WHERE ret.bounced = 0
"""
run_bq_query(preprocess_sql_query)
```
### Create table to update entities
```
# Split the wide feature table into two per-day snapshots to simulate
# features being produced by two separate daily batch jobs.
processed_sql_query_day_one = f"""
CREATE OR REPLACE TABLE
`{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE_DAY_ONE}` AS
SELECT
*
FROM
`{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE}`
WHERE
user_first_engagement < '{MAX_DATE}'
"""
processed_sql_query_day_two = f"""
CREATE OR REPLACE TABLE
`{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE_DAY_TWO}` AS
SELECT
*
FROM
`{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE}`
WHERE
user_first_engagement >= '{MAX_DATE}'
"""
queries = processed_sql_query_day_one, processed_sql_query_day_two
for query in queries:
    run_bq_query(query)
```
## Model Training
We created demographic and aggregate behavioral features. It is time to train our BQML model.
#### Train a logistic classifier model
```
# Train a BigQuery ML logistic-regression churn classifier on the day-one
# snapshot, excluding identifier/timestamp/weight columns from the features.
train_model_query = f"""
CREATE OR REPLACE MODEL `{PROJECT_ID}.{BQ_DATASET}.{MODEL_NAME}`
OPTIONS(MODEL_TYPE='{MODEL_TYPE}',
AUTO_CLASS_WEIGHTS={AUTO_CLASS_WEIGHTS},
MAX_ITERATIONS={MAX_ITERATIONS},
INPUT_LABEL_COLS=['{INPUT_LABEL_COLS}'])
AS SELECT * EXCEPT(user_first_engagement, user_pseudo_id, class_weights)
FROM `{PROJECT_ID}.{BQ_DATASET}.{FEATURES_TABLE_DAY_ONE}`;
"""
run_bq_query(train_model_query)
```
## Model Deployment
Once the model is trained, you can export it and deploy it to a Vertex AI Endpoint.
This is just one of the 5 ways to use BigQuery and Vertex AI together. [Check](https://cloud.google.com/blog/products/ai-machine-learning/five-integrations-between-vertex-ai-and-bigquery) this article to know more about them.
#### Export the model
```
# Export the BQML model to Cloud Storage in TF SavedModel format.
model_extract_job = bigquery.ExtractJob(
    client=bq_client,
    job_id=JOB_ID,
    source=MODEL_SOURCE,
    destination_uris=[DESTINATION_URI],
    job_config=EXTRACT_JOB_CONFIG,
)
try:
    job = model_extract_job.result()  # block until the export finishes
except Exception as error:
    # The original `except job.error_result` was broken: `job` is unbound
    # when .result() raises, and error_result is not an exception class.
    print(error)
```
#### (Locally) Check the SavedModel format
```
%%bash -s "$SERVING_DIR" "$DESTINATION_URI"
mkdir -p -m 777 $1
gsutil cp -r $2 $1
%%bash -s "$SERVING_DIR"
saved_model_cli show --dir $1/model/ --all
```
#### Upload and Deploy Model on Vertex AI Endpoint
```
# Upload the exported BQML model, create an endpoint and deploy the model to
# it with a single n1-highcpu-4 replica.
bq_model = upload_model(
    display_name=MODEL_NAME,
    serving_container_image_uri=SERVING_CONTAINER_IMAGE_URI,
    artifact_uri=DESTINATION_URI,
)
endpoint = create_endpoint(display_name=ENDPOINT_NAME)
deployed_model = deploy_model(
    model=bq_model,
    machine_type="n1-highcpu-4",
    endpoint=endpoint,
    deployed_model_display_name=DEPLOYED_MODEL_NAME,
    min_replica_count=1,
    max_replica_count=1,
    sync=True,
)
```
#### Test predictions
```
# A hand-built sample instance matching the model's input schema: demographic
# attributes plus behavioral counts (mostly zero here).
instance = {
    "cnt_ad_reward": 0,
    "cnt_challenge_a_friend": 0,
    "cnt_completed_5_levels": 0,
    "cnt_level_complete_quickplay": 0,
    "cnt_level_end_quickplay": 0,
    "cnt_level_reset_quickplay": 0,
    "cnt_level_start_quickplay": 0,
    "cnt_post_score": 0,
    "cnt_spend_virtual_currency": 0,
    "cnt_use_extra_steps": 0,
    "cnt_user_engagement": 14,
    "country": "United States",
    "language": "en-us",
    "operating_system": "ANDROID",
}
bqml_predictions = endpoint_predict_sample(instances=[instance], endpoint=endpoint)
```
# Serve ML features at scale with low latency
At this point, **we have deployed a simple model which requires fetching aggregated attributes as input features in real time**.
That's why **we need a datastore optimized for singleton lookup operations** which is able to scale and serve those aggregated features online with low latency.
In other terms, we need to introduce Vertex AI Feature Store. Again, we assume you already know how to set up and work with a Vertex AI Feature store.
## Feature store for features management
In this section, we explore all Feature store management activities, from creating a Featurestore resource all the way down to reading feature values online.
Below you can see the feature store data model and a plain representation of how the data will be organized.
<img src="./assets/data_model_3.png"/>
### Create featurestore, ```mobile_gaming```
```
# List any featurestores that already exist in the project
print(f"Listing all featurestores in {PROJECT_ID}")
feature_store_list = Featurestore.list()
if len(list(feature_store_list)) == 0:
    print(f"The {PROJECT_ID} is empty!")
else:
    for fs in feature_store_list:
        print("Found featurestore: {}".format(fs.resource_name))
try:
    # Provision a featurestore with a fixed number of online serving nodes
    mobile_gaming_feature_store = Featurestore.create(
        featurestore_id=FEATURESTORE_ID,
        online_store_fixed_node_count=ONLINE_STORE_NODES_COUNT,
        labels={"team": "dataoffice", "app": "mobile_gaming"},
        sync=True,
    )
# NOTE(review): Vertex AI SDK errors derive from google.api_core exceptions,
# not RuntimeError — confirm this handler catches what is intended.
except RuntimeError as error:
    print(error)
else:
    FEATURESTORE_RESOURCE_NAME = mobile_gaming_feature_store.resource_name
    print(f"Feature store created: {FEATURESTORE_RESOURCE_NAME}")
```
### Create the ```User``` entity type and its features
```
try:
    # Register the "user" entity type under the feature store created above
    user_entity_type = mobile_gaming_feature_store.create_entity_type(
        entity_type_id=ENTITY_ID, description="User Entity", sync=True
    )
except RuntimeError as error:
    print(error)
else:
    USER_ENTITY_RESOURCE_NAME = user_entity_type.resource_name
    print("Entity type name is", USER_ENTITY_RESOURCE_NAME)
```
### Set Feature Monitoring
Feature [monitoring](https://cloud.google.com/vertex-ai/docs/featurestore/monitoring) is in preview, so you need to use v1beta1 Python which is a lower-level API than the one we've used so far in this notebook.
The easiest way to set this for now is using [console UI](https://console.cloud.google.com/vertex-ai/features). For completeness, below is example to do this using v1beta1 SDK.
```
# Feature monitoring is only exposed through the lower-level v1beta1 client.
from google.cloud.aiplatform_v1beta1 import \
    FeaturestoreServiceClient as v1beta1_FeaturestoreServiceClient
from google.cloud.aiplatform_v1beta1.types import \
    entity_type as v1beta1_entity_type_pb2
from google.cloud.aiplatform_v1beta1.types import \
    featurestore_monitoring as v1beta1_featurestore_monitoring_pb2
from google.cloud.aiplatform_v1beta1.types import \
    featurestore_service as v1beta1_featurestore_service_pb2
from google.protobuf.duration_pb2 import Duration
v1beta1_admin_client = v1beta1_FeaturestoreServiceClient(
    client_options={"api_endpoint": API_ENDPOINT}
)
# Enable daily snapshot analysis on the User entity type.
v1beta1_admin_client.update_entity_type(
    v1beta1_featurestore_service_pb2.UpdateEntityTypeRequest(
        entity_type=v1beta1_entity_type_pb2.EntityType(
            name=v1beta1_admin_client.entity_type_path(
                PROJECT_ID, REGION, FEATURESTORE_ID, ENTITY_ID
            ),
            monitoring_config=v1beta1_featurestore_monitoring_pb2.FeaturestoreMonitoringConfig(
                snapshot_analysis=v1beta1_featurestore_monitoring_pb2.FeaturestoreMonitoringConfig.SnapshotAnalysis(
                    monitoring_interval=Duration(seconds=86400),  # 1 day
                ),
            ),
        ),
    )
)
```
### Create features
#### Create Feature configuration
For simplicity, I created the configuration in a declarative way. Of course, we could create a helper function to build it from the BigQuery schema.
Also notice that we want to pass some features on the fly. In this case, country, operating system and language look perfect for that.
```
# Feature definitions for the User entity type, keyed by feature ID.
# value_type must be a Vertex AI Feature Store value type; the
# {"status": "passed"} label marks features as ready for serving
# (assumption inferred from usage here — TODO confirm label semantics).
feature_configs = {
    "country": {
        "value_type": "STRING",
        "description": "The country of customer",
        "labels": {"status": "passed"},
    },
    "operating_system": {
        "value_type": "STRING",
        "description": "The operating system of device",
        "labels": {"status": "passed"},
    },
    "language": {
        "value_type": "STRING",
        "description": "The language of device",
        "labels": {"status": "passed"},
    },
    "cnt_user_engagement": {
        "value_type": "DOUBLE",
        "description": "A variable of user engagement level",
        "labels": {"status": "passed"},
    },
    "cnt_level_start_quickplay": {
        "value_type": "DOUBLE",
        "description": "A variable of user engagement with start level",
        "labels": {"status": "passed"},
    },
    "cnt_level_end_quickplay": {
        "value_type": "DOUBLE",
        "description": "A variable of user engagement with end level",
        "labels": {"status": "passed"},
    },
    "cnt_level_complete_quickplay": {
        "value_type": "DOUBLE",
        "description": "A variable of user engagement with complete status",
        "labels": {"status": "passed"},
    },
    "cnt_level_reset_quickplay": {
        "value_type": "DOUBLE",
        "description": "A variable of user engagement with reset status",
        "labels": {"status": "passed"},
    },
    "cnt_post_score": {
        "value_type": "DOUBLE",
        "description": "A variable of user score",
        "labels": {"status": "passed"},
    },
    "cnt_spend_virtual_currency": {
        "value_type": "DOUBLE",
        "description": "A variable of user virtual amount",
        "labels": {"status": "passed"},
    },
    "cnt_ad_reward": {
        "value_type": "DOUBLE",
        "description": "A variable of user reward",
        "labels": {"status": "passed"},
    },
    "cnt_challenge_a_friend": {
        "value_type": "DOUBLE",
        "description": "A variable of user challenges with friends",
        "labels": {"status": "passed"},
    },
    "cnt_completed_5_levels": {
        "value_type": "DOUBLE",
        "description": "A variable of user level 5 completed",
        "labels": {"status": "passed"},
    },
    "cnt_use_extra_steps": {
        "value_type": "DOUBLE",
        "description": "A variable of user extra steps",
        "labels": {"status": "passed"},
    },
    "churned": {
        "value_type": "INT64",
        # Fixed copy-paste bug: description previously duplicated
        # cnt_use_extra_steps ("A variable of user extra steps").
        "description": "Whether the user churned (target label)",
        "labels": {"status": "passed"},
    },
    "class_weights": {
        "value_type": "STRING",
        "description": "A variable of class weights",
        "labels": {"status": "passed"},
    },
}
```
#### Create features using `batch_create_features` method
```
try:
    # Create all features in a single batch request (blocks until done).
    user_entity_type.batch_create_features(feature_configs=feature_configs, sync=True)
except RuntimeError as error:
    print(error)
else:
    for feature in user_entity_type.list_features():
        print("")
        print(f"The resource name of {feature.name} feature is", feature.resource_name)
```
### Search features
```
# Search features across the project by feature ID.
feature_query = "feature_id:cnt_user_engagement"
searched_features = Feature.search(query=feature_query)
searched_features
```
### Import ```User``` feature values using ```ingest_from_bq``` method
You need to import feature values before you can use them for online/offline serving.
```
# Ingest day-one feature values from BigQuery; feature IDs come from the
# entity type, so the BigQuery column names must match them.
FEATURES_IDS = [feature.name for feature in user_entity_type.list_features()]
try:
    user_entity_type.ingest_from_bq(
        feature_ids=FEATURES_IDS,
        feature_time=FEATURE_TIME,
        bq_source_uri=BQ_SOURCE_URI_DAY_ONE,
        entity_id_field=ENTITY_ID_FIELD,
        disable_online_serving=False,  # values become immediately servable online
        worker_count=20,
        sync=True,
    )
except RuntimeError as error:
    print(error)
```
**Comment: How does Vertex AI Feature Store mitigate training serving skew?**
Let's just think about what is happening for a second.
We just ingested the customer behavioral features we engineered earlier when we trained the model. And we are now going to serve the same features for online prediction.
But what if those attributes on the incoming prediction requests differ from the ones calculated during model training? In particular, what if the current attributes have different characteristics than the data the model was trained on? At that point, you should start perceiving this idea of **skew** between training and serving data. So what? Imagine now that the mobile gaming app goes trending and users start challenging friends more frequently. This would change the distribution of `cnt_challenge_a_friend`. But the model, which estimates your churn probability, was trained on a different distribution. And if we assume that the type and frequency of ads depend on those predictions, you would end up targeting the wrong users with the wrong ads at an unexpected frequency because of this offline-online feature inconsistency.
**Vertex AI Feature store** addresses this skew with an ingest-once, reuse-many logic. Indeed, once a feature is computed, the same feature is available both in training and serving.
## Simulate online prediction requests
```
# Example online request payload: one entity id plus the pass-through
# features supplied at prediction time.
online_sample = {
    "entity_id": ["DE346CDD4A6F13969F749EA8047F282A"],
    "country": ["United States"],
    "operating_system": ["IOS"],
    "language": ["en"],
}
prediction = simulate_prediction(endpoint=endpoint, online_sample=online_sample)
print(prediction)
```
# Train a new churn ML model using Vertex AI AutoML
Now assume that you have a meeting with the team and you decide to use Vertex AI AutoML to train a new version of the model.
But while you were discussing that, new data was ingested into the feature store.
## Ingest new data in the feature store
```
try:
    # Ingest the day-two batch; single worker is enough for this smaller load.
    user_entity_type.ingest_from_bq(
        feature_ids=FEATURES_IDS,
        feature_time=FEATURE_TIME,
        bq_source_uri=BQ_SOURCE_URI_DAY_TWO,
        entity_id_field=ENTITY_ID_FIELD,
        disable_online_serving=False,
        worker_count=1,
        sync=True,
    )
except RuntimeError as error:
    print(error)
```
## Avoid data leakage with point-in-time lookup to fetch training data
Now, without a datastore with a timestamp data model, some data leakage would happen and you would end up training the new model on a different dataset. As a consequence, you cannot compare those models. In order to avoid that, **you need to be able to train the model on the same data at the same specific point in time used in the previous version of the model**.
<center><img src="./assets/point_in_time_2.png"/><center/>
**With the Vertex AI Feature store, you can fetch feature values corresponding to a particular timestamp thanks to point-in-time lookup capability.** In terms of SDK, you need to define a `read instances` object which is a list of entity id / timestamp pairs, where the entity id is the `user_pseudo_id` and `user_first_engagement` indicates we want to read the latest information available about that user. In this way, we will be able to reproduce the exact same training sample you need for the new model.
Let's see how to do that.
### Define query for reading instances at a specific point in time
```
# WHERE ABS(MOD(FARM_FINGERPRINT(STRING(user_first_engagement, 'UTC')), 10)) < 8
# Build the read-instances table: one (entity id, timestamp) pair per user
# for the point-in-time lookup.
# NOTE(review): the alias is 'customer' while ingestion used ENTITY_ID_FIELD —
# confirm the column name matches what batch_serve_to_bq expects.
read_instances_query = f"""
CREATE OR REPLACE TABLE
`{PROJECT_ID}.{BQ_DATASET}.{READ_INSTANCES_TABLE}` AS
SELECT
user_pseudo_id as customer,
TIMESTAMP_TRUNC(CURRENT_TIMESTAMP(), SECOND, "UTC") as timestamp
FROM
`{BQ_DATASET}.{FEATURES_TABLE_DAY_ONE}` AS e
ORDER BY
user_first_engagement
"""
```
### Create the BigQuery instances table
```
run_bq_query(read_instances_query)
```
### Serve features for batch training
```
# Point-in-time correct export: feature values are joined as of each
# read-instance row's timestamp, avoiding training data leakage.
mobile_gaming_feature_store.batch_serve_to_bq(
    bq_destination_output_uri=BQ_DESTINATION_OUTPUT_URI,
    serving_feature_ids=SERVING_FEATURE_IDS,
    read_instances_uri=READ_INSTANCES_URI,
)
```
## Train and Deploy AutoML model on Vertex AI
Now that we have reproduced the training sample, we use the Vertex AI SDK to train a new version of the model using Vertex AI AutoML.
### Create the Managed Tabular Dataset from a CSV
```
# Wrap the exported BigQuery table as a Vertex AI managed tabular dataset.
dataset = vertex_ai.TabularDataset.create(
    display_name=DATASET_NAME,
    bq_source=BQ_DESTINATION_OUTPUT_URI,
)
dataset.resource_name
```
### Create and Launch the Training Job to build the Model
```
# Binary classification optimized for area under the ROC curve.
automl_training_job = vertex_ai.AutoMLTabularTrainingJob(
    display_name=AUTOML_TRAIN_JOB_NAME,
    optimization_prediction_type="classification",
    optimization_objective="maximize-au-roc",
    # Explicit per-column transformations: 3 categorical + 11 numeric features.
    column_transformations=[
        {"categorical": {"column_name": "country"}},
        {"categorical": {"column_name": "operating_system"}},
        {"categorical": {"column_name": "language"}},
        {"numeric": {"column_name": "cnt_user_engagement"}},
        {"numeric": {"column_name": "cnt_level_start_quickplay"}},
        {"numeric": {"column_name": "cnt_level_end_quickplay"}},
        {"numeric": {"column_name": "cnt_level_complete_quickplay"}},
        {"numeric": {"column_name": "cnt_level_reset_quickplay"}},
        {"numeric": {"column_name": "cnt_post_score"}},
        {"numeric": {"column_name": "cnt_spend_virtual_currency"}},
        {"numeric": {"column_name": "cnt_ad_reward"}},
        {"numeric": {"column_name": "cnt_challenge_a_friend"}},
        {"numeric": {"column_name": "cnt_completed_5_levels"}},
        {"numeric": {"column_name": "cnt_use_extra_steps"}},
    ],
)
# This will take around 2 hours to run
automl_model = automl_training_job.run(
    dataset=dataset,
    # NOTE(review): the name suggests a list, but target_column expects a
    # single column name — confirm INPUT_LABEL_COLS is a string here.
    target_column=INPUT_LABEL_COLS,
    training_fraction_split=0.8,
    validation_fraction_split=0.1,
    test_fraction_split=0.1,
    weight_column="class_weights",
    model_display_name=AUTOML_MODEL_NAME,
    disable_early_stopping=False,
)
```
### Deploy Model to the same Endpoint with Traffic Splitting
Vertex AI Endpoint provides a managed traffic splitting service. All you need to do is define the splitting policy and the service will handle it for you.
Be sure that both models have the same serving function. In our case both BQML Logistic classifier and Vertex AI AutoML support same prediction format.
```
# Split traffic 50/50: key "0" refers to the model being deployed in this
# request, the other key to the previously deployed model.
model_deployed_id = endpoint.list_models()[0].id
RETRAIN_TRAFFIC_SPLIT = {"0": 50, model_deployed_id: 50}
endpoint.deploy(
    automl_model,
    deployed_model_display_name=MODEL_DEPLOYED_NAME,
    traffic_split=RETRAIN_TRAFFIC_SPLIT,
    machine_type=SERVING_MACHINE_TYPE,
    accelerator_count=0,  # CPU-only serving
    min_replica_count=MIN_NODES,
    max_replica_count=MAX_NODES,
)
```
## Time to simulate online predictions
```
# Roughly 33 minutes of one-request-per-second traffic to exercise the
# 50/50 traffic split on the endpoint.
for i in range(2000):
    simulate_prediction(endpoint=endpoint, online_sample=online_sample)
    time.sleep(1)
```
Below is the Vertex AI Endpoint UI result you will be able to see after the online prediction simulation ends.
<img src="./assets/prediction_results.jpg"/>
## Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial
```
# delete feature store (force=True also drops child entity types/features)
mobile_gaming_feature_store.delete(sync=True, force=True)
# delete Vertex AI resources
endpoint.undeploy_all()
bq_model.delete()
automl_model.delete()
# NOTE(review): %%bash is a cell magic and must be the first line of its own
# notebook cell — it cannot run mid-cell as dumped here.
%%bash -s "$SERVING_DIR"
rm -Rf $1
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
if delete_bucket and "BUCKET_URI" in globals():
    ! gsutil -m rm -r $BUCKET_URI
# Delete the BigQuery Dataset
!bq rm -r -f -d $PROJECT_ID:$BQ_DATASET
```
| github_jupyter |
# VGGNet in Keras
In this notebook, we fit a model inspired by the "very deep" convolutional network [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) to classify flowers into the 17 categories of the Oxford Flowers data set. Derived from [these](https://github.com/the-deep-learners/TensorFlow-LiveLessons/blob/master/notebooks/old/L3-3c__TFLearn_VGGNet.ipynb) [two](https://github.com/the-deep-learners/TensorFlow-LiveLessons/blob/master/notebooks/alexnet_in_keras.ipynb) earlier notebooks.
#### Set seed for reproducibility
```
# Seed NumPy's global RNG so weight init / shuffling are reproducible.
import numpy as np
np.random.seed(42)
```
#### Load dependencies
```
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import TensorBoard # for part 3.5 on TensorBoard
```
#### Load *and preprocess* data
```
# Oxford Flowers 17: images resized to 224x224, labels one-hot encoded.
import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True)
```
#### Design neural network architecture
```
# VGG-style stack: 3x3 convs with ReLU, 2x2 max-pool after each block,
# followed by BatchNorm (a deviation from the original VGG paper).
model = Sequential()
# Block 1: two 64-filter convs
model.add(Conv2D(64, 3, activation='relu', input_shape=(224, 224, 3)))
model.add(Conv2D(64, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
# Block 2: two 128-filter convs
model.add(Conv2D(128, 3, activation='relu'))
model.add(Conv2D(128, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
# Block 3: three 256-filter convs
model.add(Conv2D(256, 3, activation='relu'))
model.add(Conv2D(256, 3, activation='relu'))
model.add(Conv2D(256, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
# Block 4: three 512-filter convs
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
# Block 5: three 512-filter convs
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
# Classifier head: two 4096-unit dense layers with dropout, softmax over 17 classes
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(17, activation='softmax'))
model.summary()
```
#### Configure model
```
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
```
#### Configure TensorBoard (for part 5 of lesson 3)
```
tensorbrd = TensorBoard('logs/vggnet')
```
#### Train!
```
# NOTE(review): no held-out test set — validation_split reserves the last 10%
# of (X, Y) for validation only.
model.fit(X, Y, batch_size=64, epochs=16, verbose=1, validation_split=0.1, shuffle=True,
          callbacks=[tensorbrd])
```
| github_jupyter |
# UAS Collected Traffic Data Analysis (UAS4T)
The scope of the competition is to evaluate the accuracy of statistical or CI methods in transportation-related
detection problems with specific reference in queue formation in urban arterials. The focus will be on obtaining
results as close as possible to the real data.
As per the requirements of the competition, we tried to develop an algorithm to estimate the maximum length of the queues per lane that
are formed for different approaches of an intersection and roads (to be specified) during the monitoring duration.
This algorithm outputs following components:
i. Maximum length of queue
ii. Lane the maximum length occurred
iii. Coordinates of the start and end of the maximum queue
iv. Timestamp of the maximum queue occurrence
v. Whether, when and where a spillback is formed
```
import numpy as np
import cv2
import csv
import json
import os
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from matplotlib import pyplot as plt
#DISP : Display Flag for Vehicle Trajectory, Lanes and Spillback
DISP = 1
#Color Indications of all the vehicles from the given data
# NOTE(review): keys intentionally keep the leading space so they match the
# ' Type' field split straight out of the CSV — do not strip them.
obj_types = {' Car': {'color': (255, 70, 70),
                      'width': 5,
                      'height': 5, },
             ' Medium Vehicle': {'color': (70, 255, 70),
                                 'width': 4,
                                 'height': 4, },
             ' Motorcycle': {'color': (70, 70, 255),
                             'width': 3,
                             'height': 3, },
             ' Heavy Vehicle': {'color': (255, 255, 0),
                                'width': 6,
                                'height': 6, },
             ' Bus': {'color': (70, 100, 255),
                      'width': 6,
                      'height': 6, },
             ' Taxi': {'color': (255, 0, 255),
                       'width': 4,
                       'height': 4, }, }
#Properties of the trajectory
# Bounds start at extreme values so readdata() can tighten them with min/max.
traj_props = {}
traj_props['lat_min'] = 1000000
traj_props['lat_max'] = 0
traj_props['lon_min'] = 1000000
traj_props['lon_max'] = 0
traj_props['lon_diff'] = 0
traj_props['lat_diff'] = 0
traj_props['max_time'] = 813
traj_props['min_time'] = 0
traj_props['img_height'] = 0
traj_props['img_width'] = 0
traj_props['scale_trajectory'] = 200000  # degrees -> pixels scale factor
traj_props['longitude_km'] = 111.2  # km per degree (approx., site-specific)
traj_props['lattitude_km'] = 127.2
#Route information initialization
routes_information={}
routes_names = ['LeofAlexandras_tw_28isOktovriou', 'OktovriouIs28_tw_LeofAlexandras', 'OktovriouIs28_tw_South']
for route in routes_names:
    route_information={}
    route_information['direction']=0
    route_information['orientation_range'] = []
    route_information['max_queue']={}
    route_information['max_queue']['length'] = 0
    route_information['max_queue']['time'] = 0.00
    route_information['max_queue']['points'] = []
    route_information['max_queue']['n_vehicles'] = []
    routes_information[route]= route_information
# Per-route heading (degrees) and accepted vehicle orientation window.
routes_information['LeofAlexandras_tw_28isOktovriou']['direction'] = 0
routes_information['OktovriouIs28_tw_LeofAlexandras']['direction'] = 225
routes_information['OktovriouIs28_tw_South']['direction'] = 90
routes_information['LeofAlexandras_tw_28isOktovriou']['orientation_range'] = [337.5, 22.5]
routes_information['OktovriouIs28_tw_LeofAlexandras']['orientation_range'] = [202.5, 247.5]
routes_information['OktovriouIs28_tw_South']['orientation_range'] = [67.5, 112.5]
routes_information['LeofAlexandras_tw_28isOktovriou']['lane_axis'] = [0, 0] # 0 col 1 row
routes_information['OktovriouIs28_tw_LeofAlexandras']['lane_axis'] = [0, 1]
routes_information['OktovriouIs28_tw_South']['lane_axis'] = [1, 1]
```
### Locating the Lane Areas
The location of the lane areas is determined by an anchor drawing concept. The proposed approach is a modified/improved version of the edge drawing algorithm, called anchor drawing. In anchor drawing, we draw a line passing through the maximum peak pixels, obtaining a continuous line compared to edge drawing.
```
class AnchorDrawing:
    """
    A class to draw the anchor line to locate the lane.

    Starting from local-maximum ("anchor") pixels of the accumulated
    trajectory image, each anchor is extended in both directions along
    neighbouring peak pixels, producing continuous anchor lines
    (an improved variant of edge drawing).
    ...
    Attributes
    ----------
    Methods
    -------
    moveUp_(x, y):
        Compute next peak value towards up direction
    moveDown_(x, y):
        Compute next peak value towards down direction
    moveRight_(x,y):
        Compute next peak value towards right direction
    moveLeft_(x, y):
        Compute next peak value towards left direction
    moveon_peak_points(x, y, direct_next):
        Compute the next peaks
    compute_anchors(image):
        Computing initial peak or anchor points list
    Draw(image):
        Entry point of the class
    """
    # initiation
    def __init__(self):
        # minimum pixel value for a local maximum to qualify as an anchor
        self.anchorThreshold_ = 50
        # dimension of image
        self.height = 0
        self.width = 0
        # per-pixel dominant orientation: vertical_move (-1), horizontal_move (1) or 0
        self.Degree = np.array([])
        # boolean map of pixels already claimed by an anchor line
        self.anchor_image = np.array([])
        self.Img = np.array([])
        # direction codes used while walking along peak pixels
        self.horizontal_move = 1
        self.vertical_move = -1
        self.left_move = -1
        self.right_move = 1
        self.up_move = -1
        self.down_move = 1

    def moveUp_(self, x, y):
        '''
        Compute next peak value towards up direction
        Input Parameters:
            x: Row index
            y: Column index
        Output Parameters:
            list_points: Segment of peak points
            direct_next: Follow-up search direction (left/right), similar for right, up and down
        '''
        list_points = []  # array to store peak points
        direct_next = None  # search direction for the next walk
        # NOTE(review): if the very first visited pixel is not vertically
        # oriented, y_last is read before assignment (NameError) — callers
        # appear to enter only on vertically oriented pixels; confirm.
        while x > 0 and self.Img[x, y] > 0 and not self.anchor_image[x, y]:
            next_y = [max(0, y - 1), y, min(self.width - 1, y + 1)]  # search in a valid area
            list_points.append((x, y))  # extend line segments
            if self.Degree[x, y] == self.vertical_move:
                self.anchor_image[x, y] = True  # mark as anchor peak
                y_last = y  # record parent pixel
                x, y = x - 1, next_y[np.argmax(self.Img[x - 1, next_y])]  # walk to next pixel with max gradient
            else:
                direct_next = y - y_last  # change direction to continue search
                break  # stop and proceed to next search
        return list_points, direct_next

    def moveDown_(self, x, y):
        '''
        Compute next peak value towards down direction
        Input Parameters:
            x: Row index
            y: Column index
        Output Parameters:
            list_points: Segment of peak points
            direct_next: Follow-up search direction (left/right)
        '''
        list_points = []
        direct_next = None
        while x < self.height - 1 and self.Img[x, y] > 0 and not self.anchor_image[x, y]:
            next_y = [max(0, y - 1), y, min(self.width - 1, y + 1)]
            list_points.append((x, y))
            if self.Degree[x, y] == self.vertical_move:
                self.anchor_image[x, y] = True
                y_last = y
                x, y = x + 1, next_y[np.argmax(self.Img[x + 1, next_y])]
            else:
                direct_next = y - y_last
                break
        return list_points, direct_next

    def moveRight_(self, x, y):
        '''
        Compute next peak value towards right direction
        Input Parameters:
            x: Row index
            y: Column index
        Output Parameters:
            list_points: Segment of peak points
            direct_next: Follow-up search direction (up/down)
        '''
        list_points = []
        direct_next = None
        while y < self.width - 1 and self.Img[x, y] > 0 and not self.anchor_image[x, y]:
            next_x = [max(0, x - 1), x, min(self.height - 1, x + 1)]
            list_points.append((x, y))
            if self.Degree[x, y] == self.horizontal_move:
                self.anchor_image[x, y] = True
                x_last = x
                x, y = next_x[np.argmax(self.Img[next_x, y + 1])], y + 1
            else:
                direct_next = x - x_last
                break
        return list_points, direct_next

    def moveLeft_(self, x, y):
        '''
        Compute next peak value towards left direction
        Input Parameters:
            x: Row index
            y: Column index
        Output Parameters:
            list_points: Segment of peak points
            direct_next: Follow-up search direction (up/down)
        '''
        list_points = []
        direct_next = None
        while y > 0 and self.Img[x, y] > 0 and not self.anchor_image[x, y]:
            next_x = [max(0, x - 1), x, min(self.height - 1, x + 1)]
            list_points.append((x, y))
            if self.Degree[x, y] == self.horizontal_move:
                self.anchor_image[x, y] = True
                x_last = x
                x, y = next_x[np.argmax(self.Img[next_x, y - 1])], y - 1
            else:
                direct_next = x - x_last
                break
        return list_points, direct_next

    def moveon_peak_points(self, x, y, direct_next):
        '''
        Walk from (x, y) along peak pixels, switching between horizontal and
        vertical search whenever the local orientation changes.
        Input Parameters:
            x: Row index
            y: Column index
            direct_next: Initial search direction (+1 or -1)
        Output Parameters:
            list_points: Segment of peak points
        '''
        list_points = [(x, y)]
        while direct_next is not None:
            x, y = list_points[-1][0], list_points[-1][1]
            # if the last point is towards horizontal, search horizontally
            if self.Degree[x, y] == self.horizontal_move:
                # get points sequence
                if direct_next == self.left_move:
                    s, direct_next = self.moveLeft_(x, y)
                elif direct_next == self.right_move:
                    s, direct_next = self.moveRight_(x, y)
                else:
                    break
            elif self.Degree[x, y] == self.vertical_move:  # search vertically
                if direct_next == self.up_move:
                    s, direct_next = self.moveUp_(x, y)
                elif direct_next == self.down_move:
                    s, direct_next = self.moveDown_(x, y)
                else:
                    break
            else:  # invalid point found
                break
            # first element of s duplicates the current point; keep the rest
            if len(s) > 1:
                list_points.extend(s[1:])
        return list_points

    # find list of anchors
    def compute_anchors(self, image):
        '''
        Computing initial peak or anchor points list
        Input Parameters:
            image: Input accumulated image with vehicle trajectories
        Output Parameters:
            anchor_list: List of anchor points
        '''
        # detect the anchor points: pixels above threshold that are local
        # maxima along at least one of the four principal directions
        anchor_list = []
        self.Degree = np.zeros(image.shape, np.float64)
        for row in range(1, self.height - 1):
            for col in range(1, self.width - 1):
                if (image[row, col] > self.anchorThreshold_):
                    if ((image[row - 1, col] < image[row, col] and image[row + 1, col] < image[row, col]) or \
                        (image[row, col - 1] < image[row, col] and image[row, col + 1] < image[row, col]) or\
                        (image[row - 1, col - 1] < image[row, col] and image[row + 1, col + 1] < image[row, col]) or\
                        (image[row - 1, col + 1] < image[row, col] and image[row + 1, col - 1] < image[row, col])):
                        anchor_list.append((row, col))
                        # int(...) promotes the sums past uint8/uint32 overflow
                        ysum = int(image[row-1, col])+image[row+1, col] + image[row-1, col-1]+image[row+1, col-1] + image[row-1, col+1]+image[row+1, col+1]
                        xsum = int(image[row, col-1])+image[row, col+1] + image[row-1, col-1]+image[row-1, col+1] + image[row+1, col-1]+image[row+1, col+1]
                        # stronger vertical neighbourhood -> vertical orientation (-1)
                        if (ysum > xsum):
                            self.Degree[row, col] = -1
                        else:
                            self.Degree[row, col] = 1
        return anchor_list

    def Draw(self, image):
        '''
        Entry point of the class
        Input Parameters:
            image: Input accumulated image with vehicle trajectories
        Output Parameters:
            anchor_line: List of anchor lines (each a list of (row, col) points)
        '''
        self.height = image.shape[0]
        self.width = image.shape[1]
        self.Img = image.copy()
        # compute anchor points list
        anchor_list = self.compute_anchors(image)
        anchor_line = []
        self.anchor_image = np.zeros(self.Img.shape, dtype=bool)
        for anchor in anchor_list:
            if not self.anchor_image[anchor]:  # if not marked as anchor peak
                # search for next peak point in direction 1
                point_list1 = self.moveon_peak_points(anchor[0], anchor[1], 1)
                # unmark the seed so the opposite walk can pass through it
                self.anchor_image[anchor] = False
                # search for next peak point in direction -1
                point_list2 = self.moveon_peak_points(anchor[0], anchor[1], -1)
                # concat two point lists (both contain the seed, so drop one copy)
                if len(point_list1[::-1] + point_list2) > 0:
                    anchor_line.append(point_list1[::-1] + point_list2[1:])
        return anchor_line
def readdata(file_name):
    '''
    Read vehicle trajectory data from the competition CSV.

    Input Parameters:
        file_name: csv filename/path
    Output Parameters:
        data: Dictionary with 'object_list' (per-vehicle trajectories) and
              'traj_props' (global bounds, updated in place as a side effect)
    '''
    # Context manager guarantees the file handle is closed; the previous
    # implementation opened the file and never closed it.
    with open(file_name, 'r') as csv_file:
        lines = csv_file.readlines()
    object_list = []
    for line in lines[1:]:  # skip the header row
        line_parts = line.split(';')
        object_prop = {
            'trajectory': {'lat': [], 'lon': [], 'speed': [], 'lon_acc': [],
                           'lat_acc': [], 'time': [], 'x': [], 'y': []},
            'track_id': int(line_parts[0]),
            'type': line_parts[1],
            'traveled_d': float(line_parts[2]),
            'avg_speed': float(line_parts[3]),
        }
        traj = object_prop['trajectory']
        # Each trajectory sample is a group of 6 consecutive fields:
        # lat, lon, speed, lat_acc, lon_acc, time.
        for step in range(4, len(line_parts) - 6, 6):
            latitude = float(line_parts[step])
            longitude = float(line_parts[step + 1])
            speed_v = float(line_parts[step + 2])
            latitude_acc = float(line_parts[step + 3])
            longitude_acc = float(line_parts[step + 4])
            time_stamp = float(line_parts[step + 5])
            traj['lat'].append(latitude)
            traj['lon'].append(longitude)
            traj['speed'].append(speed_v)
            traj['lon_acc'].append(longitude_acc)
            traj['lat_acc'].append(latitude_acc)
            traj['time'].append(time_stamp)
            # Tighten the global bounding box / time range (module-level dict).
            traj_props['lon_max'] = max(traj_props['lon_max'], longitude)
            traj_props['lon_min'] = min(traj_props['lon_min'], longitude)
            traj_props['lat_max'] = max(traj_props['lat_max'], latitude)
            traj_props['lat_min'] = min(traj_props['lat_min'], latitude)
            # NOTE(review): 'min_time' starts at 0, so it only moves for
            # negative timestamps — behavior preserved from the original;
            # confirm this is intended.
            traj_props['min_time'] = min(traj_props['min_time'], time_stamp)
            traj_props['max_time'] = max(traj_props['max_time'], time_stamp)
        object_list.append(object_prop)
    # Derived quantities: geographic extent and pixel dimensions of the
    # accumulator image.
    traj_props['lon_diff'] = traj_props['lon_max'] - traj_props['lon_min']
    traj_props['lat_diff'] = traj_props['lat_max'] - traj_props['lat_min']
    traj_props['img_height'] = int(round(traj_props['lat_diff'] * traj_props['scale_trajectory']))
    traj_props['img_width'] = int(round(traj_props['lon_diff'] * traj_props['scale_trajectory']))
    return {'object_list': object_list, 'traj_props': traj_props}
def get_line(point1, point2):
    '''
    Computes list of line points between two points (Bresenham's algorithm).

    Fix: the inputs are copied before use, so the caller's lists are never
    mutated (the original swapped coordinates in place) and tuples are now
    accepted as well.

    Input Parameters:
        point1: (Col, Row)
        point2: (Col, Row)
    Output Parameters:
        points: list of line points from point1 to point2
    '''
    # Work on local copies — never mutate the caller's arguments.
    p1 = [point1[0], point1[1]]
    p2 = [point2[0], point2[1]]
    points = []
    # If the line is steep, transpose the coordinate system.
    issteep = abs(p2[1] - p1[1]) > abs(p2[0] - p1[0])
    if issteep:
        p1[0], p1[1] = p1[1], p1[0]
        p2[0], p2[1] = p2[1], p2[0]
    # Always iterate left-to-right; remember if the output must be reversed.
    rev = False
    if p1[0] > p2[0]:
        p1[0], p2[0] = p2[0], p1[0]
        p1[1], p2[1] = p2[1], p1[1]
        rev = True
    deltax = p2[0] - p1[0]
    deltay = abs(p2[1] - p1[1])
    error = int(deltax / 2)
    y = p1[1]
    ystep = 1 if p1[1] < p2[1] else -1
    for x in range(p1[0], p2[0] + 1):
        # Undo the transpose when emitting points.
        points.append((y, x) if issteep else (x, y))
        error -= deltay
        if error < 0:
            y += ystep
            error += deltax
    # Reverse the list if the coordinates were reversed
    if rev:
        points.reverse()
    return points
def find_ang(p1, p2):
    '''
    Orientation, in degrees within [0, 360), of the vector from p2 to p1
    measured against the horizontal axis.
    Input Parameters:
        p1: (Col, Row)
        p2: (Col, Row)
    Output Parameters:
        angle in degrees, normalized to the [0, 360) range
    '''
    # Quadrant layout (before normalization):
    # ********90********
    # **45********135**
    # 0**************180
    # **-45*******-135*
    # *******-90*******
    angle = np.arctan2(p1[1] - p2[1], p1[0] - p2[0]) * 180 / np.pi
    # Map atan2's (-180, 180] output onto [0, 360).
    return angle + 360 if angle < 0 else angle
def normalizeimg(accum):
    '''
    Linear normalization of accumulated array with vehicle trajectories.

    The nonzero entries are rescaled so the smallest nonzero value maps to 0
    and the maximum maps to (approximately) 255; zero entries stay zero.
    If the array has no nonzero entries, the whole array is rescaled instead.

    Fix: the bare ``except: print()`` (which also shadowed the builtin
    ``min``) is replaced by an explicit empty-selection check.

    Input Parameters:
        accum: accumulated array
    Output Parameters:
        accum: normalized accumulated array (0-255). Note: the nonzero branch
        modifies the input in place; the all-zero branch returns a new array
        (behavior preserved from the original).
    '''
    nonzero = accum[accum != 0]
    min_val = nonzero.min() if nonzero.size else 0
    if (min_val != 0):
        # epsilon avoids division by zero when max == min
        accum[accum != 0] = (accum[accum != 0] - min_val) * (255 / float(accum.max() - min_val + 0.000000001))
    else:
        accum = (accum - accum.min()) * (255 / float(accum.max() - accum.min() + 0.000000001))
    return accum
def getLanePoints(img_side,direction=0, rRad = 3 ,n_lanes = 3, start_point = None):
    '''
    Computes lane areas using anchor drawing algorithm
    Input Parameters:
        img_side: Normalized accumulated array
        direction: Direction of the route (0, 90 or 225 degrees)
        rRad: Road radius approximate value (half lane width, in pixels)
        n_lanes: Number of lanes default
        start_point: Starting point of lane (Col, Row)
    Output Parameters:
        max_length_lanes: Returns lane information of the route with lane points, polygons
    '''
    img_height = img_side.shape[0]
    img_width = img_side.shape[1]
    ad = AnchorDrawing()
    edges = ad.Draw(img_side)
    # NOTE(review): list comprehension used for side effects only — the
    # lengths of all detected anchor lines.
    len_indices = []
    [len_indices.append(len(item)) for item in edges]
    # Sorted ascending, so the longest lines sit at the end.
    areas_index = np.argsort(np.array(len_indices))
    max_length_lanes = []
    # Take the n_lanes longest anchor lines as lane centre lines.
    for lNum in range(-1, -(n_lanes+1), -1):
        lane_info={}
        lpoints = edges[areas_index[lNum]]
        # Orient each line consistently with the route's travel direction.
        if direction ==0:
            if (lpoints[0][1] > lpoints[-1][1]):
                lpoints.reverse()
        elif direction == 90:
            if (lpoints[0][0] > lpoints[-1][0]):
                lpoints.reverse()
            # Optionally trim everything before the given start row.
            if start_point:
                row_vector= [p[0] for p in lpoints]
                if start_point[1] in row_vector:
                    idx = row_vector.index(start_point[1])
                    lpoints = lpoints[idx:]
        elif direction == 225:
            if (lpoints[0][1] < lpoints[-1][1]):
                lpoints.reverse()
        # Store as (x, y); anchor lines come back as (row, col).
        lane_info['lane_points'] = [(p[1], p[0]) for p in lpoints]
        line1=[]
        line2=[]
        ang = 0
        # Compute polygon area using lane points and road radius:
        # offset each centre-line point by rRad perpendicular to the local
        # segment direction, one offset per side (line1 / line2).
        # NOTE(review): only the 8 exact multiples of 45 degrees are handled;
        # points on segments with any other angle are silently skipped.
        for pnum in range(0, len(lpoints)):
            if (pnum < len(lpoints)-1):
                ang = find_ang((lpoints[pnum][1],lpoints[pnum][0]), (lpoints[pnum+1][1], lpoints[pnum+1][0]))
            if (ang == 45.0 ):# Push in the form of x,y
                line1.append(( max(0,lpoints[pnum][1]-rRad),min(img_height, lpoints[pnum][0]+rRad)))
                line2.append((min(img_width, lpoints[pnum][1] + rRad) , max(0, lpoints[pnum][0] - rRad)) )
            elif(ang == 225.0):
                line1.append((min(img_width, lpoints[pnum][1] + rRad), max(0, lpoints[pnum][0] - rRad)) )
                line2.append((max(0, lpoints[pnum][1] - rRad), min(img_height, lpoints[pnum][0] + rRad)))
            elif(ang == 0.0):
                line1.append((lpoints[pnum][1] , min(img_height, lpoints[pnum][0] + rRad)))
                line2.append((lpoints[pnum][1] , max(0, lpoints[pnum][0] - rRad)))
            elif (ang == 180.0):
                line1.append((lpoints[pnum][1], max(0, lpoints[pnum][0] - rRad)) )
                line2.append((lpoints[pnum][1], min(img_height, lpoints[pnum][0] + rRad)))
            elif(ang == 135.0):
                line1.append((max(0, lpoints[pnum][1] - rRad), max(0, lpoints[pnum][0] - rRad)))
                line2.append((min(img_width, lpoints[pnum][1] + rRad), min(img_height, lpoints[pnum][0] + rRad)))
            elif (ang == 315.0):
                line1.append((min(img_width, lpoints[pnum][1] + rRad), min(img_height, lpoints[pnum][0] + rRad)) )
                line2.append((max(0, lpoints[pnum][1] - rRad), max(0, lpoints[pnum][0] - rRad)))
            elif(ang == 90.0):
                line1.append((max(0, lpoints[pnum][1] - rRad), lpoints[pnum][0]))
                line2.append((min(img_width, lpoints[pnum][1] + rRad), lpoints[pnum][0]))
            elif (ang == 270.0):
                line1.append((min(img_width, lpoints[pnum][1] + rRad), lpoints[pnum][0]) )
                line2.append((max(0, lpoints[pnum][1] - rRad), lpoints[pnum][0]))
        # Walk one side forward and the other backward to close the ring.
        line2.reverse()
        poly_lane = line1+line2
        poly_lane.append(line1[0])
        polygon = Polygon(poly_lane)
        lane_info['poly'] = polygon
        lane_info['vertices'] = poly_lane
        max_length_lanes.append(lane_info)
    return max_length_lanes
def get_accumulator(trajectory_data, padd, routes_information):
    '''
    Computes accumulated array with all the vehicle trajectories.

    Input Parameters:
        trajectory_data: Data contains vehicle trajectory points and properties
        padd: Padding value for both rows and columns
        routes_information: Contains route direction, orientation ranges
    Output Parameters:
        accum: Dictionary with accumulated array for all the routes
    '''
    adj_ang = -15    # Angle adjustment to keep routes aligned with clear direction
    point_step = 30  # point step to get smooth line points
    ang_step = 2     # angle step to find angle/direction of vehicle in trajectory
    # 3x3 weight stamp added around every trajectory point; center weighted
    # higher so the vehicle path stands out in the accumulator.
    accum_weights = np.array([[1, 1, 1],
                              [1, 2, 1],
                              [1, 1, 1]], np.uint8) * 2
    object_list = trajectory_data['object_list']
    traj_props = trajectory_data['traj_props']
    padd_x = padd
    padd_y = padd
    # Accumulator canvas is the image extended by `padd` on every side.
    acc_w = traj_props['img_width'] + padd + padd + 1
    acc_h = traj_props['img_height'] + padd + padd + 1
    routes_names = list(routes_information.keys())
    accum = {}
    for route_name in routes_names:
        accum[route_name] = np.zeros([acc_h, acc_w], np.uint32)
    num_obj = len(object_list)
    for id_num in range(0, num_obj):
        traj = object_list[id_num]['trajectory']
        length = len(traj['time'])
        # Bug fix: operate on the `trajectory_data` parameter (via object_list /
        # traj_props) instead of the module-level global `data`, so the function
        # works on whatever dataset is actually passed in.
        # Convert lon/lat to scaled image coordinates, appended in place.
        for idx in range(0, length):
            traj['x'].append(
                traj_props['scale_trajectory'] * (traj['lon'][idx] - traj_props['lon_min']))
            traj['y'].append(
                traj_props['scale_trajectory'] * (traj['lat'][idx] - traj_props['lat_min']))
        # Build a de-duplicated polyline by connecting every `point_step`-th sample.
        comp_line_points = []
        xx1 = round(traj['x'][0]) + padd_x
        yy1 = round(traj['y'][0]) + padd_y
        start_point = min(point_step, length)
        for idx in range(start_point, length - point_step, point_step):
            xx2 = round(traj['x'][idx]) + padd_x
            yy2 = round(traj['y'][idx]) + padd_y
            points = get_line([xx1, yy1], [xx2, yy2])
            for p in points:
                if p not in comp_line_points:
                    comp_line_points.append(p)
            xx1 = xx2
            yy1 = yy2
        # Compute angle of each point in line (heading towards the point
        # `ang_step` positions ahead; trailing points keep angle 0).
        list_ang = []
        length = len(comp_line_points)
        for idx in range(0, length):
            ang = 0
            if (length - ang_step > idx):
                ang = find_ang(comp_line_points[idx], comp_line_points[idx + ang_step])
            list_ang.append(ang)
        # Stamp the weight kernel into every route whose orientation range
        # contains this point's heading (both bounds shifted by adj_ang).
        for idx in range(ang_step, length):
            col = comp_line_points[idx][0]
            row = comp_line_points[idx][1]
            for route_name in routes_names:
                orient = routes_information[route_name]['orientation_range']
                if (orient[1] < orient[0]):
                    # Range wraps around (e.g. 350..10 degrees): OR of the bounds.
                    in_range = (list_ang[idx] <= orient[1] + adj_ang or
                                list_ang[idx] > orient[0] + adj_ang)
                else:
                    in_range = (list_ang[idx] <= orient[1] + adj_ang and
                                list_ang[idx] > orient[0] + adj_ang)
                if in_range:
                    accum[route_name][row - padd: row + padd + 1,
                                      col - padd: col + padd + 1] += accum_weights
    return accum
def swap_data(lane_data, vehi_num1, vehi_num2):
    '''
    Swaps vehicle information in lane data (in place).

    Input Parameters:
        lane_data: Contains vehicles information (points, ids, speed)
        vehi_num1: Vehicle index in lane
        vehi_num2: Vehicle index in lane
    Output Parameters:
        None (lane_data lists are modified in place)
    '''
    # Tuple-swap every per-vehicle property so the two records stay consistent;
    # replaces four copies of the manual temp-variable swap.
    for key in ('ID', 'x', 'y', 'speed'):
        lane_data[key][vehi_num1], lane_data[key][vehi_num2] = (
            lane_data[key][vehi_num2], lane_data[key][vehi_num1])
def sort_ids(routes_data, lane_name, _direction):
    '''
    Sorts vehicle IDs using position and direction of lane (in place).

    Input Parameters:
        routes_data: Contains route information along with vehicle data
        lane_name: Route name
        _direction: Direction of the route
    Output Parameters:
        None (vehicle order inside each lane is updated in place)
    '''
    # The direction selects which coordinate orders vehicles along the lane
    # and whether that order is ascending (0, 90) or descending (225).
    for lane in routes_data[lane_name]:
        n_vehicles = len(lane['ID'])
        for i in range(0, n_vehicles):
            for j in range(i + 1, n_vehicles):
                out_of_order = False
                if _direction == 0:
                    out_of_order = lane['x'][i] > lane['x'][j]
                if _direction == 90:
                    out_of_order = lane['y'][i] > lane['y'][j]
                if _direction == 225:
                    out_of_order = lane['x'][i] < lane['x'][j]
                if out_of_order:
                    swap_data(lane, i, j)
def finddist(p1, p2):
    '''
    Finds the Euclidean distance between two points.

    Input Parameters:
        p1: (Col, Row)
        p2: (Col, Row)
    Output Parameters:
        Distance between p1 and p2
    '''
    d_col = p1[0] - p2[0]
    d_row = p1[1] - p2[1]
    return (d_col * d_col + d_row * d_row) ** 0.5
def find_queue_info(routes_data, laneInformation, temporal_data, route_name, axis_vec ):
    '''
    Computes vehicle queues per lane of the given route and estimates
    spillback positions from the queue information.

    Input Parameters:
        routes_data: Contains route information along with vehicle data
        laneInformation: Lane area information (lane_points per lane)
        temporal_data: Buffer of previous timeframes' routes data (0 = empty slot)
        route_name: Name of the route
        axis_vec: Axis indices used to match queue start/end points to lane points
    Output Parameters:
        queue_spillback_data: Dict with 'queue' and 'spillback' entries; each
        carries an 'exist' flag plus the detected points/IDs
    '''
    n_lanes = len(routes_data[route_name])
    speed_threshold = 7 # vehicle speed threshold (below this the vehicle counts as queued)
    gap_bw_vehicles = 2 # max index gap of non-stationary vehicles allowed inside one queue
    max_dist_gap = 30 # max distance gap between consecutive vehicles in a queue
    threshold_spill_to_lane_startpoint = 20 # ignore spills too close to the lane start point
    num_temporal_frams = 3 # number of temporal frames to be considered
    # Per-lane list of detected queue detail dicts.
    out_queue_info = []
    for l_num in range(0, n_lanes):
        out_queue_info.append([])
    spillback_data = {}
    spillback_data['exist'] = False
    queue_data = {}
    queue_data['exist'] = False
    new_ids_in_current_frame_q = []  # NOTE(review): never used below
    route_queue_indices = []  # per lane: list of queues, each a list of vehicle indices
    for l_num in range(0, n_lanes): # iterate through lane numbers
        list_vehicle_details = {}
        list_vehicle_details['point'] = []
        list_vehicle_details['index'] = []
        # find stationary (slow) vehicles in lane; vehicles are assumed already
        # sorted along the lane (see sort_ids), so indices reflect lane order
        for v_num in range(0, len(routes_data[route_name][l_num]['ID'])): # iterate through vehicles in lane
            if (routes_data[route_name][l_num]['speed'][v_num] < speed_threshold ):
                list_vehicle_details['point'].append( [ routes_data[route_name][l_num]['x'][v_num] , routes_data[route_name][l_num]['y'][v_num]])
                list_vehicle_details['index'].append(v_num)
        _queue_list = []
        _indices_list = []
        _queue = []
        _indices= []
        # split the stationary vehicles into separate queues: consecutive
        # vehicles belong to the same queue only while both the index gap and
        # the spatial gap stay small
        for idx in range(0, len(list_vehicle_details['index'])-1):
            if (list_vehicle_details['index'][idx+1] - list_vehicle_details['index'][idx] <= gap_bw_vehicles and
                finddist(list_vehicle_details['point'][idx+1] ,list_vehicle_details['point'][idx]) < max_dist_gap):
                if list_vehicle_details['point'][idx] not in _queue:
                    _queue.append(list_vehicle_details['point'][idx])
                    _indices.append(list_vehicle_details['index'][idx])
                if list_vehicle_details['point'][idx+1] not in _queue:
                    _queue.append(list_vehicle_details['point'][idx+1])
                    _indices.append(list_vehicle_details['index'][idx+1])
                # flush the running queue when the last pair has been processed
                if (idx == len(list_vehicle_details['index'])-2 ):
                    if (len(_queue) >1):
                        _queue_list.append(_queue)
                        _indices_list.append(_indices)
            else:
                # gap too large: close the current queue (if it has 2+ vehicles)
                # and start a new one
                if (len(_queue) > 1):
                    _queue_list.append(_queue)
                    _indices_list.append(_indices)
                _queue = []
                _indices = []
        route_queue_indices.append(_indices_list)
        # get queue vehicle information and points: snap the queue's first and
        # last vehicle onto the nearest lane points and take the lane-point
        # span between them as the queue polyline
        if (len(_queue_list) > 0):
            for qnum in range(0, len(_queue_list)):
                if (len(_queue_list[qnum]) > 1):
                    queue_data['exist'] = True
                    list_axis1_values = [p[axis_vec[0]] for p in laneInformation[route_name][l_num]['lane_points']]
                    list_axis2_values = [p[axis_vec[1]] for p in laneInformation[route_name][l_num]['lane_points']]
                    # nearest lane point to the queue head / tail along each axis
                    near_val1 = list_axis1_values[min(range(len(list_axis1_values)), key=lambda i: abs(list_axis1_values[i] - _queue_list[qnum][0][axis_vec[0]]))]
                    near_val2 = list_axis2_values[min(range(len(list_axis2_values)), key=lambda i: abs(list_axis2_values[i] - _queue_list[qnum][-1][axis_vec[1]]))]
                    s_idx = list_axis1_values.index(near_val1)
                    e_idx = list_axis2_values.index(near_val2)
                    if (s_idx> e_idx):
                        temp = e_idx
                        e_idx = s_idx
                        s_idx = temp
                    qdetails = {}
                    q_points = []
                    for _q_line_point in range(s_idx, e_idx+1):
                        q_points.append(laneInformation[route_name][l_num]['lane_points'][_q_line_point])
                    qdetails['points'] = q_points
                    qdetails['n_vehicles'] = _indices_list[qnum]
                    out_queue_info[l_num].append(qdetails)
    # Observing whether spill_back happening: only once the temporal buffer is
    # fully populated (no 0 placeholder slots remain)
    if (temporal_data.count(0) == 0):
        spillback_data['ID'] = []
        spillback_data['points'] = []
        # collect all vehicle IDs seen in recent frames
        # NOTE(review): range(-1, -num_temporal_frams, -1) visits frames -1 and -2
        # only, i.e. num_temporal_frams-1 frames — confirm whether intended
        previus_ids = []
        for n_frame in range(-1, -num_temporal_frams, -1):
            for l_num in range(0, n_lanes):
                previus_ids.extend(temporal_data[n_frame][route_name][l_num]['ID'])
        for l_num in range(0, n_lanes):
            for current_id in routes_data[route_name][l_num]['ID']:
                # a vehicle absent from recent frames that appears inside/behind
                # a queue is treated as a spillback candidate
                if (current_id not in previus_ids):
                    new_id_index = routes_data[route_name][l_num]['ID'].index(current_id)
                    for qind in range(0, len(route_queue_indices[l_num])):
                        if (qind > 0):
                            # between previous queue's last vehicle ([0-1] == [-1])
                            # and this queue's first vehicle
                            if new_id_index <= route_queue_indices[l_num][qind][0] and new_id_index >= route_queue_indices[l_num][qind-1][0-1]:
                                spill_point = [routes_data[route_name][l_num]['x'][new_id_index], routes_data[route_name][l_num]['y'][new_id_index]]
                                dist_from_start_point = finddist(laneInformation[route_name][l_num]['lane_points'][0], spill_point)
                                if (dist_from_start_point > threshold_spill_to_lane_startpoint):
                                    #spill_back_ids.append(new_id_index)
                                    spillback_data['exist'] = True
                                    spillback_data['ID'].append(routes_data[route_name][l_num]['ID'][new_id_index])
                                    spillback_data['points'].append(spill_point)
                        elif (qind ==0):
                            # ahead of (or at) the first queue's first vehicle
                            if new_id_index <= route_queue_indices[l_num][qind][0]:
                                spill_point = [routes_data[route_name][l_num]['x'][new_id_index], routes_data[route_name][l_num]['y'][new_id_index]]
                                dist_from_start_point = finddist(laneInformation[route_name][l_num]['lane_points'][0], spill_point)
                                if (dist_from_start_point > threshold_spill_to_lane_startpoint):
                                    spillback_data['exist'] = True
                                    #spill_back_ids.append(new_id_index)
                                    spillback_data['ID'].append(routes_data[route_name][l_num]['ID'][new_id_index])
                                    spillback_data['points'].append([routes_data[route_name][l_num]['x'][new_id_index], routes_data[route_name][l_num]['y'][new_id_index]])
    queue_data['points'] = out_queue_info
    queue_spillback_data = {}
    queue_spillback_data['queue'] = queue_data
    queue_spillback_data['spillback'] = spillback_data
    return queue_spillback_data
def init_route_info(lane_names):
    '''
    Initializes route information.

    Input Parameters:
        lane_names: Names of the routes
    Output Parameters:
        routes_data: Initial route information — three lanes per route, each
        lane holding empty per-vehicle lists (ID, speed, x, y)
    '''
    # Build a fresh dict per lane so lanes never share list objects.
    return {
        route_name: [
            {'ID': [], 'speed': [], 'x': [], 'y': []} for _ in range(0, 3)
        ]
        for route_name in lane_names
    }
if __name__ == '__main__':
    # NOTE(review): this file handle is opened but never written to or closed —
    # it only creates/truncates spillback.txt; confirm whether still needed.
    file_spill_back = open('spillback.txt', 'w')
    # Parameter used to rank queues: 'points' = trajectory length,
    # 'n_vehicles' = number of vehicles in the queue.
    max_length_parameter = 'points'
    file_name = 'competition_dataset.csv'
    data = readdata(file_name)
    traj_props = data['traj_props']
    # Method 1: build per-route accumulators and extract lane geometry from them.
    padd = 1
    routes_accumulater_data = get_accumulator(data, padd, routes_information)
    Lane_information = {}  # Init lane information
    route_names = list(routes_information.keys())
    lane_array = {}
    for route_name in route_names:
        # Strip the padding border before normalizing the accumulator image.
        lane_array[route_name] = normalizeimg(routes_accumulater_data[route_name][padd:-padd-1, padd:-padd-1])
    Lane_information['LeofAlexandras_tw_28isOktovriou'] = getLanePoints(lane_array['LeofAlexandras_tw_28isOktovriou'], direction=0)
    Lane_information['OktovriouIs28_tw_LeofAlexandras'] = getLanePoints(lane_array['OktovriouIs28_tw_LeofAlexandras'], direction=225)
    # selecting OktovriouIs28_tw_South start point based on LeofAlexandras_tw_28isOktovriou start point
    Lane_information['OktovriouIs28_tw_South'] = getLanePoints(lane_array['OktovriouIs28_tw_South'], direction=90,
                                                               start_point=Lane_information['LeofAlexandras_tw_28isOktovriou'][0]['lane_points'][0])
    if DISP:
        font = cv2.FONT_HERSHEY_SIMPLEX  # Font style for display text
        fontScale = 0.5      # font scale for display text
        color = (255, 0, 0)  # font color for display text
        thickness = 2        # font thickness for display text
        legend = np.ones((250, 250, 3), np.uint8) * 255  # vehicle type legend initialization
        keys = list(obj_types.keys())  # object type names
        gap = 3   # gap in pixels between legend entries
        siz = 15  # size of each legend color box
        for key in range(0, len(keys)):  # draw legend on image
            cv2.rectangle(legend, (gap, gap + (key * siz)), ((siz - gap), (siz - gap) + (key * siz)), obj_types[keys[key]]['color'], -1)
            cv2.putText(legend, keys[key], (gap + siz, (siz - gap) + (key * siz)), font, fontScale, obj_types[keys[key]]['color'], 1, cv2.LINE_AA)
        ui_img = np.ones((traj_props['img_height'], traj_props['img_width'], 3), np.uint8) * 255  # initialize trajectory display image
        legend = np.flipud(legend)  # flip legend vertically: world (trajectory) coordinates -> image coordinates
        ui_img[-legend.shape[0]:, -legend.shape[1]:] = legend  # add legend to initial trajectory image
        # Draw lane polygon outlines
        lane_colors = [(255, 200, 200), (200, 255, 200), (200, 200, 255)]  # per-lane colors
        for route in route_names:
            for nLane in range(0, len(Lane_information[route])):
                vert = Lane_information[route][nLane]['vertices']  # polygon vertices
                pts = np.array(vert, np.int32)
                cv2.polylines(ui_img, [pts], False, lane_colors[nLane], 1)
    start_time = 0
    end_time = 813
    time_step = 0.04
    num_samples = int((end_time - start_time) / time_step)
    # Sample timestamps rounded to 2 decimals; uses time_step instead of the
    # previously hard-coded 0.04 so the two stay consistent.
    vec_time = [int((tim_itr * time_step * 100) + 0.5) / 100.0 for tim_itr in range(0, num_samples)]
    object_list = data['object_list']
    temporal_info = [0] * 15  # sliding buffer of recent frames' route data (0 = empty slot)
    spill_back_data_list = []
    for time_stamp in vec_time:
        if DISP:
            image = np.copy(ui_img)
        routes_data = init_route_info(route_names)
        # Assign every object visible at this timestamp to at most one lane polygon.
        for obj_id in range(0, len(object_list)):
            if time_stamp in object_list[obj_id]['trajectory']['time']:
                time_index = object_list[obj_id]['trajectory']['time'].index(time_stamp)
                col_range = object_list[obj_id]['trajectory']['x'][time_index]
                row_range = object_list[obj_id]['trajectory']['y'][time_index]
                traj_point = Point(col_range, row_range)  # scaled longitude/latitude point
                if DISP:
                    cv2.rectangle(image, (round(col_range) - 1, round(row_range) - 1), (round(col_range) + 1, round(row_range) + 1), obj_types[object_list[obj_id]['type']]['color'], 1)
                obj_allocate = True
                for route in route_names:
                    if (obj_allocate):
                        for nLane in range(0, len(Lane_information[route])):
                            if (obj_allocate):
                                if (Lane_information[route][nLane]['poly'].contains(traj_point)):
                                    routes_data[route][nLane]['ID'].append(object_list[obj_id]['track_id'])
                                    routes_data[route][nLane]['speed'].append(object_list[obj_id]['trajectory']['speed'][time_index])
                                    routes_data[route][nLane]['x'].append(object_list[obj_id]['trajectory']['x'][time_index])
                                    routes_data[route][nLane]['y'].append(object_list[obj_id]['trajectory']['y'][time_index])
                                    obj_allocate = False
                                    break
        for route_name in route_names:
            # Sort vehicle data in lane according to the position from the intersection point
            sort_ids(routes_data, route_name, routes_information[route_name]['direction'])
            # Find queue points (if any) for this route; queued vehicles are near-stationary
            _queue_spillback_info = find_queue_info(routes_data, Lane_information, temporal_info, route_name, routes_information[route_name]['lane_axis'])
            if _queue_spillback_info['queue']['exist']:
                for l_num in range(0, len(_queue_spillback_info['queue']['points'])):
                    for _queue in _queue_spillback_info['queue']['points'][l_num]:
                        # Track the maximum queue (by the configured ranking parameter) per route.
                        if (len(_queue[max_length_parameter]) > routes_information[route_name]['max_queue']['length']):
                            routes_information[route_name]['max_queue']['length'] = len(_queue['points'])
                            routes_information[route_name]['max_queue']['points'] = _queue['points']
                            routes_information[route_name]['max_queue']['time'] = time_stamp
                            routes_information[route_name]['max_queue']['n_vehicles'] = len(_queue['n_vehicles'])
                        if DISP:
                            pts = np.array(_queue['points'], np.int32)
                            cv2.polylines(image, [pts], False, (255, 100, 70), 2)
            if _queue_spillback_info['spillback']['exist']:
                for n_spill in range(0, len(_queue_spillback_info['spillback']['points'])):
                    spill_point = _queue_spillback_info['spillback']['points'][n_spill]
                    spill_id = _queue_spillback_info['spillback']['ID'][n_spill]
                    spill_index = object_list[int(spill_id) - 1]['trajectory']['time'].index(time_stamp)
                    spill_data = {}
                    spill_data['Longitude'] = str(object_list[int(spill_id) - 1]['trajectory']['lon'][spill_index])
                    # Bug fix: Latitude previously read the 'lon' series; it must come from 'lat'.
                    spill_data['Latitude'] = str(object_list[int(spill_id) - 1]['trajectory']['lat'][spill_index])
                    spill_data['Object_ID'] = spill_id
                    spill_data['Time'] = time_stamp
                    spill_data['SpillBack_Route_area'] = route_name
                    spill_back_data_list.append(spill_data)
                    if DISP:
                        spill_point = _queue_spillback_info['spillback']['points'][n_spill]
                        cv2.circle(image, (int(spill_point[0]), int(spill_point[1])), 4, (0, 0, 255), 2)
        # Slide the temporal buffer: drop the oldest frame, append the current one.
        temporal_info.pop(0)
        temporal_info.append(routes_data)
        if DISP:
            image = np.flipud(image)
            image = np.array(image)
            cv2.putText(image, 'Time: ' + str(time_stamp), (image.shape[1] - 400, 30), font, 0.7, (255, 100, 100), 1, cv2.LINE_AA)
            cv2.imshow("UAS Trajectory View", image)
            cv2.waitKey(1)
    # Write per-route maximum queue results, followed by the spillback records.
    with open('results.csv', mode='w') as results_file:
        writer_q = csv.DictWriter(results_file, fieldnames=['Route_name', 'Time_stamp', 'Maximum_queue_length_points', 'Maximum_queue_length_meters', 'Num_vehicles', 'Coordinates'])
        writer_q.writeheader()
        for route_name in route_names:
            if (routes_information[route_name]['max_queue']['length'] > 0):
                coordinates = []
                # Convert queue points from scaled image coordinates back to lon/lat.
                for pnum in range(0, len(routes_information[route_name]['max_queue']['points'])):
                    coordinates.append((((routes_information[route_name]['max_queue']['points'][pnum][0] / float(data['traj_props']['scale_trajectory'])) + traj_props['lon_min']),
                                        ((routes_information[route_name]['max_queue']['points'][pnum][1] / float(data['traj_props']['scale_trajectory'])) + traj_props['lat_min'])))
                distance = finddist(coordinates[0], coordinates[-1])
                dist_m = distance * 112 * 1000  # approximate degrees -> meters conversion
                writer_q.writerow({'Route_name': route_name,
                                   'Time_stamp': str(routes_information[route_name]['max_queue']['time']),
                                   'Maximum_queue_length_points': routes_information[route_name]['max_queue']['length'],
                                   'Maximum_queue_length_meters': dist_m,
                                   'Num_vehicles': routes_information[route_name]['max_queue']['n_vehicles'],
                                   'Coordinates': coordinates}
                                  )
        if (len(spill_back_data_list) > 0):
            spill_back_fields = list(spill_back_data_list[0].keys())
            writer = csv.DictWriter(results_file, fieldnames=spill_back_fields)
            writer.writeheader()
            for spill in spill_back_data_list:
                writer.writerow(spill)
```
| github_jupyter |
```
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Notebook authors: Kevin P. Murphy (murphyk@gmail.com)
# and Mahmoud Soliman (mjs@aucegypt.edu)
# This notebook reproduces figures for chapter 15 from the book
# "Probabilistic Machine Learning: An Introduction"
# by Kevin Murphy (MIT Press, 2021).
# Book pdf is available from http://probml.ai
```
<a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
<a href="https://colab.research.google.com/github/probml/pml-book/blob/main/pml1/figure_notebooks/chapter15_neural_networks_for_sequences_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Figure 15.1:<a name='15.1'></a> <a name='rnn'></a>
Recurrent neural network (RNN) for generating a variable-length output sequence $\bm{y}_{1:T}$ given an optional fixed-length input vector $\bm{x}$
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.1.png" width="256"/>
## Figure 15.2:<a name='15.2'></a> <a name='rnnTimeMachine'></a>
Example output of length 500 generated from a character-level RNN when given the prefix "the". We use greedy decoding, in which the most likely character at each step is computed, and then fed back into the model. The model is trained on the book *The Time Machine* by H. G. Wells.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/rnn_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
## Figure 15.3:<a name='15.3'></a> <a name='imageCaptioning'></a>
Illustration of a CNN-RNN model for image captioning. The pink boxes labeled ``LSTM'' refer to a specific kind of RNN that we discuss in \cref sec:LSTM . The pink boxes labeled $W_ \text emb $ refer to embedding matrices for the (sampled) one-hot tokens, so that the input to the model is a real-valued vector. From https://bit.ly/2FKnqHm . Used with kind permission of Yunjey Choi
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.3.png" width="256"/>
## Figure 15.4:<a name='15.4'></a> <a name='rnnBiPool'></a>
(a) RNN for sequence classification. (b) Bi-directional RNN for sequence classification
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.4_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.4_B.png" width="256"/>
## Figure 15.5:<a name='15.5'></a> <a name='biRNN'></a>
(a) RNN for transforming a sequence to another, aligned sequence. (b) Bi-directional RNN for the same task
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.5_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.5_B.png" width="256"/>
## Figure 15.6:<a name='15.6'></a> <a name='deepRNN'></a>
Illustration of a deep RNN. Adapted from Figure 9.3.1 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.6.png" width="256"/>
## Figure 15.7:<a name='15.7'></a> <a name='seq2seq'></a>
Encoder-decoder RNN architecture for mapping sequence $\bm{x}_{1:T}$ to sequence $\bm{y}_{1:T'}$
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.7.png" width="256"/>
## Figure 15.8:<a name='15.8'></a> <a name='NMT'></a>
(a) Illustration of a seq2seq model for translating English to French. The - character represents the end of a sentence. From Figure 2.4 of <a href='#Luong2016thesis'>[Luo16]</a> . Used with kind permission of Minh-Thang Luong. (b) Illustration of greedy decoding. The most likely French word at each step is highlighted in green, and then fed in as input to the next step of the decoder. From Figure 2.5 of <a href='#Luong2016thesis'>[Luo16]</a> . Used with kind permission of Minh-Thang Luong
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.8_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.8_B.png" width="256"/>
## Figure 15.9:<a name='15.9'></a> <a name='BPTT'></a>
An RNN unrolled (vertically) for 3 time steps, with the target output sequence and loss node shown explicitly. From Figure 8.7.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.9.png" width="256"/>
## Figure 15.10:<a name='15.10'></a> <a name='GRU'></a>
Illustration of a GRU. Adapted from Figure 9.1.3 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.10.png" width="256"/>
## Figure 15.11:<a name='15.11'></a> <a name='LSTM'></a>
Illustration of an LSTM. Adapted from Figure 9.2.4 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.11.png" width="256"/>
## Figure 15.12:<a name='15.12'></a> <a name='stsProb'></a>
Conditional probabilities of generating each token at each step for two different sequences. From Figures 9.8.1--9.8.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.12_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.12_B.png" width="256"/>
## Figure 15.13:<a name='15.13'></a> <a name='beamSearch'></a>
Illustration of beam search using a beam of size $K=2$. The vocabulary is $\mathcal Y = \ A,B,C,D,E\ $, with size $V=5$. We assume the top 2 symbols at step 1 are A,C. At step 2, we evaluate $p(y_1=A,y_2=y)$ and $p(y_1=C,y_2=y)$ for each $y \in \mathcal Y $. This takes $O(K V)$ time. We then pick the top 2 partial paths, which are $(y_1=A,y_2=B)$ and $(y_1=C,y_2=E)$, and continue in the obvious way. Adapted from Figure 9.8.3 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.13.png" width="256"/>
## Figure 15.14:<a name='15.14'></a> <a name='textCNN'></a>
Illustration of the TextCNN model for binary sentiment classification. Adapted from Figure 15.3.5 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.14.png" width="256"/>
## Figure 15.15:<a name='15.15'></a> <a name='wavenet'></a>
Illustration of the wavenet model using dilated (atrous) convolutions, with dilation factors of 1, 2, 4 and 8. From Figure 3 of <a href='#wavenet'>[Aar+16]</a> . Used with kind permission of Aaron van den Oord
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.15.png" width="256"/>
## Figure 15.16:<a name='15.16'></a> <a name='attention'></a>
Attention computes a weighted average of a set of values, where the weights are derived by comparing the query vector to a set of keys. From Figure 10.3.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.16.pdf" width="256"/>
## Figure 15.17:<a name='15.17'></a> <a name='attenRegression'></a>
Kernel regression in 1d. (a) Kernel weight matrix. (b) Resulting predictions on a dense grid of test points.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks/kernel_regression_attention.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.17_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.17_B.png" width="256"/>
## Figure 15.18:<a name='15.18'></a> <a name='seq2seqAttn'></a>
Illustration of seq2seq with attention for English to French translation. Used with kind permission of Minh-Thang Luong
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.18.png" width="256"/>
## Figure 15.19:<a name='15.19'></a> <a name='translationHeatmap'></a>
Illustration of the attention heatmaps generated while translating two sentences from Spanish to English. (a) Input is ``hace mucho frio aqui.'', output is ``it is very cold here.''. (b) Input is ``¿todavia estan en casa?'', output is ``are you still at home?''. Note that when generating the output token ``home'', the model should attend to the input token ``casa'', but in fact it seems to attend to the input token ``?''. Adapted from https://www.tensorflow.org/tutorials/text/nmt_with_attention
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.19_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.19_B.png" width="256"/>
## Figure 15.20:<a name='15.20'></a> <a name='EHR'></a>
Example of an electronic health record. In this example, 24h after admission to the hospital, the RNN classifier predicts the risk of death as 19.9\%; the patient ultimately died 10 days after admission. The ``relevant'' keywords from the input clinical notes are shown in red, as identified by an attention mechanism. From Figure 3 of <a href='#Rajkomar2018'>[Alv+18]</a> . Used with kind permission of Alvin Rakomar
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.20.png" width="256"/>
## Figure 15.21:<a name='15.21'></a> <a name='SNLI'></a>
Illustration of sentence pair entailment classification using an MLP with attention to align the premise (``I do need sleep'') with the hypothesis (``I am tired''). White squares denote active attention weights, blue squares are inactive. (We are assuming hard 0/1 attention for simplicity.) From Figure 15.5.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.21.png" width="256"/>
## Figure 15.22:<a name='15.22'></a> <a name='showAttendTell'></a>
Image captioning using attention. (a) Soft attention. Generates ``a woman is throwing a frisbee in a park''. (b) Hard attention. Generates ``a man and a woman playing frisbee in a field''. From Figure 6 of <a href='#showAttendTell'>[Kel+15]</a> . Used with kind permission of Kelvin Xu
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.22_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.22_B.png" width="256"/>
## Figure 15.23:<a name='15.23'></a> <a name='transformerTranslation'></a>
Illustration of how encoder self-attention for the word ``it'' differs depending on the input context. From https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html . Used with kind permission of Jakob Uszkoreit
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.23.png" width="256"/>
## Figure 15.24:<a name='15.24'></a> <a name='multiHeadAttn'></a>
Multi-head attention. Adapted from Figure 9.3.3 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.24.png" width="256"/>
## Figure 15.25:<a name='15.25'></a> <a name='positionalEncodingSinusoids'></a>
(a) Positional encoding matrix for a sequence of length $n=60$ and an embedding dimension of size $d=32$. (b) Basis functions for columns 6 to 9.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/positional_encoding.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.25_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.25_B.png" width="256"/>
## Figure 15.26:<a name='15.26'></a> <a name='transformer'></a>
The transformer. From <a href='#Weng2018attention'>[Lil18]</a> . Used with kind permission of Lilian Weng. Adapted from Figures 1--2 of <a href='#Vaswani2017'>[Ash+17]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.26.png" width="256"/>
## Figure 15.27:<a name='15.27'></a> <a name='attentionBakeoff'></a>
Comparison of (1d) CNNs, RNNs and self-attention models. From Figure 10.6.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.27.png" width="256"/>
## Figure 15.28:<a name='15.28'></a> <a name='VIT'></a>
The Vision Transformer (ViT) model. This treats an image as a set of input patches. The input is prepended with the special CLASS embedding vector (denoted by *) in location 0. The class label for the image is derived by applying softmax to the final output encoding at location 0. From Figure 1 of <a href='#ViT'>[Ale+21]</a> . Used with kind permission of Alexey Dosovitski
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.28.png" width="256"/>
## Figure 15.29:<a name='15.29'></a> <a name='transformers_taxonomy'></a>
Venn diagram presenting the taxonomy of different efficient transformer architectures. From <a href='#Tay2020transformers'>[Yi+20]</a> . Used with kind permission of Yi Tay
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.29.pdf" width="256"/>
## Figure 15.30:<a name='15.30'></a> <a name='rand_for_fast_atten'></a>
Attention matrix $\mathbf A $ rewritten as a product of two lower rank matrices $\mathbf Q ^ \prime $ and $(\mathbf K ^ \prime )^ \mkern -1.5mu\mathsf T $ with random feature maps $\boldsymbol \phi ( \bm q _i) \in \mathbb R ^M$ and $\boldsymbol \phi ( \bm v _k) \in \mathbb R ^M$ for the corresponding queries/keys stored in the rows/columns. Used with kind permission of Krzysztof Choromanski
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.30.png" width="256"/>
## Figure 15.31:<a name='15.31'></a> <a name='fatten'></a>
Decomposition of the attention matrix $\mathbf A $ can be leveraged to improve attention computations via matrix associativity property. To compute $\mathbf AV $, we first calculate $\mathbf G =( \bm k ^ \prime )^ \mkern -1.5mu\mathsf T \mathbf V $ and then $ \bm q ^ \prime \mathbf G $, resulting in linear in $N$ space and time complexity. Used with kind permission of Krzysztof Choromanski
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.31.png" width="256"/>
## Figure 15.32:<a name='15.32'></a> <a name='elmo'></a>
Illustration of ELMo bidrectional language model. Here $y_t=x_ t+1 $ when acting as the target for the forwards LSTM, and $y_t = x_ t-1 $ for the backwards LSTM. (We add \text \em bos \xspace and \text \em eos \xspace sentinels to handle the edge cases.) From <a href='#Weng2019LM'>[Lil19]</a> . Used with kind permission of Lilian Weng
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.32.png" width="256"/>
## Figure 15.33:<a name='15.33'></a> <a name='GPT'></a>
Illustration of (a) BERT and (b) GPT. $E_t$ is the embedding vector for the input token at location $t$, and $T_t$ is the output target to be predicted. From Figure 3 of <a href='#bert'>[Jac+19]</a> . Used with kind permission of Ming-Wei Chang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.33_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.33_B.png" width="256"/>
## Figure 15.34:<a name='15.34'></a> <a name='bertEmbedding'></a>
Illustration of how a pair of input sequences, denoted A and B, are encoded before feeding to BERT. From Figure 14.8.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.34.png" width="256"/>
## Figure 15.35:<a name='15.35'></a> <a name='bert-tasks'></a>
Illustration of how BERT can be used for different kinds of supervised NLP tasks. (a) Single sentence classification (e.g., sentiment analysis); (b) Sentence-pair classification (e.g., textual entailment); (d) Single sentence tagging (e.g., shallow parsing); (d) Question answering. From Figure 4 of <a href='#bert'>[Jac+19]</a> . Used with kind permission of Ming-Wei Chang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_B.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_C.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_D.png" width="256"/>
## Figure 15.36:<a name='15.36'></a> <a name='T5'></a>
Illustration of how the T5 model (``Text-to-text Transfer Transformer'') can be used to perform multiple NLP tasks, such as translating English to German; determining if a sentence is linguistic valid or not ( \bf CoLA stands for ``Corpus of Linguistic Acceptability''); determining the degree of semantic similarity ( \bf STSB stands for ``Semantic Textual Similarity Benchmark''); and abstractive summarization. From Figure 1 of <a href='#T5'>[Col+19]</a> . Used with kind permission of Colin Raffel
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.36.png" width="256"/>
## References:
<a name='wavenet'>[Aar+16]</a> V. Aaron, D. Sander, Z. Heiga, S. Karen, V. Oriol, G. Alex, K. Nal, S. Andrew and K. Koray. "WaveNet: A Generative Model for Raw Audio". abs/1609.03499 (2016). arXiv: 1609.03499
<a name='ViT'>[Ale+21]</a> D. Alexey, B. Lucas, K. A. Dirk, Z. Xiaohua, U. T. Mostafa, M. Matthias, H. G. Sylvain, U. Jakob and H. Neil. "An Image is Worth 16x16 Words: Transformers for ImageRecognition at Scale". (2021).
<a name='Rajkomar2018'>[Alv+18]</a> R. Alvin, O. Eyal, C. Kai, D. A. Nissan, H. Michaela, L. PeterJ, L. LiuXiaobing, M. Jake, S. Mimi, S. Patrik, Y. Hector, Z. Kun, Z. Yi, F. Gerardo, D. GavinE, I. Jamie, L. Quoc, L. K. Alexander, T. Justin, W. De, W. James, W. Jimbo, L. Dana, V. L, C. Katherine, P. Michael, M. MadabushiSrinivasan, S. NigamH, B. AtulJ, H. D, C. Claire, C. GregS and D. Jeffrey. "Scalable and accurate deep learning with electronic healthrecords". In: NPJ Digit Med (2018).
<a name='Vaswani2017'>[Ash+17]</a> V. Ashish, S. Noam, P. Niki, U. Jakob, J. Llion, G. AidanN, K. KaiserLukasz and P. Illia. "Attention Is All You Need". (2017).
<a name='T5'>[Col+19]</a> R. Colin, S. Noam, R. Adam, L. LeeKatherine, N. Sharan, M. Michael, Z. ZhouYanqi, L. Wei and L. PeterJ. "Exploring the Limits of Transfer Learning with a UnifiedText-to-Text Transformer". abs/1910.10683 (2019). arXiv: 1910.10683
<a name='bert'>[Jac+19]</a> D. Jacob, C. Ming-Wei, L. Kenton and T. ToutanovaKristina. "BERT: Pre-training of Deep Bidirectional Transformers forLanguage Understanding". (2019).
<a name='showAttendTell'>[Kel+15]</a> X. Kelvin, B. JimmyLei, K. Ryan, C. K. Aaron, S. Ruslan, Z. S and B. Yoshua. "Show, Attend and Tell: Neural Image Caption Generation withVisual Attention". (2015).
<a name='Weng2018attention'>[Lil18]</a> W. Lilian "Attention? Attention!". In: lilianweng.github.io/lil-log (2018).
<a name='Weng2019LM'>[Lil19]</a> W. Lilian "Generalized Language Models". In: lilianweng.github.io/lil-log (2019).
<a name='Luong2016thesis'>[Luo16]</a> M. Luong "Neural machine translation". (2016).
<a name='Tay2020transformers'>[Yi+20]</a> T. Yi, D. Mostafa, B. Dara and M. MetzlerDonald. "Efficient Transformers: A Survey". abs/2009.06732 (2020). arXiv: 2009.06732
<a name='dive'>[Zha+20]</a> A. Zhang, Z. Lipton, M. Li and A. Smola. "Dive into deep learning". (2020).
| github_jupyter |
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
<i>Licensed under the MIT License.</i>
# Pretraining word and entity embeddings
This notebook trains word embeddings and entity embeddings for DKN initializations.
```
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
import time
from utils.general import *
import numpy as np
import pickle
from utils.task_helper import *
class MySentenceCollection:
    """Streaming corpus of tokenized sentences for gensim's Word2Vec.

    Each line of the backing file is one sentence of space-separated
    tokens with CRLF line endings. The collection is re-iterable: every
    call to ``__iter__`` reopens the file, which Word2Vec relies on for
    its vocabulary pass followed by the training epochs.
    """

    def __init__(self, filename):
        self.filename = filename
        self.rd = None  # open handle while an iteration is in progress

    def __iter__(self):
        # Reopen on every pass; newline='\r\n' makes readline() split on CRLF.
        self.rd = open(self.filename, 'r', encoding='utf-8', newline='\r\n')
        return self

    def __next__(self):
        current = self.rd.readline()
        if not current:
            # EOF: release the handle before signalling exhaustion.
            self.rd.close()
            raise StopIteration
        return list(current.strip('\r\n').split(' '))
# Input/output locations for the embedding pipeline.
InFile_dir = 'data_folder/my'                           # preprocessed corpus (sentence.txt, word2idx.pkl)
OutFile_dir = 'data_folder/my/pretrained-embeddings'    # trained word2vec model + text export
OutFile_dir_KG = 'data_folder/my/KG'                    # knowledge-graph (TransE) vectors
OutFile_dir_DKN = 'data_folder/my/DKN-training-folder'  # .npy arrays consumed by DKN training
```
Word2vec [4] can learn high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. We use the word2vec algorithm implemented in Gensim [5] to generate word embeddings.
<img src="https://recodatasets.z20.web.core.windows.net/kdd2020/images%2Fword2vec.JPG" width="300">
```
def train_word2vec(Path_sentences, OutFile_dir):
    """Train a 32-dimensional word2vec model on the sentence corpus.

    Saves the native gensim model (``word2vec.model``) and a text-format
    export (``word2vec.txt``) under ``OutFile_dir``, creating the
    directory first if necessary.
    """
    out_model = os.path.join(OutFile_dir, r'word2vec.model')
    out_txt = os.path.join(OutFile_dir, r'word2vec.txt')
    create_dir(OutFile_dir)

    print('start to train word embedding...', end=' ')
    corpus = MySentenceCollection(Path_sentences)
    # NOTE: increase iter (epochs) for better embedding quality.
    w2v_model = Word2Vec(corpus, size=32, window=5, min_count=1, workers=8, iter=10)
    w2v_model.save(out_model)
    w2v_model.wv.save_word2vec_format(out_txt, binary=False)
    print('\tdone . ')
# Train the word embeddings and report how long training took.
Path_sentences = os.path.join(InFile_dir, 'sentence.txt')

start = time.time()
train_word2vec(Path_sentences, OutFile_dir)
elapsed = time.time() - start
print('time elapses: {0:.1f}s'.format(elapsed))
```
We leverage a graph embedding model to encode entities into embedding vectors.
<img src="https://recodatasets.z20.web.core.windows.net/kdd2020/images%2Fkg-embedding-math.JPG" width="600">
<img src="https://recodatasets.z20.web.core.windows.net/kdd2020/images%2Fkg-embedding.JPG" width="600">
We use an open-source implementation of TransE (https://github.com/thunlp/Fast-TransX) for generating knowledge graph embeddings:
```
!bash ./run_transE.sh
```
DKN take considerations of both the entity embeddings and its context embeddings.
<img src="https://recodatasets.z20.web.core.windows.net/kdd2020/images/context-embedding.JPG" width="600">
```
##### build context embedding
# Dimensionality of all embedding vectors; must match the word2vec size
# (32) trained above and the TransE training dimension.
EMBEDDING_LENGTH = 32
# TransE outputs and the KG triple file. gen_context_embedding presumably
# derives each entity's context vector from its KG neighbourhood — verify
# against utils.task_helper.
entity_file = os.path.join(OutFile_dir_KG, 'entity2vec.vec')
context_file = os.path.join(OutFile_dir_KG, 'context2vec.vec')
kg_file = os.path.join(OutFile_dir_KG, 'train2id.txt')
gen_context_embedding(entity_file, context_file, kg_file, dim=EMBEDDING_LENGTH)
# Convert the text-format entity/context vectors into .npy arrays for DKN.
load_np_from_txt(
    os.path.join(OutFile_dir_KG, 'entity2vec.vec'),
    os.path.join(OutFile_dir_DKN, 'entity_embedding.npy'),
)
load_np_from_txt(
    os.path.join(OutFile_dir_KG, 'context2vec.vec'),
    os.path.join(OutFile_dir_DKN, 'context_embedding.npy'),
)
# Align the word vectors with the word2idx vocabulary and save as .npy.
format_word_embeddings(
    os.path.join(OutFile_dir, 'word2vec.txt'),
    os.path.join(InFile_dir, 'word2idx.pkl'),
    os.path.join(OutFile_dir_DKN, 'word_embedding.npy')
)
```
## Reference
\[1\] Wang, Hongwei, et al. "DKN: Deep Knowledge-Aware Network for News Recommendation." Proceedings of the 2018 World Wide Web Conference on World Wide Web. International World Wide Web Conferences Steering Committee, 2018.<br>
\[2\] Knowledge Graph Embeddings including TransE, TransH, TransR and PTransE. https://github.com/thunlp/KB2E <br>
\[Dataset\] MIND: A Large-scale Dataset for News Recommendation. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics. https://msnews.github.io/competition.html <br>
\[3\] GloVe: Global Vectors for Word Representation. https://nlp.stanford.edu/projects/glove/ <br>
\[4\] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Distributed representations of words and phrases and their compositionality. In Proceedings of the 26th International Conference on Neural Information Processing Systems - Volume 2 (NIPSโ13). Curran Associates Inc., Red Hook, NY, USA, 3111โ3119. <br>
\[5\] Gensim Word2vec embeddings : https://radimrehurek.com/gensim/models/word2vec.html <br>
| github_jupyter |
##Mounting Drive
```
from google.colab import drive
drive.mount('/content/drive')
```
## Installing and Importing required Libraries
```
!pip install -q transformers
import numpy as np
import pandas as pd
from sklearn import metrics
import transformers
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import RobertaTokenizer, RobertaModel, RobertaConfig
from tqdm.notebook import tqdm
from transformers import get_linear_schedule_with_warmup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
```
## Data Preprocessing
```
df = pd.read_csv("/content/drive/My Drive/bert-multilabel/train.csv")
# Columns 3+ are the per-topic 0/1 labels; pack them into one list column.
df['list'] = df[df.columns[3:]].values.tolist()
new_df = df[['TITLE', 'ABSTRACT', 'list']].copy()
new_df.head()
```
## Model Configurations
```
# Defining some key variables that will be used later on in the training
MAX_LEN = 512            # token budget for the abstract input
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 4
EPOCHS = 3
LEARNING_RATE = 1e-05    # NOTE(review): the optimizer below is built with lr=3e-5, not this value
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
```
## Creating Custom Dataset class
```
class CustomDataset(Dataset):
    """Torch Dataset yielding RoBERTa-tokenized (abstract, title) pairs plus
    multi-label targets.

    Expects `dataframe` to have columns ABSTRACT, TITLE and `list`
    (a per-row list of 0/1 labels).
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.abstract = dataframe.ABSTRACT
        self.title = dataframe.TITLE
        self.targets = self.data.list
        self.max_len = max_len       # token budget for the abstract
        self.max_len_title = 200     # token budget for the title

    def __len__(self):
        return len(self.abstract)

    def __getitem__(self, index):
        abstract = str(self.abstract[index])
        title = str(self.title[index])
        # Collapse runs of whitespace before tokenizing.
        abstract = " ".join(abstract.split())
        # BUG FIX: the original re-normalized `abstract` here, so the title
        # branch silently encoded the abstract text a second time.
        title = " ".join(title.split())
        inputs_abstract = self.tokenizer.encode_plus(
            abstract,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            pad_to_max_length=True,
            truncation=True
        )
        inputs_title = self.tokenizer.encode_plus(
            title,
            None,
            add_special_tokens=True,
            max_length=self.max_len_title,
            pad_to_max_length=True,
            truncation=True
        )
        ids_abstract = inputs_abstract['input_ids']
        mask_abstract = inputs_abstract['attention_mask']
        ids_title = inputs_title['input_ids']
        mask_title = inputs_title['attention_mask']
        return {
            'ids_abstract': torch.tensor(ids_abstract, dtype=torch.long),
            'mask_abstract': torch.tensor(mask_abstract, dtype=torch.long),
            'ids_title': torch.tensor(ids_title, dtype=torch.long),
            'mask_title': torch.tensor(mask_title, dtype=torch.long),
            'targets': torch.tensor(self.targets[index], dtype=torch.float)
        }
train_size = 0.8
# 80/20 random split; random_state pins the sample for reproducibility.
train_dataset=new_df.sample(frac=train_size,random_state=200)
test_dataset=new_df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
print("FULL Dataset: {}".format(new_df.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))
# Wrap the splits so the DataLoaders tokenize on the fly.
training_set = CustomDataset(train_dataset, tokenizer, MAX_LEN)
testing_set = CustomDataset(test_dataset, tokenizer, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE,
                'shuffle': True,
                'num_workers': 0
                }
test_params = {'batch_size': VALID_BATCH_SIZE,
               'shuffle': True,
               'num_workers': 0
               }
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
```
## Roberta Model
```
# Creating the customized model, by adding a drop out and a dense layer on top of roberta to get the final output for the model.
class RobertaMultiheadClass(torch.nn.Module):
    """Two-input RoBERTa classifier.

    The same RoBERTa encoder is applied to the abstract and the title; the
    two pooled outputs are concatenated and passed through a dropout +
    two-layer head producing 6 topic logits.
    """

    def __init__(self):
        super(RobertaMultiheadClass, self).__init__()
        self.roberta = transformers.RobertaModel.from_pretrained('roberta-base')
        self.drop = torch.nn.Dropout(0.3)
        self.linear_1 = torch.nn.Linear(1536, 768)
        self.linear_2 = torch.nn.Linear(768, 6)

    def forward(self, ids_1, mask_1, ids_2, mask_2):
        # NOTE(review): tuple unpacking assumes the encoder returns
        # (sequence_output, pooled_output) — i.e. return_dict=False style.
        # Confirm against the installed transformers version.
        _, pooled_abstract = self.roberta(ids_1, attention_mask=mask_1)
        _, pooled_title = self.roberta(ids_2, attention_mask=mask_2)
        combined = torch.cat((pooled_abstract, pooled_title), dim=1)
        hidden = self.linear_1(self.drop(combined))
        return self.linear_2(self.drop(hidden))
model = RobertaMultiheadClass()
model.to(device)
```
## Hyperparameters & Loss function
```
def loss_fn(outputs, targets):
return torch.nn.BCEWithLogitsLoss()(outputs, targets)
param_optimizer = list(model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.001,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_parameters, lr=3e-5)
num_training_steps = int(len(train_dataset) / TRAIN_BATCH_SIZE * EPOCHS)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps = 0,
num_training_steps = num_training_steps
)
```
## Train & Eval Functions
```
def train(epoch):
    """Run one optimization epoch over `training_loader`.

    Relies on notebook globals: model, training_loader, optimizer,
    scheduler, device and loss_fn.
    """
    model.train()
    for step, data in tqdm(enumerate(training_loader, 0), total=len(training_loader)):
        ids_1 = data['ids_abstract'].to(device, dtype=torch.long)
        mask_1 = data['mask_abstract'].to(device, dtype=torch.long)
        ids_2 = data['ids_title'].to(device, dtype=torch.long)
        mask_2 = data['mask_title'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.float)

        outputs = model(ids_1, mask_1, ids_2, mask_2)
        loss = loss_fn(outputs, targets)
        if step % 1000 == 0:
            print(f'Epoch: {epoch}, Loss: {loss.item()}')
        # BUG FIX: the original called optimizer.zero_grad() twice per step;
        # a single call right before backward() is sufficient.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
def validation(epoch):
    """Evaluate `model` on `testing_loader`.

    Returns (probabilities, targets) as plain Python lists; probabilities
    are sigmoid-activated model outputs.
    """
    model.eval()
    all_targets = []
    all_probs = []
    with torch.no_grad():
        for _, batch in tqdm(enumerate(testing_loader, 0), total=len(testing_loader)):
            ids_1 = batch['ids_abstract'].to(device, dtype=torch.long)
            mask_1 = batch['mask_abstract'].to(device, dtype=torch.long)
            ids_2 = batch['ids_title'].to(device, dtype=torch.long)
            mask_2 = batch['mask_title'].to(device, dtype=torch.long)
            targets = batch['targets'].to(device, dtype=torch.float)
            logits = model(ids_1, mask_1, ids_2, mask_2)
            all_targets.extend(targets.cpu().detach().numpy().tolist())
            all_probs.extend(torch.sigmoid(logits).cpu().detach().numpy().tolist())
    return all_probs, all_targets
```
## Training Model
```
# Train for EPOCHS epochs, validating after each one; keep the checkpoint
# with the best micro-F1 on the held-out split.
MODEL_PATH = "/content/drive/My Drive/roberta-multilabel/model.bin"
best_micro = 0
for epoch in range(EPOCHS):
    train(epoch)
    outputs, targets = validation(epoch)
    # Threshold the sigmoid probabilities at 0.5 to get hard 0/1 labels.
    outputs = np.array(outputs) >= 0.5
    accuracy = metrics.accuracy_score(targets, outputs)
    f1_score_micro = metrics.f1_score(targets, outputs, average='micro')
    f1_score_macro = metrics.f1_score(targets, outputs, average='macro')
    print(f"Accuracy Score = {accuracy}")
    print(f"F1 Score (Micro) = {f1_score_micro}")
    print(f"F1 Score (Macro) = {f1_score_macro}")
    if f1_score_micro > best_micro:
        torch.save(model.state_dict(), MODEL_PATH)
        best_micro = f1_score_micro
```
## Predictions
```
# Reload the best checkpoint saved during training and switch to eval mode
# for inference.
PATH = "/content/drive/My Drive/roberta-multilabel/model.bin"
model = RobertaMultiheadClass()
model.load_state_dict(torch.load(PATH))
model.to(device)
model.eval()
def predict(id, abstract, title):
    """Tokenize one (abstract, title) pair, run the model, and return a
    numpy row `[id, label_1 .. label_6]` of hard 0/1 predictions.

    Relies on notebook globals: tokenizer, model, device.
    Note: the parameter `id` shadows the builtin; the name is kept for
    backward compatibility with existing callers.
    """
    MAX_LENGTH = 512        # abstract token budget
    MAX_LENGTH_TITLE = 200  # title token budget
    inputs_abstract = tokenizer.encode_plus(
        abstract,
        None,
        add_special_tokens=True,
        max_length=MAX_LENGTH,  # BUG FIX: constant was defined but unused
        pad_to_max_length=True,
        return_token_type_ids=True,
        truncation=True
    )
    inputs_title = tokenizer.encode_plus(
        title,
        None,
        add_special_tokens=True,
        max_length=MAX_LENGTH_TITLE,
        pad_to_max_length=True,
        return_token_type_ids=True,
        truncation=True
    )

    def _to_batch(ids, mask):
        # Add a batch dimension and move both tensors to the target device.
        ids_t = torch.tensor(ids, dtype=torch.long).unsqueeze(0).to(device)
        mask_t = torch.tensor(mask, dtype=torch.long).unsqueeze(0).to(device)
        return ids_t, mask_t

    ids_1, mask_1 = _to_batch(inputs_abstract['input_ids'], inputs_abstract['attention_mask'])
    ids_2, mask_2 = _to_batch(inputs_title['input_ids'], inputs_title['attention_mask'])

    with torch.no_grad():
        outputs = model(ids_1, mask_1, ids_2, mask_2)
    # Sigmoid -> probabilities -> round to hard 0/1 labels.
    outputs = torch.sigmoid(outputs).squeeze()
    outputs = np.round(outputs.cpu().numpy())
    return np.insert(outputs, 0, id)
def submit():
    """Build the submission DataFrame by running `predict` over test.csv.

    Column names are taken from the provided sample submission file; all
    values are cast to int.
    """
    test_df = pd.read_csv('/content/drive/My Drive/bert-multilabel/test.csv')
    sample_submission = pd.read_csv('/content/drive/My Drive/bert-multilabel/sample_submission_UVKGLZE.csv')
    rows = [
        predict(row_id, abstract, title)
        for row_id, abstract, title in tqdm(
            zip(test_df['ID'], test_df['ABSTRACT'], test_df['TITLE']),
            total=len(test_df))
    ]
    return pd.DataFrame(np.array(rows), columns=sample_submission.columns).astype(int)
submission = submit()
submission
submission.to_csv("roberta_baseline.csv", index=False)
```
| github_jupyter |
# Kafka brokerの構築手順
```
set -o pipefail
```
## Ansibleの設定
ansibleใใคใใฃใฆKafka brokerใฏใฉในใฟ(ใจzookeeperใฏใฉในใฟ)ใๆง็ฏใใใ
ansibleใฎใคใณใใณใใชใใกใคใซใไฝๆใใใ
```
cat >inventory.yml <<EOF
all:
children:
kafka:
hosts:
server1.example.jp:
#ใใใซKafkaใใใใใใในใใ็พ
ๅใใใ่กๆซใฎใณใญใณใๅฟใใใซ
vars:
ansible_user: piyo #ๅฎ่กใฆใผใถใฏๅคๆดใใ
ansible_ssh_private_key_file: ~/.ssh/id_rsa
ansible_python_interpreter: /usr/bin/python3
zookeeper:
hosts:
server1.example.jp:
#ใใใซZookeeperใใใใใใในใใ็พ
ๅใใใ่กๆซใฎใณใญใณใๅฟใใใซ
vars:
ansible_user: piyo #ๅฎ่กใฆใผใถใฏๅคๆดใใ
ansible_ssh_private_key_file: ~/.ssh/id_rsa
ansible_python_interpreter: /usr/bin/python3
EOF
```
ansibleใฎ่จญๅฎใใกใคใซใไฝๆใใใ
```
cat >ansible.cfg <<EOF
[defaults]
command_warnings = False
inventory = ./inventory.yml
EOF
```
ansibleใ้ใใฆzookeeperใจkafkaใๅฎ่กใใใในใใซใขใฏใปในใงใใใฎใ็ขบ่ชใใใ
```
ansible all -m ping
```
Dockerใใคใณในใใผใซใใใฆใใใฎใ็ขบ่ชใใใ
```
ansible all -m command -a "docker version"
```
## ZooKeeperクラスタの構築
zookeeperใฎdockerใคใกใผใธๅใจใใผใ็ชๅทใ่จญๅฎใใใ
PPORT,LPORT,CPORTใฏใปใใฎใตใผใในใฎใใผใ็ชๅทใจใถใคใใฃใฆใใใฎใงใชใใใฐๅคๆดใใๅฟ
่ฆใฏใชใใ
zookeeperใฎไปๆงใซใใCPORTใฏๅคๆดใงใใชใใ
```
DOCKER_IMAGE="zookeeper"
ZK_PPORT=12888 # peer
ZK_LPORT=13888 # leader
ZK_CPORT=2181 # client
```
zookeeperใ่ตทๅใใในใฏใชใใใ็ๆใใใ
```
# Cache the zookeeper host list once; ansible-inventory is slow to invoke.
LIST_ZOOKEEPER_HOSTS="$(ansible-inventory --list | jq -r '.zookeeper.hosts|.[]')"

# Print the zookeeper hosts, one per line.
list_zookeeper_hosts() {
    echo "$LIST_ZOOKEEPER_HOSTS"
}

# Emit the ZOO_SERVERS value for node MYID:
#   "server.<id>=<host>:<peer-port>:<leader-port> ..."
# The node's own entry is bound to 0.0.0.0 so it listens on all interfaces.
print_servers() {
    local MYID="$1"
    local HOST
    local ID=1
    local SERVER
    list_zookeeper_hosts | while read HOST; do
        if [ "$ID" = "$MYID" ]; then
            local ANYADDR="0.0.0.0"
            HOST="$ANYADDR"
        fi
        printf "server.$ID=$HOST:$ZK_PPORT:$ZK_LPORT "
        ID=$((ID + 1))
    done
    printf "\n"
}

# Write one docker-run launcher script per zookeeper host into DIR.
print_docker_run() {
    local DIR="$1"
    local ID=1
    list_zookeeper_hosts | while read HOST; do
        #local NAME="sinetstream-zookeeper-$ID"
        local NAME="sinetstream-zookeeper"
        local SERVERS="$(print_servers "$ID")"
        {
            printf "docker run --rm --detach --name '$NAME' --env 'ZOO_MY_ID=$ID' --env 'ZOO_SERVERS=$SERVERS' --publish $ZK_PPORT:$ZK_PPORT --publish $ZK_LPORT:$ZK_LPORT --publish $ZK_CPORT:$ZK_CPORT $DOCKER_IMAGE"
        } > "$DIR/zookeeper-docker_run-${HOST}.sh"
        ID=$((ID + 1))
    done
}

# Generate the launcher scripts into ./tmp and list them.
mkdir -p tmp &&
rm -f tmp/*.sh &&
print_docker_run tmp &&
ls -l tmp/*.sh
```
ansibleใใคใใฃใฆzookeeperใตใผใใผใ่ตทๅใใใ
```
ansible zookeeper -m script -a 'tmp/zookeeper-docker_run-{{inventory_hostname}}.sh'
ansible zookeeper -m command -a 'docker ps --filter "name=sinetstream-zookeeper"'
```
## Kafkaใฏใฉในใฟ
ๅ
ฌๅผใฎKafkaไธๅผใใใฆใณใญใผใใใใ
ๆๅ
ใงใใฆใณใญใผใใใฆใใๅใในใใซใณใใผใใใ
```
KAFKA="kafka_2.12-2.4.1"
wget --mirror http://ftp.kddilabs.jp/infosystems/apache/kafka/2.4.1/$KAFKA.tgz
ansible kafka -m command -a "mkdir -p \$PWD/sinetstream-kafka"
ansible kafka -m copy -a "src=$KAFKA.tgz dest=\$PWD/sinetstream-kafka/"
```
### KafkaใใญใผใซใผใใใใใCentOSใฎใณใณใใใไฝๆ
่ช่จผๆนๆณใใฉใใใฒใจใค้ธๆใใใ
```
#KAFKA_AUTH=SSL # SSL/TLS่ช่จผ๏ผใฏใฉใคใขใณใ่ช่จผ๏ผ; ้ไฟกใซTLSใใคใใใ่ช่จผใซ่จผๆๆธใใคใใ
KAFKA_AUTH=SASL_SSL_SCRAM # SCRAM่ช่จผ/TLS; ้ไฟกใซTLSใใคใใใ่ช่จผใซSCRAM(ใในใฏใผใ)ใใคใใ
#KAFKA_AUTH=SASL_SSL_PLAIN # ใในใฏใผใ่ช่จผ/TLS; ้ไฟกใซTLSใใคใใใ่ช่จผใซๅนณๆใในใฏใผใใใคใใ
#KAFKA_AUTH=PLAINTEXT # ้ไฟกใฏๆๅทๅใใใใ่ช่จผใใชใ โปใคใใฃใฆใฏใใใชใ
```
truststore/keystoreใไฟ่ญทใใใใใฎใในใฏใผใใ่จญๅฎใใใใในใฏใผใใฏ้ฉๅฝใซๅผทๅบฆใฎ้ซใๆๅญๅใๆๅฎใใใ
```
TRUSTSTORE_PASSWORD="trust-pass-00"
KEYSTORE_PASSWORD="key-pass-00"
```
SCAM่ช่จผใใในใฏใผใ่ช่จผใไฝฟใๅ ดๅใซใฏใใฆใผใถใผใฎใชในใใจๅใฆใผใถใฎใในใฏใผใใ่จญๅฎใใใ
SSL/TLS่ช่จผใไฝฟใๅ ดๅใฏใในใฏใผใใฏ่จญๅฎใใชใใฆใใใใฆใผใถใฎใชในใใ ใใ่จญๅฎใใใSSL/TLS่ช่จผใงใฎใฆใผใถๅใฏ่จผๆๆธใฎCommon Nameใงใใใ
ใฆใผใถ `admin` ใฏkafkaใใญใผใซ้ใฎ้ไฟกใซใคใใ็นๅฅใชใฆใผใถใชใฎใงๆถใใฆใฏใใใชใใ
ใในใฏใผใใฏๅๅใชๅผทๅบฆใใใฃใใใฎใซๅคๆดใในใใงใใใ
```
USER_LIST="user01 user02 user03 CN=client0,C=JP"
PASSWORD_admin="admin-pass"
PASSWORD_user01="user01-pass"
PASSWORD_user02="user02-pass"
PASSWORD_user03="user03-pass"
```
่ชๅฏ(ACL)ใฎ่จญๅฎใ
```
KAFKA_ACL_DEFAULT_TO_ALLOW="false" # trueใซ่จญๅฎใใใจACLใ่จญๅฎใใใฆใใชใใฆใผใถใฏใขใฏใปในใ่จฑๅฏใใใใ
ACL_user01="readwrite"
ACL_user02="write"
ACL_user03="read"
ACL_CN_client0_C_JP="readwrite" # ่ฑๆฐๅญไปฅๅคใฏ _ ใซ็ฝฎใๆใใฆ
```
```
KAFKA_PORT_SSL=9093
KAFKA_PORT_SASL_SSL=9093
```
่ช่จผๆนๆณใฎ่ฉณ็ดฐใชใใฉใกใผใฟใ่จญๅฎใใใ
```
SCRAM_MECHANISM="SCRAM-SHA-256"
```
Kafkaใใญใผใซใๅใใใณใณใใใไฝใใ
```
ansible kafka -m command -a "docker run \
--detach \
--interactive \
--net host \
--name sinetstream-kafka \
--volume \$PWD/sinetstream-kafka:/sinetstream-kafka \
centos:7"
ansible kafka -m command -a "docker exec sinetstream-kafka true"
```
ใณใณใใใซKafkaใฎๅฎ่กใซๅฟ
่ฆใชใฝใใใฆใงใขใใคใณในใใผใซใใใ
```
ansible kafka -m command -a "docker exec sinetstream-kafka yum update -y"
ansible kafka -m command -a "docker exec sinetstream-kafka yum install -y java-1.8.0-openjdk openssl"
ansible kafka -m command -a "docker exec sinetstream-kafka tar xf /sinetstream-kafka/$KAFKA.tgz" &&
ansible kafka -m command -a "docker exec sinetstream-kafka ln -s /$KAFKA /kafka"
```
### Kafkaใใญใผใซใฎ่จญๅฎ
kafkaใใญใผใซใฎ่จญๅฎใใกใคใซใ็ๆใใใ
```
# Cache the kafka broker host list once.
LIST_KAFKA_HOSTS="$(ansible-inventory --list | jq -r '.kafka.hosts|.[]')"

# Print the kafka broker hosts, one per line.
list_kafka_hosts() {
    echo "$LIST_KAFKA_HOSTS"
}

# Emit the per-broker lines appended to the stock server.properties.
# Arguments: HOST (advertised hostname), ID (numeric broker id).
# Reads globals: KAFKA_AUTH, ZK_CPORT, KAFKA_PORT_*, TRUSTSTORE_PASSWORD,
# KEYSTORE_PASSWORD, SCRAM_MECHANISM, PASSWORD_*, USER_LIST,
# KAFKA_ACL_DEFAULT_TO_ALLOW.
print_server_properties() {
    local HOST="$1"
    local ID="$2"
    echo "broker.id=${ID}"
    local ZKHOST
    # zookeeper.connect = comma-joined "<zk-host>:<client-port>" list.
    printf "zookeeper.connect="
    list_zookeeper_hosts | sed "s/\$/:${ZK_CPORT}/" | paste -s -d,
    # listeners: which sockets the broker binds, per the chosen auth mode.
    printf "listeners="
    {
    case "$KAFKA_AUTH" in
    PLAINTEXT) echo "PLAINTEXT://:${KAFKA_PORT_PLAINTEXT}" ;;
    SSL) echo "SSL://:${KAFKA_PORT_SSL}" ;;
    SASL_SSL*) echo "SASL_SSL://:${KAFKA_PORT_SASL_SSL}"
               echo "SSL://:$((KAFKA_PORT_SASL_SSL+1))" ;;
    esac
    } | paste -s -d,
    # advertised.listeners: the addresses clients are told to connect to.
    printf "advertised.listeners="
    {
    case "$KAFKA_AUTH" in
    PLAINTEXT) echo "PLAINTEXT://${HOST}:${KAFKA_PORT_PLAINTEXT}" ;;
    SSL) echo "SSL://${HOST}:${KAFKA_PORT_SSL}" ;;
    SASL_SSL*) echo "SASL_SSL://${HOST}:${KAFKA_PORT_SASL_SSL}"
               echo "SSL://${HOST}:$((KAFKA_PORT_SASL_SSL+1))" ;; # for inter-broker
    esac
    } | paste -s -d,
    # Trust store (CA certificate) settings.
    echo "ssl.truststore.location=/sinetstream-kafka/truststore.p12"
    echo "ssl.truststore.password=${TRUSTSTORE_PASSWORD}"
    echo "ssl.truststore.type=pkcs12"
    # Key store (server private key) settings.
    echo "ssl.keystore.location=/sinetstream-kafka/keystore.p12"
    echo "ssl.keystore.password=${KEYSTORE_PASSWORD}"
    echo "ssl.keystore.type=pkcs12"
    case "$KAFKA_AUTH" in
    SSL)
        # SSL/TLS (client certificate) authentication.
        echo "ssl.client.auth=required"
        echo "security.inter.broker.protocol=SSL"
        ;;
    SASL_SSL_SCRAM)
        # SCRAM authentication over TLS.
        echo "ssl.client.auth=required"
        echo "security.inter.broker.protocol=SSL"
        echo "sasl.enabled.mechanisms=${SCRAM_MECHANISM}"
        #echo "sasl.mechanism.inter.broker.protocol=${SCRAM_MECHANISM}"
        local scram_mechanism="$(echo "${SCRAM_MECHANISM}" | tr '[A-Z]' '[a-z]')"
        echo "listener.name.sasl_ssl.${scram_mechanism}.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \\"
        echo " username=admin password=${PASSWORD_admin};"
        ;;
    SASL_SSL_PLAIN)
        # Plain password authentication over TLS.
        echo "ssl.client.auth=required"
        echo "security.inter.broker.protocol=SSL"
        echo "sasl.enabled.mechanisms=PLAIN"
        #echo "sasl.mechanism.inter.broker.protocol=PLAIN"
        echo "listener.name.sasl_ssl.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \\"
        echo " username=admin password=${PASSWORD_admin} \\"
        local USER PASSWORD
        for USER in ${USER_LIST}; do
            eval PASSWORD=\$PASSWORD_${USER}
            echo " user_${USER}=\"${PASSWORD}\" \\"
        done
        echo " ;"
        ;;
    esac
    # Authorization.
    echo "authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer" # ACL-based authorization; ACLs are stored in ZooKeeper
    echo "allow.everyone.if.no.acl.found=${KAFKA_ACL_DEFAULT_TO_ALLOW}"
    echo "super.users=User:admin" # grant admin superuser privileges
}

# Build tmp/server-<host>.properties: the distribution's stock config
# followed by the per-broker additions above.
ID=1
tar x -f $KAFKA.tgz --to-stdout $KAFKA/config/server.properties >server.properties &&
mkdir -p tmp &&
rm -f tmp/*.properties &&
list_kafka_hosts | while read HOST; do
    {
        cat server.properties
        print_server_properties "$HOST" "$ID"
    } >"tmp/server-${HOST}.properties"
    ID=$((ID + 1))
done
ls -l tmp/server-*.properties
```
kafkaใใญใผใซใฎ่จญๅฎใใกใคใซใๅใในใใซใณใใผใใใ
```
ansible kafka -m copy -a "src=tmp/server-{{inventory_hostname}}.properties dest=\$PWD/sinetstream-kafka/server.properties"
```
### SSL/TLSใฎใใใฎ่จผๆๆธใ่จญๅฎ
opensslใใคใใฃใฆPEMๅฝขๅผใฎ่จผๆๆธใkafkaใใญใผใซใๆฑใใPKCS#12(p12)ๅฝขๅผใซๅคๆใใใ
CA่จผๆๆธใปใตใผใ็งๅฏ้ตใปใตใผใ่จผๆๆธใkafkaใใญใผใซใฎๅใใใณใณใใๅ
ใซใณใใผใใใ
่ชๅทฑ็ฝฒๅCA่จผๆๆธใฎๅ ดๅใฏCA็งๅฏ้ตใใณใใผใใใ
```
CA_CERT_PATH=./cacert.pem
CA_KEY_PATH=NONE
CA_KEY_PATH=./cakey.pem # CA่จผๆๆธใ่ชๅทฑ็ฝฒๅใฎๅ ดๅใฏCA็งๅฏ้ตใๆๅฎใใ
BROKER_CERT_PATH=./broker.crt
BROKER_KEY_PATH=./broker.key
# ไปฅไธใๅคๆดใใชใใฆใใ
CA_CERT_FILE=$(basename "${CA_CERT_PATH}")
BROKER_CERT_FILE=$(basename "${BROKER_CERT_PATH}")
BROKER_KEY_FILE=$(basename "${BROKER_KEY_PATH}")
if [ "x$CA_KEY_PATH" != "xNONE" ]; then
CA_KEY_FILE=$(basename "${CA_KEY_PATH}")
else
CA_KEY_FILE=""
fi
ansible kafka -m copy -a "src=${CA_CERT_PATH} dest=\$PWD/sinetstream-kafka/${CA_CERT_FILE}" &&
ansible kafka -m copy -a "src=${BROKER_CERT_PATH} dest=\$PWD/sinetstream-kafka/${BROKER_CERT_FILE}" &&
ansible kafka -m copy -a "src=${BROKER_KEY_PATH} dest=\$PWD/sinetstream-kafka/${BROKER_KEY_FILE}" &&
if [ -n "${CA_KEY_FILE}" ]; then
ansible kafka -m copy -a "src=${CA_KEY_PATH} dest=\$PWD/sinetstream-kafka/${CA_KEY_FILE}"
fi
```
CA่จผๆๆธใๅคๆใใฆtruststoreใซ็ป้ฒใใใ
```
ansible kafka -m command -a "docker exec sinetstream-kafka \
openssl pkcs12 -export \
-in sinetstream-kafka/${CA_CERT_FILE} \
${CA_KEY_FILE:+-inkey sinetstream-kafka/${CA_KEY_FILE}} \
-name private-ca \
-CAfile sinetstream-kafka/${CA_CERT_FILE}\
-caname private-ca \
-out sinetstream-kafka/truststore.p12 \
-passout pass:${TRUSTSTORE_PASSWORD}" &&
ansible kafka -m command -a "docker exec sinetstream-kafka \
openssl pkcs12 -in sinetstream-kafka/truststore.p12 -passin pass:${TRUSTSTORE_PASSWORD} -info -noout"
```
ใตใผใ็งๅฏ้ตใปใตใผใ่จผๆๆธใปCA่จผๆๆธใๅคๆใใฆkeystoreใซ็ป้ฒใใใ
```
ansible kafka -m command -a "docker exec sinetstream-kafka \
openssl pkcs12 -export \
-in sinetstream-kafka/${BROKER_CERT_FILE} \
-inkey sinetstream-kafka/${BROKER_KEY_FILE} \
-name broker \
-CAfile sinetstream-kafka/${CA_CERT_FILE} \
-caname private-ca \
-out sinetstream-kafka/keystore.p12 \
-passout pass:${KEYSTORE_PASSWORD}" &&
ansible kafka -m command -a "docker exec sinetstream-kafka \
openssl pkcs12 -in sinetstream-kafka/keystore.p12 -passin pass:${KEYSTORE_PASSWORD} -info -noout"
```
### SCRAM่ช่จผใฎ่จญๅฎ
ใในใฏใผใใzookeeperใซไฟๅญใใใ
```
if [ "x$KAFKA_AUTH" = "xSASL_SSL_SCRAM" ]; then
ZK1="$(list_zookeeper_hosts | head -1)"
KAFKA1="$(list_kafka_hosts | head -1)"
for USER in admin ${USER_LIST}; do
eval PASSWORD=\$PASSWORD_${USER}
ansible kafka --limit="${KAFKA1}" -m command -a "docker exec sinetstream-kafka \
/kafka/bin/kafka-configs.sh --zookeeper ${ZK1}:${ZK_CPORT} --alter \
--entity-type users \
--entity-name ${USER} \
--add-config 'SCRAM-SHA-256=[iterations=8192,password=${PASSWORD}]'"
done &&
ansible kafka -m command -a "docker exec sinetstream-kafka \
/kafka/bin/kafka-configs.sh --zookeeper ${ZK1}:${ZK_CPORT} --describe --entity-type users"
fi
```
### ่ชๅฏ(ACL)ใฎ่จญๅฎ
ใใญใผใซใใคใใฃใฆใใใตใผใ่จผๆๆธใฎCommon Nameใ่จญๅฎใใใใใญใผใซ้้ไฟกใฎ่ชๅฏใงๅฟ
่ฆใจใชใใ
```
ADMIN_USER="CN=server1.example.jp,C=JP"
ZK1="$(list_zookeeper_hosts | head -1)"
KAFKA1="$(list_kafka_hosts | head -1)"
ansible kafka --limit="${KAFKA1}" -m command -a "docker exec sinetstream-kafka \
/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=${ZK1}:${ZK_CPORT} \
--add --allow-principal User:${ADMIN_USER} --cluster --operation All" &&
for USER in ${USER_LIST}; do
USER1=$(echo "$USER" | sed 's/[^[:alnum:]]/_/g') # ใตใใฟใคใบ
eval ACL=\$ACL_${USER1}
case "${ACL}" in
*write*)
ansible kafka --limit="${KAFKA1}" -m command -a "docker exec sinetstream-kafka \
/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=${ZK1}:${ZK_CPORT} \
--add --allow-principal User:${USER} \
--producer --topic '*'"
;;
esac
case "${ACL}" in
*read*)
ansible kafka --limit="${KAFKA1}" -m command -a "docker exec sinetstream-kafka \
/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=${ZK1}:${ZK_CPORT} \
--add --allow-principal User:${USER} \
--consumer --topic '*' --group '*'"
;;
esac
done
ansible kafka --limit="${KAFKA1}" -m command -a "docker exec sinetstream-kafka \
/kafka/bin/kafka-acls.sh --authorizer-properties zookeeper.connect=${ZK1}:${ZK_CPORT} \
--list"
```
### Kafkaใใญใผใซใผ่ตทๅ
```
ansible kafka -m command -a "docker exec --detach sinetstream-kafka \
/kafka/bin/kafka-server-start.sh /sinetstream-kafka/server.properties"
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the reimbursements dump; id-like columns are forced to strings so
# leading zeros are preserved.
# NOTE(review): np.str was removed in NumPy 1.24+; use plain `str` there.
dataset = pd.read_csv('../data/2016-11-22-reimbursements.xz',
                      dtype={'document_id': np.str,
                             'congressperson_id': np.str,
                             'congressperson_document': np.str,
                             'term_id': np.str,
                             'cnpj_cpf': np.str,
                             'reimbursement_number': np.str},
                      low_memory=False)
# Keep only 2016 rows.
dataset = dataset[dataset['year']==2016]
dataset.head()
```
# Find spends: congress person per month
```
def find_spends_by_month(df, applicant_id):
    '''
    Return a dataframe with the sum of values of spends by month
    of the congress person of "applicant_id", and plot the result.

    Parameters:
        - df: pandas dataframe to be sliced
        - applicant_id: unique id of the congress person

    Ex: find_spends_by_month(df, 731)

    Result dataframe contains one column per month ("01/Jan" .. "12/Dec")
    with the summed total_net_value, plus the congress person's name.
    '''
    months = {1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
              7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec"}
    df_applicant = df[df.applicant_id == applicant_id]
    result = {
        "name": df_applicant["congressperson_name"].unique()
    }
    for m in months.keys():
        # BUG FIX: filter on df_applicant's own month column; the original
        # indexed df_applicant with a mask built from the full `df`, which
        # only worked through index alignment and triggers pandas
        # reindexing warnings.
        data = df_applicant[df_applicant.month == m]
        result["{:>02}".format(m) + "/" + months[m]] = data.total_net_value.sum()
    df_final = pd.DataFrame([result])
    ax = df_final.plot(kind='bar', title ="Congress Person Spends by Month", figsize=(25, 20), legend=True, fontsize=12)
    ax.set_xlabel("Month", fontsize=12)
    ax.set_ylabel("Value", fontsize=12)
    plt.show()
    # Reuse df_final instead of rebuilding an identical frame.
    return df_final
find_spends_by_month(dataset, 731)
```
# Find spends: Congress Person per Subquotas
```
def find_spends_by_subquota(df, applicant_id):
    '''
    Return a dataframe with the sum of values of spends by subquotas
    of the congress person of "applicant_id", and plot the result.

    Parameters:
        - df: pandas dataframe to be sliced
        - applicant_id: unique id of the congress person
    '''
    df_applicant = df[df.applicant_id == applicant_id]
    result = {
        "name": df_applicant["congressperson_name"].unique()
    }
    for c in df["subquota_description"].unique():
        # BUG FIX: filter df_applicant by its own column rather than with a
        # mask built from the full `df` (index-alignment hazard).
        data = df_applicant[df_applicant.subquota_description == c]
        result[c] = data.total_net_value.sum()
    df_final = pd.DataFrame([result])
    ax = df_final.plot(kind='bar', title ="Congress Person Spends by Subquotas", figsize=(25, 20), legend=True, fontsize=12)
    ax.set_xlabel("Subquotas", fontsize=12)
    ax.set_ylabel("Value", fontsize=12)
    plt.show()
    # Reuse df_final instead of rebuilding an identical frame.
    return df_final
find_spends_by_subquota(dataset, 731)
def find_spends_by_subquota(df, applicant_id, month=None):
    '''
    Return a dataframe with the sum of values of spends by subquotas
    of the congress person of "applicant_id", optionally restricted to a
    single month, and plot the result.

    Parameters:
        - df: pandas dataframe to be sliced
        - applicant_id: unique id of the congress person
        - month: optional month number (1-12); None means the whole year

    The result also carries a "total" column with the overall sum.
    '''
    df_applicant = df[df.applicant_id == applicant_id]
    result = {
        "name": df_applicant["congressperson_name"].unique(),
        "total": 0
    }
    if month is not None:
        df_applicant = df_applicant[df_applicant.month==month]
    for c in df["subquota_description"].unique():
        # BUG FIX: filter df_applicant by its own column; the original mask
        # came from the full `df` and relied on index alignment.
        data = df_applicant[df_applicant.subquota_description == c]
        result[c] = data.total_net_value.sum()
        result["total"] += result[c]
    df_final = pd.DataFrame([result])
    ax = df_final.plot(kind='bar', title ="Congress Person", figsize=(25, 20), legend=True, fontsize=12)
    ax.set_xlabel("Name", fontsize=12)
    ax.set_ylabel("Value", fontsize=12)
    plt.show()
    # Reuse df_final instead of rebuilding an identical frame.
    return df_final
find_spends_by_subquota(dataset, 731, 3)
```
# Find spends: all congress people
```
def find_sum_of_values(df, aggregator, property):
    '''
    Return a dataframe with the statistics of values from "property"
    aggregated by unique values from the column "aggregator".

    Parameters:
        - df: pandas dataframe to be sliced
        - aggregator: dataframe column that will be
          filtered by unique values
        - property: dataframe column containing values to be summed

    Ex: find_sum_of_values(data, 'congressperson_name', 'net_value')

    Result dataframe contains (for each aggregator unit):
        - property sum / mean / max / min
        - number of occurences in total
    '''
    total_label = '{}_total'.format(property)
    max_label = '{}_max'.format(property)
    mean_label = '{}_mean'.format(property)
    min_label = '{}_min'.format(property)
    result = {
        'occurences': [],
        aggregator: df[aggregator].unique(),
        max_label: [],
        mean_label: [],
        min_label: [],
        total_label: [],
    }
    # Hoisted out of the loop: the column's type does not change per item.
    is_str_key = isinstance(df[aggregator].iloc[0], str)
    for item in result[aggregator]:
        if is_str_key:
            item = str(item)
        values = df[df[aggregator] == item]
        property_total = int(values[property].sum())
        occurences = int(values[property].count())
        result[total_label].append(property_total)
        result['occurences'].append(occurences)
        # Guard: count() ignores NaN, so a group whose values are all NaN
        # would otherwise raise ZeroDivisionError.
        result[mean_label].append(property_total / occurences if occurences else 0.0)
        result[max_label].append(np.max(values[property]))
        result[min_label].append(np.min(values[property]))
    return pd.DataFrame(result).sort_values(by=aggregator)
df = find_sum_of_values(dataset, "congressperson_name", "total_net_value")
df[:10]
```
# Finding congress people that spent more than 500 thousand per year
```
# Keep only congress people whose yearly total exceeds 500 thousand.
df = df[df.total_net_value_total > 500000]
df
ax = df[["total_net_value_total"]].plot(kind='bar', title ="Congress Person Spends", figsize=(15, 10), legend=True, fontsize=12)
ax.set_xlabel("Congress Person", fontsize=12)
ax.set_ylabel("Value", fontsize=12)
plt.show()
```
| github_jupyter |
```
# default_exp visrectrans
```
# VisRecTrans
> A class for creating a custom [Vision Transformer (ViT)](https://arxiv.org/abs/2010.11929) model for visual recognition
```
#export
#hide
from nbdev.showdoc import *
from fastai.vision.all import *
import timm
import math
import warnings
#export
#hide
class EmbedBlock (Module) :
    """Prepends a learnable class token and adds learnable position
    embeddings to a batch of patch embeddings.

    Input/output shape: (B, num_patches, embed_dim) -> (B, num_patches + 1, embed_dim).
    """

    def __init__(self, num_patches, embed_dim):
        # fastai's Module handles nn.Module.__init__ for us.
        self.cls_tokens = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embeds = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))

    def forward(self, x):
        batch = x.shape[0]
        # Broadcast the single class token across the batch.
        tokens = self.cls_tokens.expand(batch, -1, -1)
        return torch.cat((tokens, x), dim=1) + self.pos_embeds
#export
#hide
class Header (Module) :
    """Classification head: maps the class-token embedding to `num_classes` logits."""
    def __init__ (self, ni, num_classes) :
        self.head = nn.Linear(ni, num_classes)
    def forward (self, x) :
        x = x[:, 0] # Extracting the class token, which is used for the classification task.
        x = self.head(x)
        return x
#export
#hide
def custom_ViT (timm_model_name, num_patches, embed_dim, ni, num_classes, pretrained = True) :
    """Build a ViT for `num_classes` from a timm backbone, swapping in a
    freshly sized EmbedBlock and a new classification Header.

    The timm model's children are split as: first child (patch embedding),
    middle children (transformer blocks etc.), last child (original head,
    discarded).
    """
    model = timm.create_model(timm_model_name, pretrained)
    module_layers = list(model.children())
    return nn.Sequential(
        module_layers[0],                      # patch embedding
        EmbedBlock(num_patches, embed_dim),    # new cls token + pos embeds
        nn.Sequential(*module_layers[1:-1]),   # backbone minus original head
        Header(ni, num_classes)                # new classification head
    )
#export
#hide
# The function below is heavily inspired by "https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py"
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(layer, param, mean=0., std=1., a=-2., b=2.):
    """Fill the named parameter `param` of `layer` with values drawn from a
    truncated normal distribution.

    The values are effectively drawn from N(mean, std^2) with values
    outside [a, b] redrawn until they are within the bounds. The method
    works best when a <= mean <= b.

    Args:
        layer: the nn.Module owning the parameter
        param: name of the parameter to initialize (e.g. 'weight')
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Returns the filled tensor.
    """
    tensor = layer.get_parameter(param)
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
#export
class VisRecTrans :
    """Class for setting up a vision transformer for visual recognition.

    Returns a pretrained custom ViT model for the given `model_name` and
    `num_classes`, by default, or, with randomly initialized parameters,
    if `pretrained` is set to False.
    """

    models_list = ['vit_large_patch16_224', 'vit_large_patch16_224_in21k', 'vit_huge_patch14_224_in21k', 'vit_small_patch16_224', 'vit_small_patch16_224_in21k']

    # (num_patches, embed_dim, ni) per supported timm model name.
    _model_dims = {
        'vit_small_patch16_224': (196, 384, 384),
        'vit_small_patch16_224_in21k': (196, 384, 384),
        'vit_large_patch16_224': (196, 1024, 1024),
        'vit_large_patch16_224_in21k': (196, 1024, 1024),
        'vit_huge_patch14_224_in21k': (256, 1280, 1280),
    }

    def __init__(self, model_name, num_classes, pretrained = True) :
        self.model_name = model_name
        self.num_classes = num_classes
        self.pretrained = pretrained
        dims = self._model_dims.get(model_name)
        # An unrecognized model name leaves the dimension attributes unset,
        # mirroring the original if/elif chain's behavior.
        if dims is not None:
            self.num_patches, self.embed_dim, self.ni = dims

    def create_model (self) :
        """Method for creating the model."""
        return custom_ViT(self.model_name, self.num_patches, self.embed_dim, self.ni, self.num_classes, self.pretrained)

    def initialize (self, model) :
        """Method for initializing the given `model`: truncated-normal
        initialization for the class token and position embeddings, and He
        initialization for the head.
        """
        trunc_normal_(model[1], 'cls_tokens')
        trunc_normal_(model[1], 'pos_embeds')
        apply_init(model[3], nn.init.kaiming_normal_)

    def get_callback (self) :
        """Method for getting the callback that toggles training of the
        embedding block; it is highly recommended to use this callback
        while training a ViT model.
        """
        class TrainEmbedCallback(Callback) :
            def before_train(self) :
                self.model[1].training = True
                self.model[1].requires_grad_(True)
            def before_validation(self) :
                # NOTE(review): fastai's event name is `before_validate`;
                # confirm this hook actually fires in the installed version.
                self.model[1].training = False
                self.model[1].requires_grad_(False)
        return TrainEmbedCallback()
# Render the fastai-style documentation for the public methods.
show_doc(VisRecTrans.create_model)
show_doc(VisRecTrans.initialize)
show_doc(VisRecTrans.get_callback)
```
Let's see if this class is working well :
```
# Smoke test: build a randomly-initialized small ViT with 10 classes and
# check it is an nn.Sequential (so it plugs into fastai's Learner).
vis_rec_ob = VisRecTrans('vit_small_patch16_224', 10, False)
model_test = vis_rec_ob.create_model()
vis_rec_ob.initialize(model_test)
assert isinstance(model_test, nn.Sequential)
```
As we see, the model is a sequential list of layers, and can be used with the `Learner` class of [fastai](https://docs.fast.ai), as we use any other model.
#### The list of models supported by the `VisRecTrans` class :
```
VisRecTrans.models_list
```
| github_jupyter |
```
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import tessreduce as tr
from scipy.optimize import minimize
from scipy import signal
from astropy.convolution import Gaussian2DKernel
from scipy.optimize import minimize
def Delta_basis(Size=13):
    """Construct a basis of delta-difference kernels.

    Returns a (Size*Size, Size, Size) array whose i-th element has a 1 at
    pixel i (row-major order) and, unless pixel i is the central pixel, a
    -1 at the centre, together with a vector of unit starting coefficients.
    """
    rows, cols = np.indices((Size, Size)).reshape(2, -1)
    centre = len(rows) // 2
    cr, cc = rows[centre], cols[centre]
    kernels = []
    for r, c in zip(rows, cols):
        kern = np.zeros((Size, Size))
        kern[r, c] = 1
        if not ((r == cr) and (c == cc)):
            # Every off-centre basis element subtracts the central pixel.
            kern[cr, cc] = -1
        kernels.append(kern)
    return np.array(kernels), np.ones(len(kernels))
# Sky coordinates (degrees) of the target; an alternative target is kept
# commented out for quick switching.
ra = 189.1385817
dec = 11.2316535
#ra = 64.526125
#dec = -63.61506944
# Download the TESS cutout, build a source mask, and subtract the
# estimated background from every frame.
tpf = tr.Get_TESS(ra,dec)
mask = tr.Make_mask(tpf,)
bkg = tr.New_background(tpf,mask,)
flux = tpf.flux.value - bkg
# Use frame 100 as the astrometric reference, measure centroid offsets,
# and shift all frames onto the reference grid.
ref = flux[100]
offset = tr.Centroids_DAO(flux,ref,TPF=tpf,parallel=False)
cor = tr.Shift_images(offset,flux)
def Delta_kernal(Scene, Image, Size=13):
    """Fit a difference kernel mapping `Scene` onto `Image`.

    Minimizes the summed absolute residual between `Image` and `Scene`
    convolved with a kernel expressed in the delta basis, with each basis
    coefficient bounded to [0, 1].  Returns the fitted 2-D kernel.
    """
    Basis, start = Delta_basis(Size)
    start *= 0.01  # small positive starting guess for every coefficient
    bounds = [(0, 1)] * len(start)
    fit = minimize(optimize_delta, start,
                   args=(Basis, Scene, Image), bounds=bounds)
    # Collapse the fitted coefficients back into a single 2-D kernel.
    return np.nansum(fit.x[:, np.newaxis, np.newaxis] * Basis, axis=0)
def optimize_delta(Coeff, Basis, Scene, Image):
    """Objective for `Delta_kernal`: summed absolute residual.

    Builds the kernel as the coefficient-weighted sum of the basis,
    convolves `Scene` with it, and returns sum(|Image - model|) with NaNs
    ignored.
    """
    kernel = np.nansum(Coeff[:, np.newaxis, np.newaxis] * Basis, axis=0)
    model = signal.fftconvolve(Scene, kernel, mode='same')
    return np.nansum(np.abs(Image - model))
# Quick check: fit a 7x7 kernel mapping frame 100 onto frame 1200.
thing = Delta_kernal(cor[100],cor[1200],Size=7)
def Make_temps(image, ref, size=7):
    """Return `ref` PSF-matched to `image` via a fitted difference kernel."""
    kernel = Delta_kernal(ref, image, Size=size)
    return signal.fftconvolve(ref, kernel, mode='same')
ref = cor[100]
temps = np.zeros_like(cor)
# NOTE(review): this serial loop computes `k`/`template` but never stores
# them, so `temps` stays all-zero; the Parallel call further down is what
# actually produces the templates.
for i in range(cor.shape[0]):
    k = Delta_kernal(ref,cor[i],Size=7)
    template = signal.fftconvolve(ref, k, mode='same')
    print(i)
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
# Build a kernel-matched template for every frame in parallel.
# NOTE(review): templates are fit against `flux[i]` here, whereas the
# serial loop used the shifted images `cor[i]` -- confirm which is intended.
num_cores = multiprocessing.cpu_count()
tmps = Parallel(n_jobs=num_cores)(delayed(Make_temps)(flux[i],ref) for i in tqdm(range(cor.shape[0])))
templates = np.array(tmps)
templates.shape
# Difference images: shifted frames minus their matched templates.
sub = cor - templates
# NOTE(review): `f` does not appear to be defined anywhere above -- this
# line will raise NameError as written.
sub2 = f - templates
#lc1, sky1 = tr.diff_lc(sub,tpf=tpf,ra=ra,dec=dec,tar_ap=3,sky_in=3,sky_out=5)
# Difference light curves: target position and a comparison pixel.
lc1, sky1 = tr.diff_lc(cor,tpf=tpf,ra=ra,dec=dec)
lc2, sky2 = tr.diff_lc(cor,tpf=tpf,x=45,y=50)
# Subtract the comparison light curve from the target light curve.
lc1[1] = lc1[1] - lc2[1]
plt.figure()
plt.fill_between(lc1[0],lc1[1]-lc1[2],lc1[1]+lc1[2],alpha=0.2)
plt.plot(lc1[0],lc1[1],'.')
plt.plot(sky1[0],sky1[1])
plt.fill_between(lc2[0],lc2[1]-lc2[2],lc2[1]+lc2[2],alpha=0.2,color='C2')
plt.plot(lc2[0],lc2[1],'C2')
# Compare a plain frame difference against the template-subtracted frame.
plt.figure()
plt.subplot(121)
plt.imshow(cor[1000]-ref,vmin=-10,vmax=10)
plt.colorbar()
plt.subplot(122)
plt.imshow(sub[1000],vmin=-10,vmax=10)
plt.colorbar()
# Manual single-frame kernel fit for inspection.  The synthetic delta
# image is immediately overwritten by the real reference frame.
test = np.zeros_like(cor[100])
test[45,45] = 1000
test = cor[100]
basis, coeff_0 = Delta_basis(7)
bds = []
for i in range(len(coeff_0)):
    bds += [(0,1)]
coeff_0 *= 0.01
res = minimize(optimize_delta, coeff_0, args=(basis,test,cor[1200]),bounds=bds)
res
from scipy.signal import convolve
k = np.nansum(res.x[:,np.newaxis,np.newaxis]*basis,axis=0)
template = signal.fftconvolve(test, k, mode='same')
# NOTE(review): `Kernel` (capital K) is not defined in this notebook --
# only lowercase `k` above; the following cells will raise NameError.
np.sum(Kernel)
plt.figure()
plt.imshow(Kernel)
plt.colorbar()
np.nansum(template)
np.nansum(test)
plt.figure()
plt.subplot(121)
plt.imshow(cor[1000]-template,vmin=-10,vmax=10)
plt.colorbar()
plt.subplot(122)
plt.imshow(cor[1000]-cor[100],vmin=-10,vmax=10)
plt.colorbar()
# Flux-ratio inspection with sigma clipping.
im = cor[1000].copy()
template = signal.fftconvolve(cor[100], Kernel, mode='same')
im[im< 10] = np.nan
res = np.nansum(abs(np.log10(im - np.log10(template))))
a = im/cor[100]-1
# NOTE(review): `sigma_clip` is used here but only imported at the bottom
# of this cell -- running cells top-to-bottom raises NameError.
m = sigma_clip(a,sigma=5).mask
a[m] = np.nan
plt.figure()
plt.imshow(a)
mask
np.nansum(a)
sigma_clip(im/cor[100]-1)
from astropy.stats import sigma_clip
```
| github_jupyter |
```
from __future__ import print_function
# !pip install tensorflow-gpu
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
%matplotlib inline
from keras.models import Sequential
from keras.layers import Dense , Dropout , Lambda, Flatten, Conv2D, MaxPool2D, BatchNormalization, Input,Concatenate
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
from keras.models import model_from_json
import cv2
import glob
import os
import pickle
#set global parameters
img_rows = 224
img_cols = 224
max_files = -1
read_from_cache = False
!cd ~/sharedfolder/
!git pull
%cd day04/
!ls
Path = '/root/sharedfolder/day04/'
filelist = glob.glob(Path)
filelist.sort()
filelist
def read_image(path, img_rows, img_cols):
    """Load the image at `path` and resize it to img_rows x img_cols."""
    return cv2.resize(cv2.imread(path), (img_cols, img_rows))
def read_train(path, img_rows, img_cols, max_files):
    """Read every *.JPG directly under `path` into one numpy array.

    img_rows / img_cols set the size of each image in the output;
    max_files caps how many images are read (-1 reads everything).
    """
    print('Read train images')
    images = []
    for count, file_path in enumerate(glob.glob(path + '*.JPG'), start=1):
        images.append(np.asarray(read_image(file_path, img_rows, img_cols)))
        if (count >= max_files) and (max_files > 0):
            break
    return np.array(images)
def cache_data(data, path):
    """Pickle `data` to `path` (cache for read/preprocessed data).

    The parent directory must already exist; otherwise a message is
    printed and nothing is written.
    """
    if os.path.isdir(os.path.dirname(path)):
        # Context manager guarantees the handle is closed even if pickling
        # fails part-way through (the original closed it manually).
        with open(path, 'wb') as file:
            pickle.dump(data, file)
    else:
        print('Directory doesnt exists')
def restore_data(path):
    """Unpickle and return cached data from `path`.

    Returns an empty dict when the file does not exist.
    """
    data = dict()
    if os.path.isfile(path):
        # Use a context manager: the original opened the file and never
        # closed it, leaking a handle on every call.
        with open(path, 'rb') as file:
            data = pickle.load(file)
    return data
def save_model(model, filename):
    """Save a keras model's architecture (JSON) and weights under cache/."""
    json_string = model.to_json()
    if not os.path.isdir('cache'):
        os.mkdir('cache')
    # Write through a context manager; the original used
    # open(...).write(...) which leaves the handle unclosed.
    with open(os.path.join('cache', filename + '_architecture.json'), 'w') as f:
        f.write(json_string)
    model.save_weights(os.path.join('cache', filename + '_model_weights.h5'), overwrite=True)
def read_model(filename):
    """Restore a keras model saved by `save_model` (architecture + weights)."""
    arch_path = os.path.join('cache', filename + '_architecture.json')
    # Read via a context manager so the handle is closed promptly (the
    # original relied on garbage collection to close it).
    with open(arch_path) as f:
        model = model_from_json(f.read())
    model.load_weights(os.path.join('cache', filename + '_model_weights.h5'))
    return model
# Read all card images at 224x224 and the multi-label targets.
tr_data = read_train(Path,224,224,-1)
y_train = pd.read_csv(Path+'card_files_labels.csv')
from keras.utils.np_utils import to_categorical
ids = y_train.card_file
y_train.drop('card_file',inplace=True,axis=1)
# NOTE(review): the labels appear to already be 0/1 indicator columns, so
# no one-hot encoding happens here -- `to_categorical` is imported but
# never used.
OHE_y_train = (y_train)
tr_data.shape
y_train
OHE_y_train.shape
plt.imshow(tr_data[1])
model= Sequential()
model.add(Conv2D(16,(2,2),activation='relu',input_shape=(img_rows,img_cols,3)))
model.add(Conv2D(32,(2,2),activation='relu'))
model.add(MaxPool2D(pool_size=(2,2),padding='valid'))
model.add(Conv2D(32,(2,2),activation='relu'))
model.add(Conv2D(16,(2,2),activation='relu'))
model.add(MaxPool2D(pool_size=(2,2),padding='valid'))
model.add(Flatten())
model.add(Dense(57, activation='sigmoid'))
model.summary()
model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
rlrop = ReduceLROnPlateau(factor=0.3)
datagen = ImageDataGenerator(
horizontal_flip=True,
rescale=0.5,
shear_range=0.1,
zoom_range=0.4,
rotation_range=360,
width_shift_range=0.1,
height_shift_range=0.1
)
model.fit_generator(datagen.flow(tr_data, OHE_y_train, batch_size=8,save_to_dir='/root/sharedfolder/double/gen_imgs/'),callbacks=[rlrop],
validation_data=datagen.flow(tr_data,OHE_y_train),
steps_per_epoch=50,validation_steps = 10, epochs=4,verbose=2)
cd /root/sharedfolder/double/single\ images/
ls
single_tr_data = read_train('/root/sharedfolder/double/single images/',80,80,-1)
sngl_imgs = [x.split('/')[-1].split('.')[0] for x in glob.glob('/root/sharedfolder/double/single images/'+'*.JPG')]
# sngl_imgs
np.setdiff1d(np.array(y_train.columns),sngl_imgs)
plt.imshow(single_tr_data[0])
from PIL import Image
background = Image.open('/root/sharedfolder/double/gen_imgs/blank.jpg', 'r')
img = Image.open('/root/sharedfolder/double/single images/cat.JPG', 'r')
img_w, img_h = img.size
bg_w, bg_h = background.size
offset = ((bg_w - img_w) / 2, (bg_h - img_h) / 2)
# img = img.rotate(45,resample=Image.NEAREST)
background.paste(img, offset)
background.save('/root/sharedfolder/double/gen_imgs/out.png')
gen = cv2.imread('/root/sharedfolder/double/gen_imgs/out.png')
plt.imshow(gen)
ls /root/sharedfolder/double/gen_imgs/
rm /root/sharedfolder/double/gen_imgs/*.png
img_rows,img_cols = 80,80
model= Sequential()
model.add(Conv2D(16,(2,2),activation='relu',input_shape=(img_rows,img_cols,3)))
model.add(Flatten())
model.add(Dense(57, activation='sigmoid'))
model.summary()
model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
singles_datagen = ImageDataGenerator(
horizontal_flip=True,
rotation_range=360,
width_shift_range=0.1,
height_shift_range=0.1
)
model.fit_generator(singles_datagen.flow(single_tr_data[:55], OHE_y_train, batch_size=8,save_to_dir='/root/sharedfolder/double/gen_imgs/'),callbacks=[rlrop],
validation_data=singles_datagen.flow(single_tr_data[:55],OHE_y_train),
steps_per_epoch=50,validation_steps = 10, epochs=4,verbose=2)
ls
gen = cv2.imread('/root/sharedfolder/double/gen_imgs/_1_9894.png')
plt.imshow(gen)
```
| github_jupyter |
# T5 for Cross-Language Plagiarism Detection
Author: João Phillipe Cardenuto
In this notebook we implement a model regarding the Detailed Analysis of the CLPD.
# Import Libraries
```
! pip install -q pytorch-lightning
! pip install -q transformers
# Mount drive
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
# Comum libraries
import os
import random
from typing import Dict
from typing import List
import numpy as np
import pandas as pd
import re
from argparse import Namespace
from tqdm.notebook import trange, tqdm_notebook
# Dataset
import sys
sys.path.insert(0, "/work/src/DataloaderCLPD/")
from LoadDataset import *
# Torch
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
# HugginFace
from transformers import T5Tokenizer, T5ForConditionalGeneration
# Sklearn
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Tersorflow
import tensorboard
%load_ext tensorboard
# Lightning
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
# Setup seeds
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
print("Device",dev)
if "cuda" in dev:
print("GPU: ", torch.cuda.get_device_name(0))
# Loading T5 Tokenizer with portuguese chars
port_tokenizer = T5Tokenizer.from_pretrained('t5-base')
extra_tokens = ['ร','รง','ร' , 'ร' , 'ร' , 'ร' , 'ร' , 'ร ' , 'รจ' , 'รฌ' , 'รฒ' , 'รน' , 'ร' , 'ร' , 'ร' , 'ร' , 'ร' , 'รก' , 'รฉ' , 'รญ' , 'รณ' , 'รบ' , 'ร' , 'ร' , 'ร' , 'ร' , 'ร' , 'รข' , 'รช' , 'รฎ' , 'รด' , 'รป' , 'ร' ,'แบผ', 'ร' , 'รฃ', 'แบฝ','รต' , 'ร', 'รค' , 'รซ' , 'รฏ' , 'รถ' , 'รผ']
new_tokens = {}
for i in extra_tokens: print(f'({i},{port_tokenizer.decode(port_tokenizer.encode(i))})',end=", ")
print("\n","*-"*10,"New Tokens","*-"*10)
for i in extra_tokens:
# Add_tokens return 0 if token alredy exist, and 1 if It doesnt.
if port_tokenizer.add_tokens(i):
print(f"{i},{port_tokenizer.encode(text=i,add_special_tokens=False,)}", end=" |")
new_tokens[i] = port_tokenizer.encode(text=i,add_special_tokens=False)[0]
```
## Loading Data
Using LoadDataset module to load capes dataset
```
# We using sample size of x it represent x*(2*N_negatives + 2) = x*(ENG + PT + ENG_NEGATIVE_1 ... + ENG_NEGATIVE_N +
# PT_NEGATIVE_1 + ... + PT_NEGATIVE_N)
capes_dataset = CLPDDataset(name='capes',data_type='train',sample_size=1000,val_size=0.2,n_negatives=1)
train_capes, val_capes = capes_dataset.get_organized_data(tokenizer=port_tokenizer,tokenizer_type='t5')
# Samples
print(train_capes[0])
print("Number of Samples:", len(train_capes.pairs))
dataloader_debug = DataLoader(train_capes, batch_size=10, shuffle=True,
num_workers=0)
token_ids, attention_mask, token_type_ids, labels, _ = next(iter(dataloader_debug))
print('token_ids:\n', token_ids)
print('token_type_ids:\n', token_type_ids)
print('attention_mask:\n', attention_mask)
print('labels:\n', labels)
print('token_ids.shape:', token_ids.shape)
print('token_type_ids.shape:', token_type_ids.shape)
print('attention_mask.shape:', attention_mask.shape)
print('labels.shape:', labels.shape)
batch_size = 2
# train_dataloader = DataLoader(dataset_train, batch_size=batch_size,
# shuffle=True, num_workers=4)
val_dataloader = DataLoader(val_capes, batch_size=batch_size, shuffle=True,
num_workers=4)
test_dataloader = DataLoader(val_capes, batch_size=batch_size, shuffle=False,
num_workers=4)
# test_dataloader = DataLoader(dataset_test, batch_size=batch_size,
# shuffle=False, num_workers=4)
# NOTE(review): scratch debug cell -- `label`, `a`, `input_ids` and
# `mode2l` (typo for `model`?) are not defined at this point in the
# notebook; running this cell as-is raises NameError.
label
port_tokenizer.decode(a[0])
port_tokenizer.encode(f"{0} {port_tokenizer.eos_token}",max_length=3, pad_to_max_length=True)
[valid_prediction(a[index],label[index])
 for index in range(len(a))]
a = mode2l(input_ids=input_ids.to(device), attention_mask=attention_mask.to(device),
           lm_labels=labels.to(device))[0]
```
## T5-Model with Pytorch Lightning
```
def valid_prediction(pred, label):
    """Decode a generated prediction and its label into integer classes.

    Returns (prediction, label) when the decoded prediction is exactly 0
    or 1; otherwise returns (not label, label) so the sample is counted
    as misclassified.
    """
    decoded_pred = port_tokenizer.decode(pred)
    decoded_label = int(port_tokenizer.decode(label))
    # Accept strings like "0", "1", "1.0" (one decimal point allowed).
    if decoded_pred.replace('.', '', 1).isnumeric():
        value = float(decoded_pred)
        if value in (0.0, 1.0):
            return (int(value), decoded_label)
    # Non-binary generation: force a wrong answer for this sample.
    return (int(not decoded_label), decoded_label)
class T5Finetuner(pl.LightningModule):
    """PyTorch-Lightning module that fine-tunes T5 for binary CLPD.

    The task is cast as text-to-text: the model learns to *generate* the
    string "0" or "1" for each sentence pair, and `valid_prediction`
    decodes the generated tokens back into integer classes.
    """
    def __init__(self, hparams,train_dataloader,val_dataloader,test_dataloader):
        super(T5Finetuner, self).__init__()
        # Hyperparameters (an argparse.Namespace).
        # NOTE(review): assigning directly to `self.hparams` only works on
        # older pytorch-lightning releases -- confirm the pinned version.
        self.hparams = hparams
        # NOTE(review): requires `hparams.version` to exist; some of the
        # hyperparameter dicts in this notebook omit that key.
        self.experiment_name = f"{self.hparams.experiment_name}_{self.hparams.version}"
        # Dataloaders are injected so the same module can be reused for
        # training and for cross-dataset testing.
        self._train_dataloader = train_dataloader
        self._val_dataloader = val_dataloader
        self._test_dataloader = test_dataloader
        # Learning rate and loss function.
        # NOTE(review): `lossfunc` is never used below -- T5 computes its
        # own loss internally when labels are provided.
        self.learning_rate = hparams.learning_rate
        self.lossfunc = torch.nn.CrossEntropyLoss()
        # Optimizer class and maximum length of the generated target.
        self.optimizer = self.hparams.optimizer
        self.target_max_length = self.hparams.target_max_length
        # Retrieve model from Huggingface
        self.model = T5ForConditionalGeneration.from_pretrained('t5-base').to(device)
    def forward(self, input_ids, attention_mask, labels=None):
        # Training mode: return the LM loss; eval mode: return the
        # greedily generated token ids.
        if self.training:
            # Ref https://huggingface.co/transformers/model_doc/t5.html#training
            # NOTE(review): `lm_labels` was renamed to `labels` in later
            # transformers releases -- this call pins an older version.
            loss = self.model(input_ids=input_ids,
                              attention_mask=attention_mask,
                              lm_labels=labels)[0]
            return loss
        else:
            # REF https://huggingface.co/transformers/main_classes/model.html?highlight=generate#transformers.PreTrainedModel.generate
            # Greedy decoding (do_sample=False) of at most
            # `target_max_length` tokens.
            predicted_token_ids = self.model.generate(
                input_ids=input_ids,
                max_length=self.target_max_length,
                do_sample=False,
            )
            return predicted_token_ids
    def training_step(self, batch, batch_nb):
        # batch: (token_ids, attention_mask, token_type_ids, labels, pairs)
        input_ids, attention_mask, _, label,_ = batch
        # fwd
        loss = self(input_ids=input_ids.to(device),
                    attention_mask=attention_mask.to(device),
                    labels=label.to(device))
        # logs
        tensorboard_logs = {'train_loss': loss.item()}
        return {'loss': loss, 'log': tensorboard_logs}
    def validation_step(self, batch, batch_nb):
        # batch
        input_ids, attention_mask, _, labels,_ = batch
        # fwd -- NOTE(review): the attention mask is deliberately dropped
        # (attention_mask=None) at generation time; confirm this is intended.
        predicted_token_ids = self(input_ids.to(device), attention_mask=None,)
        # Decode generated tokens into (pred, true) integer pairs.
        pred_true_decoded = [valid_prediction(predicted_token_ids[index],labels[index])
                             for index in range(len(predicted_token_ids))]
        y_pred = [y[0] for y in pred_true_decoded]
        y_true = [y[1] for y in pred_true_decoded]
        return {'y_pred': y_pred, 'y_true': y_true}
    def validation_epoch_end(self, outputs):
        # Flatten per-batch lists and report epoch-level F1.
        y_true = np.array([ y for x in outputs for y in x['y_true'] ])
        y_pred = np.array([ y for x in outputs for y in x['y_pred'] ])
        val_f1 = f1_score(y_pred=y_pred, y_true=y_true)
        val_f1 = torch.tensor(val_f1)
        tensorboard_logs = {'val_f1': val_f1 }
        return {'val_f1': val_f1,
                'progress_bar': tensorboard_logs, "log": tensorboard_logs}
    def test_step(self, batch, batch_nb):
        # Same as validation_step, but also keeps the raw sentence pairs
        # so failures can be dumped to disk in test_epoch_end.
        input_ids, attention_mask, _ , labels, pairs = batch
        predicted_token_ids = self(input_ids.to(device), attention_mask=None)
        pred_true_decoded = [valid_prediction(predicted_token_ids[index],labels[index])
                             for index in range(len(predicted_token_ids))]
        y_pred = [y[0] for y in pred_true_decoded]
        y_true = [y[1] for y in pred_true_decoded]
        return {'pairs': pairs, 'y_true': y_true, 'y_pred':y_pred }
    def test_epoch_end(self, outputs):
        pairs = [pair for x in outputs for pair in x['pairs']]
        y_true = np.array([ y for x in outputs for y in x['y_true'] ])
        y_pred = np.array([ y for x in outputs for y in x['y_pred'] ])
        # Write every misclassified pair to a per-experiment failure file.
        with open (f"FAILURE_{self.experiment_name}.txt", 'w') as file:
            for index,pair in enumerate(pairs):
                if y_true[index] != y_pred[index]:
                    file.write("="*50+f"\n[Y_TRUE={y_true[index]} != Y_PRED={y_pred[index]}]\n"+pair \
                               +'\n'+"="*50+'\n')
        print("CONFUSION MATRIX:")
        print(confusion_matrix(y_true=y_true, y_pred=y_pred))
        print("SKLEARN REPORT")
        print(classification_report(y_true=y_true, y_pred=y_pred))
        test_f1 = f1_score(y_pred=y_pred, y_true=y_true)
        tensorboard_logs = {'test_f1': test_f1}
        return {'test_f1': test_f1, 'log': tensorboard_logs,
                'progress_bar': tensorboard_logs}
    def configure_optimizers(self):
        # Optimize all trainable parameters with a StepLR schedule that
        # decays the LR by `scheduling_factor` every `steplr_epochs` epochs.
        optimizer = self.optimizer(
            [p for p in self.parameters() if p.requires_grad],
            lr=self.learning_rate)
        scheduler = StepLR(optimizer, step_size=self.hparams.steplr_epochs, gamma=self.hparams.scheduling_factor)
        return [optimizer], [scheduler]
    def train_dataloader(self):
        return self._train_dataloader
    def val_dataloader(self):
        return self._val_dataloader
    def test_dataloader(self):
        return self._test_dataloader
# Quick-debug hyperparameters.
# NOTE(review): T5Finetuner.__init__ reads `hparams.version`, which is
# missing from this dict -- instantiating the model below will raise
# AttributeError unless a "version" key is added.
hyperparameters = {"experiment_name": "T5-DEBUG",
                   "max_epochs": 2,
                   "patience": 4,
                   "optimizer": torch.optim.Adam,
                   "target_max_length": 10,
                   "scheduling_factor": 0.8,
                   "learning_rate": 1e-5,
                   "steplr_epochs":4,
                   }
# Debug run reuses the small validation/test loaders for training.
model = T5Finetuner(hparams=Namespace(**hyperparameters),
                    train_dataloader=val_dataloader,
                    val_dataloader=test_dataloader,
                    test_dataloader=test_dataloader)
```
## Number of Parameter
```
sum([torch.tensor(x.size()).prod() for x in model.parameters() if x.requires_grad]) # trainable parameters
```
## Fast dev run
```
trainer = pl.Trainer(gpus=1,
logger=False,
checkpoint_callback=False, # Disable checkpoint saving.
fast_dev_run=True,
amp_level='O2', use_amp=False
)
trainer.fit(model)
trainer.test(model)
del model
```
## Overfit on a Batch
We notice that easily the model can overfit on a batch
```
hyperparameters = {"experiment_name": "T5CLPD",
"optimizer": torch.optim.Adam,
"target_max_length": 3,
"max_epochs": 5,
"patience": 4,
"steplr_epochs":5,
"scheduling_factor": 0.95,
"learning_rate": 6e-5,
"max_length": 200
}
trainer = pl.Trainer(gpus=1,
logger=False,
max_epochs=hyperparameters['max_epochs'],
check_val_every_n_epoch=5,
checkpoint_callback=False, # Disable checkpoint saving
overfit_pct=0.5,
amp_level='O2', use_amp=False)
model = T5Finetuner(hparams=Namespace(**hyperparameters),
train_dataloader=val_dataloader,
val_dataloader=test_dataloader,
test_dataloader=test_dataloader)
trainer.fit(model)
trainer.test(model)
# del model
# del trainer
train_capes[0]
```
## Training
```
# Training will perform a cross-dataset.
max_length = 200
scielo_dataset = CLPDDataset(name='scielo',data_type='train',sample_size=100000,val_size=0.2,max_length=200,n_negatives=1)
scielo_test = CLPDDataset(name='scielo',data_type='test',n_negatives=1,max_length=200)
# T5 Tokenizer with portuguese chars
tokenizer = port_tokenizer
# Traning data
train_scielo, val_scielo = scielo_dataset.get_organized_data(tokenizer=tokenizer,tokenizer_type='t5')
test_scielo = scielo_test.get_organized_data(tokenizer=tokenizer,tokenizer_type='t5')
len(test_scielo)
#------tester-----------#
# DataLoaders #
#-------------------#
batch_size = 32
train_dataloader = DataLoader(train_scielo, batch_size=batch_size,
shuffle=True, num_workers=4)
val_dataloader = DataLoader(val_scielo, batch_size=batch_size, shuffle=False,
num_workers=4)
test_dataloader = DataLoader(test_scielo, batch_size=batch_size,
shuffle=False, num_workers=4)
# Hiperparameters
hyperparameters = {"experiment_name": "T5-SCIELO",
"optimizer": torch.optim.Adam,
"target_max_length": 3,
"max_epochs": 3,
"patience": 4,
"steplr_epochs":1,
"scheduling_factor": 0.95,
"learning_rate": 6e-5,
"max_length": max_length,
'batch_size': batch_size
}
#------------------------------#
# Checkpoints #
#------------------------------#
log_path = 'logs'
ckpt_path = os.path.join(log_path, hyperparameters["experiment_name"], "-{epoch}-{val_f1:.2f}")
checkpoint_callback = ModelCheckpoint(prefix=hyperparameters["experiment_name"], # prefixo para nome do checkpoint
filepath=ckpt_path, # path onde serรก salvo o checkpoint
monitor="val_f1",
mode="max",
save_top_k=2)
# Hard coded
# resume_from_checkpoint = '/content/drive/My Drive/P_IA376E_2020S1/Class-8 BERT/TASK/logs/Electra-400/Electra-400-epoch=37-val_loss=0.18.ckpt'
resume_from_checkpoint= False
# Early-stopping configuration
early_stop = EarlyStopping(monitor="val_loss",
patience=hyperparameters["patience"],
verbose=False,
mode='min'
)
logger = TensorBoardLogger(hyperparameters["experiment_name"],name='T5' ,version="NEGATIVE_1")
# Lighting Trainer
trainer = pl.Trainer(gpus=1,
logger=logger,
max_epochs=hyperparameters["max_epochs"],
check_val_every_n_epoch=1,
accumulate_grad_batches=5,
checkpoint_callback=checkpoint_callback,
# resume_from_checkpoint=resume_from_checkpoint,
amp_level='O2', use_amp=False)
hparams = Namespace(**hyperparameters)
model = T5Finetuner(hparams=hparams,train_dataloader=train_dataloader,val_dataloader=val_dataloader,test_dataloader=test_dataloader)
# Train
trainer.fit(model)
```
## Test model on Capes dataset
```
trainer.test(model)
```
-----
# Test model on CAPES dataset
```
# T5 tokenizer with portuguese chars
tokenizer = port_tokenizer
max_length = 200
batch_size = 128
scielo_dataset = CLPDDataset(name='capes',data_type='test',n_negatives=1,max_length=200)
scielo_dataset = scielo_dataset.get_organized_data(tokenizer=tokenizer,tokenizer_type='t5')
scielo_dataloader = DataLoader(scielo_dataset, batch_size=batch_size,
shuffle=False, num_workers=4)
# Hiperparameters
hyperparameters = {"experiment_name": "T5-SCIELO",
"version": 'TEST-ON-CAPES',
"optimizer": torch.optim.Adam,
"target_max_length": 3,
"max_epochs": 3,
"patience": 4,
"steplr_epochs":1,
"scheduling_factor": 0.95,
"learning_rate": 6e-5,
"max_length": max_length,
'batch_size': batch_size
}
#------------------------------#
# Checkpoints #
#------------------------------#
log_path = 'logs'
ckpt_path = os.path.join(log_path, hyperparameters["experiment_name"], "-{epoch}-{val_loss:.2f}")
# Resume from checkpoint Hard coded
resume_from_checkpoint= '/work/src/T5/logs/T5-SCIELO/T5-SCIELO-epoch=1-val_f1=0.98.ckpt'
# Logger
logger = TensorBoardLogger(hyperparameters["experiment_name"], name='T5' ,version=hyperparameters['version'])
# Lighting Tester
tester = pl.Trainer(gpus=1,
logger=logger,
resume_from_checkpoint=resume_from_checkpoint,
amp_level='O2', use_amp=False)
hparams = Namespace(**hyperparameters)
model = T5Finetuner(hparams=hparams,train_dataloader=None,val_dataloader=None,test_dataloader=scielo_dataloader)
tester.test(model)
```
-----
## Test on books dataset
```
tokenizer = port_tokenizer
max_length = 200
batch_size = 300
books_dataset = CLPDDataset(name='books',data_type='test')
books_dataset = books_dataset.get_organized_data(tokenizer=tokenizer,tokenizer_type='t5')
books_dataloader = DataLoader(books_dataset, batch_size=batch_size,
shuffle=False, num_workers=4)
# Hiperparameters
hyperparameters = {"experiment_name": "T5-SCIELO",
"version": 'TEST-ON-BOOKS',
"optimizer": torch.optim.Adam,
"target_max_length": 3,
"max_epochs": 3,
"patience": 4,
"steplr_epochs":1,
"scheduling_factor": 0.95,
"learning_rate": 6e-5,
"max_length": max_length,
'batch_size': batch_size
}
#------------------------------#
# Checkpoints #
#------------------------------#
# Resume from checkpoint Hard coded
resume_from_checkpoint= '/work/src/T5/logs/T5-SCIELO/T5-SCIELO-epoch=1-val_f1=0.98.ckpt'
# Logger
logger = TensorBoardLogger(hyperparameters["experiment_name"], name='T5' ,version=hyperparameters['version'])
# Lighting Tester
tester = pl.Trainer(gpus=1,
logger=logger,
resume_from_checkpoint=resume_from_checkpoint,
amp_level='O2', use_amp=False)
hparams = Namespace(**hyperparameters)
model = T5Finetuner(hparams=hparams,train_dataloader=None,val_dataloader=None,test_dataloader=books_dataloader)
tester.test(model)
```
| github_jupyter |
## Some Math
Let's assume all objects are always centered at $x=0$ to simplify the FFT handling.
We need a few relations to understand the math.
1. The Fourier transform of a function like $x^2W(x)$ is $F[x^2W(x)] \propto \frac{d^2\hat{W}(k)}{dk^2}$.
2. The Fourier transform of a Gaussian is a Gaussian, which we can write generically as $\exp(-\alpha^2 k^2)$. Here $\alpha$ is related to the real-space FWHM of the profile via some constants we won't bother with.
3. A convolution in real-space is a product in Fourier space.
4. A weighted sum over a profile in real-space can be written as an integral in Fourier space.
This last relation is worth discussing in detail. Suppose we have an image $I(x)$, a weight function $W(x)$, and we want to compute the integral $f = \int dx I(x) W(x)$. This integral is actually the value of the convolution of $I(x)$ with $W(x)$ at $x=0$,
$$
f = \int dx I(x) W(x) = \left. \int dx I(x) W(x - y)\right|_{y = 0}
$$
In Fourier space we can write this relation as
$$
f \propto \left.\int dk \hat{I}(k)\hat{W}(k) \exp(-iky)\right|_{y=0} = \int dk \hat{I}(k)\hat{W}(k)
$$
So this property combined with item 1 above means we can write the weighted moments of an object in real-space as integrals in Fourier space over the weight function and its derivatives
$$
f \propto \int dk \hat{I}(k)\hat{W}(k)
$$
$$
<x^2> \propto \int dk \hat{I}(k)\frac{d^{2}\hat{W}(k)}{dk_x^2}
$$
$$
<xy> \propto \int dk \hat{I}(k)\frac{d^2\hat{W}(k)}{dk_x dk_y}
$$
$$
<y^2> \propto \int dk \hat{I}(k)\frac{d^2\hat{W}(k)}{dk_y^2}
$$
## What about the PSF?
So now let's assume we have an object, a PSF, and a weight function. Further, let's assume that the weight function is always bigger than the PSF and that the weight function is Gaussian.
In this case, we can immediately see that all of the derivatives of the weight function in Fourier space can be written as a product of some polynomial and the weight function itself. The constraint that the weight function be larger than the PSF means that $\alpha_{psf} < \alpha_{w}$. Finally, we have some object with $\alpha_g$.
In terms of the profile of $k$ we have the following situation illustrated in the plot below.
```
import proplot as plot
import numpy as np
def prof(k, a):
    """Return log10 of the Gaussian profile exp(-(a*k)**2)."""
    gaussian = np.exp(-np.square(a * k))
    return np.log10(gaussian)
# Plot the Fourier-space profiles of the galaxy, PSF, convolved galaxy and
# weight function; the weight (aw) is chosen broader in real space, i.e.
# its alpha is larger than the PSF's (aw > apsf).
k = np.logspace(-1, 1.5, 100)
apsf = 1
aw = 1.5
ag = 0.25
fig, axs = plot.subplots(figsize=(4, 4))
axs.semilogx(k, prof(k, np.sqrt(ag**2+apsf**2)), label='gal+psf')
axs.semilogx(k, prof(k, apsf), label='psf')
axs.semilogx(k, prof(k, ag), label='gal')
axs.semilogx(k, prof(k, aw), label='wgt')
axs.format(xlabel='log10[k]', ylabel='log10[f(k)]')
axs.legend()
```
From this plot you can see that even for real-space moments, as long as the Fourier transforms of the moment kernels are broader than PSF, we remove modes suppressed by the PSF. Thus we can set these suppressed modes (where the PSF amplitude cannot be deconvolved) in Fourier space to zero without harm.
| github_jupyter |
# Neural Networks
## 1. Neural Networks
In this section, we will implement backpropagation algorithm to learn the parameters for the neural network.
### 1.1 Visualizing the data
The data is the same as assignment 3, 5000 training examples, each contains a 20 pixel by 20 pixel grayscale image of the digit.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
from scipy.io import loadmat
data = loadmat('ex3data1.mat')
X = data["X"] # 5000x400 np array
y = data["y"] # 5000x1 np array (2d)
y = y.flatten() # change to (5000,) 1d array and
y[y==10] = 0 # in original data, 10 is used to represent 0
def displayData(X):
    """Display the first 100 rows of X (20x20 images) in a 10x10 grid.

    Returns the matplotlib figure and axes handles.
    """
    # Assemble a (209, 209) canvas: 10x10 tiles of 20px images separated
    # by 1px of padding initialized to -1 (rendered black).
    tile = 20
    step = tile + 1
    canvas = -np.ones((10 * tile + 9, 10 * tile + 9))
    for col in range(10):
        for row in range(10):
            img = X[10 * col + row, :].reshape((tile, tile))
            canvas[row * step:row * step + tile,
                   col * step:col * step + tile] = img
    fig, ax = plt.subplots(figsize=(6, 6))
    # Transpose: the pixel data is stored column-major (MATLAB layout).
    ax.imshow(canvas.T, cmap=plt.cm.gray, vmin=-1, vmax=1)
    ax.axis('off')
    return fig, ax
# randomly select 100 data points to display
rand_indices = np.random.randint(0, 5000, size=100)
sel = X[rand_indices, :]
# display images
fig, ax = displayData(sel)
```
### 1.2 Model representation
Our neural network is shown in the following figure. It has 3 layers: an input layer, a hidden layer and an output layer. The neural network used contains 25 units in the 2nd layer and 10 output units (corresponding to 10 digit classes).

### 1.3 Feedforward and cost function
Recall that the cost function for the neural network (without regularization) is:
$$ J(\theta)=\frac{1}{m}\sum_{i=1}^{m} \sum_{k=1}^{K}[-y^{(i)}log((h_\theta(x^{(i)}))_k)-(1-y^{(i)})log(1-(h_\theta(x^{(i)}))_k)]$$
where $h_\theta(x^{(i)})$ is computed as shown in the above figure and K=10 is the total number of possible labels. Note that $h_\theta(x^{(i)})_k = a_k^{(3)}$ is the activation of the k-th output unit. Also, remember that whereas the original labels (in the variable y) were 0, 1, ..., 9, for the purpose of training a neural network, we need to recode the labels as vectors containing only values 0 or 1, so:
$$ y = \left[\matrix{1\\ 0\\ 0\\ \vdots\\ 0}\right], \left[\matrix{0\\ 1\\ 0\\ \vdots\\ 0}\right], ..., or \left[\matrix{0\\ 0\\ 0\\ \vdots\\ 1}\right] $$
#### Vectorization
Matrix dimensions:
$X_{wb}$: 5000 x 401
$\Theta^{(1)}$: 25 x 401
$\Theta^{(2)}$: 10 x 26
$a^{(2)}$: 5000 x 25 or 5000 x 26 after adding intercept terms
$a^{(3)} or H_\theta(x)$: 5000 x 10
$Y$: 5000 x 10
$$a^{(2)} = g(X_{wb}\Theta^{(1)^T})$$
$$ H_\theta(x) = a^{(3)} = g(a^{(2)}_{wb}\Theta^{(2)^T})$$
$$ H_\theta(x) = \left[\matrix{-(h_\theta(x^{(1)}))^T-\\ -(h_\theta(x^{(2)}))^T-\\ \vdots\\ -(h_\theta(x^{(m)}))^T-}\right] $$
$$ Y = \left[\matrix{-(y^{(1)})^T-\\ -(y^{(2)})^T-\\ \vdots\\ -(y^{(m)})^T-}\right] $$
Therefore, cost is:
$$ J(\theta)=\frac{1}{m} \sum_{matrix-elements} (-Y .* log(H_\theta(x))-(1-Y) .* log(1-H_\theta(x))) $$
Note the element wise multiplication (.*) and sum of all matrix elements in the above equation.
### 1.4 Regularized cost function
The cost function for neural networks with regularization is given by:
$$ J(\theta)=\frac{1}{m}\sum_{i=1}^{m} \sum_{k=1}^{K}[-y^{(i)}log((h_\theta(x^{(i)}))_k)-(1-y^{(i)})log(1-(h_\theta(x^{(i)}))_k)] + \frac{\lambda}{2m}\left[\sum_{j=1}^{25}\sum_{k=1}^{400}(\Theta_{j, k}^{(1)})^2 + \sum_{j=1}^{10}\sum_{k=1}^{25}(\Theta_{j, k}^{(2)})^2\right]$$
Note that even though the additional regularization term seems complicated with all the cascaded Sigma symbols, it is actually just the sum of all elements (after taking square) in the $\Theta$ matrix, one of them is 25 by 400, the other is 10 by 25 (recall that bias term is by convention not included in regularization). If your regularization parameter $\lambda$ is very very large, then all your $\Theta$ will converge to zero.
#### Vectorization
For the regularization term, there's actually nothing much to vectorize. Using elementwise self-multiplication then sum all elements in the result will do it:
$$ J(\theta)=\frac{1}{m} \sum_{matrix-elements} (-Y .* log(H_\theta(x))-(1-Y) .* log(1-H_\theta(x))) + \frac{\lambda}{2m} \left[\sum_{matrix-elements}(\Theta_{j, k}^{(1)} .* \Theta_{j, k}^{(1)})+\sum_{matrix-elements}(\Theta_{j, k}^{(2)} .* \Theta_{j, k}^{(2)})\right]$$
```
def sigmoid(z):
    """Element-wise logistic function.

    Accepts a scalar, vector, or matrix and returns 1 / (1 + e^(-z))
    with the same shape as the input.
    """
    return 1.0 / (1.0 + np.exp(-z))
def nnCostFunction(nn_params, input_lsize, hidden_lsize, num_labels, X, y, lmd):
    """Compute the (optionally regularized) cost of the 3-layer network.

    Parameters
    ----------
    nn_params : 1d array, Theta1 and Theta2 unrolled row-major ('C' order).
    input_lsize, hidden_lsize, num_labels : layer sizes (without bias units).
    X : (m, input_lsize+1) design matrix, bias column already included.
    y : (m, num_labels) one-hot label matrix.
    lmd : regularization strength lambda.

    Returns
    -------
    J : float - mean cross-entropy plus the L2 penalty on non-bias weights.
    """
    # number of training samples
    m, n = X.shape
    # restore Theta1 (hidden x input+1) and Theta2 (labels x hidden+1)
    Theta1 = nn_params[:hidden_lsize * (input_lsize + 1)].reshape((hidden_lsize, input_lsize + 1))
    Theta2 = nn_params[hidden_lsize * (input_lsize + 1):].reshape((num_labels, hidden_lsize + 1))
    # forward propagation (sigmoid written inline so the function is self-contained)
    a2 = 1.0 / (1.0 + np.exp(-(X @ Theta1.T)))
    a2_wb = np.concatenate((np.ones((m, 1)), a2), axis=1)
    a3 = 1.0 / (1.0 + np.exp(-(a2_wb @ Theta2.T)))  # i.e. H_theta
    # unregularized cross-entropy cost, summed over every matrix element
    cross_entropy = -y * np.log(a3) - (1 - y) * np.log(1 - a3)
    # BUG FIX: the bias (first) column of each Theta must NOT be regularized;
    # the original summed Theta**2 over ALL columns, which disagrees with the
    # formula in the markdown above and with the expected value 0.383770.
    reg = np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2)
    J = np.sum(cross_entropy) / m + lmd * reg / (2 * m)
    return J
# Network architecture for the digit-recognition task
input_lsize = 400  # 20x20 pixel images, unrolled
hidden_lsize = 25
num_labels = 10
m = len(y)  # number of samples
# add bias terms to X
X_wb = np.concatenate((np.ones((m, 1)), X), axis=1)
# convert y to 2d one-hot matrix Y, 5000 by 10 (each row has exactly one 1)
Y = np.zeros((m, num_labels))
for i, v in enumerate(y):
    # NOTE: a commented-out variant here used to map v=0 to position 9
    # (the MATLAB ex4 convention); we use Python's zero-indexing instead.
    Y[i, v] = 1
# Load pre-calculated nn_params Theta1 and Theta2
# Theta1: 25 by 401
# Theta2: 10 by 26
# NOTE(review): the course's ex4 ships these weights in ex4weights.mat, but
# this loads 'ex3weights.mat' - confirm the file name matches your data set.
# from scipy.io import loadmat
data = loadmat('ex3weights.mat')
Theta1 = data["Theta1"]
Theta2 = data["Theta2"]
# unroll Theta1 and Theta2 into nn_params
# NOTE: ndarray.flatten() unrolls by row, unlike MATLAB's A(:) (by column);
# the round trip is still correct because reshape() below also uses row
# order. flatten() and reshape() just need to agree on the order:
# either both numpy default ('C'), or both 'F'.
nn_params = np.concatenate((Theta1.flatten(), Theta2.flatten()))
print(nn_params.shape)  # should be (10285,)
# Regularization factor
lmd = 0
# Test nnCostFunction()
J = nnCostFunction(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(J)
print("Expected ~0.287629")
# test cost function with regularization
lmd = 1
J = nnCostFunction(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(J)
print("Expected around 0.383770")
```
## 2. Backpropagation
In this part, we implement the backpropagation algo to compute the gradient for the neural network cost function. Once this is done, we will be able to train the neural network by minimizing the cost function using an optimizer.
### 2.1 Sigmoid gradient
The gradient for the sigmoid function can be computed as:
$$ g'(z)=\frac{d}{dz}g(z)=g(z)(1-g(z))$$
where
$$g(z)=\frac{1}{1+e^{-z}}$$
For large values (both positive and negative) of z, the gradient should be close to 0. When z = 0, the gradient should be exactly 0.25.
```
def sigmoidGradient(z):
    """Derivative of the logistic function, evaluated element-wise.

    Works for scalars, vectors and matrices. The gradient peaks at
    exactly 0.25 for z == 0 and decays towards 0 for large |z|.
    """
    g = 1.0 / (1.0 + np.exp(-z))
    return g * (1.0 - g)
# Sanity check: sigmoidGradient peaks at 0.25 for z = 0 and is ~0 for |z| >> 0
z = np.array([-10, 0, 10])
print(sigmoidGradient(z))
```
### 2.2 Random initialization
When training neural networks, it is important to randomly initialize the parameters for symmetry breaking. Otherwise, the units in hidden layers will be identical to each other.
One effective strategy for random initialization is to randomly select values for $\Theta^{(l)}$ uniformly in the range $[-\epsilon_{init}, \epsilon_{init}]$. You should use $\epsilon_{init}=0.12$. This range of values ensures that the parameters are kept small and makes the learning more efficient.
```
def randInitializeWeights(L_in, L_out):
    """Draw a weight matrix for one layer, uniform in [-0.12, 0.12].

    The returned matrix has shape (L_out, 1 + L_in); the extra first
    column corresponds to the bias unit. Small symmetric random values
    break the symmetry between hidden units while keeping the initial
    activations inside the sigmoid's responsive range.
    """
    eps = 0.12
    uniform01 = np.random.rand(L_out, 1 + L_in)
    return uniform01 * (2.0 * eps) - eps
```
### 2.3 Backpropagation

Recall that the intuition behind the backpropagation algorithm is as follows. Given a training example (x(t); y(t)), we will first run a "forward pass" to compute all the activations throughout the network, including the output value of the hypothesis $h_\theta(x)$. Then, for each node $j$ in layer $l$, we would like to compute an "error term" $\delta_j^{(l)}$ that measures how much that node was "responsible" for any errors in the output.
For an output node, we can directly measure the difference between the network's activation and the true target value, and use that to define $\delta_j^{(3)}$(since layer 3 is the output layer). For the hidden units, you will compute
$\delta_j^{(l)}$ based on a weighted average of the error terms of the nodes in layer $(l + 1)$.
Detailed steps are as follows:
1) Perform a feedforward pass, computing the activations for Layers 2 and 3
2) For each output unit k in Layer 3 (the output layer), set
$$\delta_k^{(3)}=a_k^{(3)}-y_k$$
where $y_k\in[0,1]$ indicates whether the current training example belongs to class k or not.
3) For Layer 2, set
$$\delta^{(2)} = (\Theta^{(2)})^T\delta^{(3)}.*g'(z^{(2)})$$
4) Accumulate the gradient from this example using the following formula. Note that you should skip or remove $\delta_0^{(2)}$:
$$\Delta^{(l)}=\Delta^{(l)}+\delta^{(l+1)}(a^{(l)})^T$$
Do this for all training examples.
5) Obtain the gradient by dividing the accumulated gradients by m:
$$\frac{\partial}{\partial\Theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}$$
#### Vectorization
Here, we still use the full vectorization form that we used above, so we have:
$$\delta^{(3)}=a^{(3)}-y$$
$$\delta^{(2)} = \delta^{(3)}\,\Theta^{(2)}_{:,\,1:}\;.*\;g'(z^{(2)})$$ (the first column of $\Theta^{(2)}$, which multiplies the bias unit, is dropped so that the dimensions match $z^{(2)}$: 5000 x 25)
$$\Delta^{(l)}=(\delta^{(l+1)})^Ta^{(l)}$$
where the matrix dimensions are as follows:
$X_{wb}, a^{(1)}$: 5000 x 401 with intercept terms
$a^{(2)}, \delta^{(2)}, z^{(2)}$: 5000 x 25, without intercept terms
$a^{(3)}, y, \delta^{(3)}$: 5000 x 10
$\Theta^{(1)}$: 25 x 401 (but intercept terms will remain unchanged in gradient descent)
$\Theta^{(2)}$: 10 x 26 (but intercept terms will remain unchanged in gradient descent)
### 2.4 Regularized Neural Networks
To account for regularization, we can add an additional term after computing the gradient using backpropagation.
The formula are as follows:
$$\frac{\partial}{\partial\Theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}\qquad for\; j=0$$
$$\frac{\partial}{\partial\Theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}+\frac{\lambda}{m}\Theta_{ij}^{(l)}\qquad for\; j\geq 1$$
Note that you should not regularize the first column of $\Theta$.
```
def nnCostFunction2(nn_params, input_lsize, hidden_lsize, num_labels, X, y, lmd):
    """Compute the regularized cost AND gradient of the 3-layer network.

    Parameters
    ----------
    nn_params : 1d array, Theta1 and Theta2 unrolled row-major ('C' order).
    input_lsize, hidden_lsize, num_labels : layer sizes (without bias units).
    X : (m, input_lsize+1) design matrix, bias column already included.
    y : (m, num_labels) one-hot label matrix.
    lmd : regularization strength lambda.

    Returns
    -------
    (J, grad) : the scalar cost, and the unrolled (row-major) concatenation
        of dJ/dTheta1 and dJ/dTheta2.
    """
    # number of training samples
    m, n = X.shape
    # restore Theta1 and Theta2 from nn_params
    Theta1 = nn_params[:hidden_lsize * (input_lsize + 1)].reshape((hidden_lsize, input_lsize + 1))
    Theta2 = nn_params[hidden_lsize * (input_lsize + 1):].reshape((num_labels, hidden_lsize + 1))
    # forward propagation (sigmoid written inline so the function is self-contained)
    z2 = X @ Theta1.T
    a2 = 1.0 / (1.0 + np.exp(-z2))
    a2_wb = np.concatenate((np.ones((m, 1)), a2), axis=1)
    a3 = 1.0 / (1.0 + np.exp(-(a2_wb @ Theta2.T)))  # i.e. H_theta
    # Cost. BUG FIX: exclude the bias (first) column of each Theta from the
    # L2 penalty so the cost is consistent with the gradient below —
    # otherwise numerical gradient checking fails for lambda > 0.
    cross_entropy = -y * np.log(a3) - (1 - y) * np.log(1 - a3)
    reg = np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2)
    J = np.sum(cross_entropy) / m + lmd * reg / (2 * m)
    # Backpropagation
    delta3 = a3 - y                                        # (m, num_labels)
    # sigmoid'(z2) == a2 * (1 - a2); reuse a2 instead of recomputing exp
    delta2 = (delta3 @ Theta2[:, 1:]) * (a2 * (1 - a2))    # (m, hidden)
    Theta2_grad = delta3.T @ a2_wb / m                     # (labels, hidden+1)
    Theta1_grad = delta2.T @ X / m                         # (hidden, input+1); X is a1
    # regularize every column except the bias column
    Theta1_grad[:, 1:] += lmd * Theta1[:, 1:] / m
    Theta2_grad[:, 1:] += lmd * Theta2[:, 1:] / m
    # unroll gradients (numpy default order is row-major)
    grad = np.concatenate((Theta1_grad.flatten(), Theta2_grad.flatten()))
    return J, grad
# Gradient check without regularization (lambda = 0)
# NOTE(review): the "Expected" values below presumably come from the course's
# debug weights - confirm they match the Theta matrices loaded above.
lmd = 0
debug_J, debug_grad = nnCostFunction2(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(debug_grad[:10])
print("Expected: [ 6.18712766e-05 0.00000000e+00 0.00000000e+00 4.15336892e-09 \n" +
      "-5.29868773e-08 1.42184272e-07 1.59715308e-06 -8.89999550e-07 \n" +
      "-1.45513067e-06 -4.08953470e-07]")
# Gradient check with regularization (lambda = 3)
lmd = 3
debug_J, debug_grad = nnCostFunction2(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(debug_grad[:10])
print("Expected: [ 6.18712766e-05 -6.33744979e-12 1.31648811e-12 2.87621717e-14 \n" +
      "3.09854983e-10 -3.45710507e-09 -2.85907272e-08 -1.54564033e-08 \n" +
      "2.10275154e-08 1.92242492e-08]")
```
### 2.6 Learning parameters using 'minimize' function
```
from scipy.optimize import minimize
# initial condition, 1d array (random weights break symmetry between units)
init_Theta1 = randInitializeWeights(input_lsize, hidden_lsize)
init_Theta2 = randInitializeWeights(hidden_lsize, num_labels)
init_nn_params = np.concatenate((init_Theta1.flatten(), init_Theta2.flatten()))
# run optimization; jac=True tells minimize the function returns (J, grad)
# NOTE(review): lmd still holds the last value assigned above (3) - confirm
# this is the intended regularization strength for training.
result = minimize(nnCostFunction2, init_nn_params, args=(input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd),
                  method='TNC', jac=True, options={'disp': True})
print(result.x)
# Obtain Theta1 and Theta2 from result.x (row-major reshape matches flatten)
nn_params = result.x
Theta1 = nn_params[:hidden_lsize*(input_lsize+1)].reshape((hidden_lsize, input_lsize+1))
Theta2 = nn_params[hidden_lsize*(input_lsize+1):].reshape((num_labels, hidden_lsize+1))
def predict(X, Theta1, Theta2):
    """Forward-propagate X through the trained network and return, for
    every row of X, the index (0-based) of the class with the largest
    output activation.

    X must already contain the bias column. Returns a 1d integer array
    of length X.shape[0].
    """
    hidden = 1.0 / (1.0 + np.exp(-(X @ Theta1.T)))
    # prepend the bias unit to the hidden-layer activations
    rows = hidden.shape[0]
    hidden_wb = np.concatenate((np.ones((rows, 1)), hidden), axis=1)
    output = 1.0 / (1.0 + np.exp(-(hidden_wb @ Theta2.T)))
    # the label with the largest activation wins
    return np.argmax(output, axis=1)
# prediction accuracy on the training set
pred = predict(X_wb, Theta1, Theta2)
print(pred.shape)
accuracy = np.sum((pred==y).astype(int))/m*100
print('Training accuracy is {:.2f}%'.format(accuracy))
# randomly show 10 images and corresponding results
# randomly select 10 data points to display
rand_indices = np.random.randint(0, 5000, size=10)
sel = X[rand_indices, :]
for i in range(10):
    # Display predicted digit
    print("Predicted {} for this image: ".format(pred[rand_indices[i]]))
    # display image; each row is a 20x20 image stored column-major, hence .T
    fig, ax = plt.subplots(figsize=(2, 2))
    ax.imshow(sel[i, :].reshape(20, 20).T, cmap=plt.cm.gray, vmin=-1, vmax=1)
    ax.axis('off')
    plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/SainiManisha/convnet-tutorial/blob/master/CNN_MNIST.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Convolutional Neural Networks

## Convolution
 
## Max Pooling

**Import the Library and Packages**
```
from matplotlib import pyplot as plt
import numpy as np
```
**Import Dataset**
```
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
# 60k training / 10k test grayscale digit images (28x28 uint8 arrays)
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
```
**Normalize the Data**
```
# Add a channel axis (NHWC) and scale pixel intensities from [0, 255] to [0, 1]
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
print('train_images shape',train_images.shape)
print('train_labels shape',train_labels.shape)
print('test_images shape',test_images.shape)
print('test_labels shape',test_labels.shape)
```
**Change the class Label using one hot encoding**
```
# One-hot encode the integer class labels (10 classes)
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
```
**Build the model for feature extraction**
```
#@title
from tensorflow.keras import layers
from tensorflow.keras import models
# Convolutional feature extractor: Conv-Pool-Conv-Pool-Conv
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3),
                        activation='relu',
                        name='C1',
                        input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2),
                              name='M2'))
model.add(layers.Conv2D(64, (3, 3),
                        name='C3', activation='relu'))
model.add(layers.MaxPooling2D((2, 2), name='M4'))
model.add(layers.Conv2D(64, (3, 3),
                        activation='relu', name='C5'))
model.summary()
```

**Adding a classifier on top of the convnet**
```
# Classifier head: flatten the feature maps, then two dense layers
# ending in a softmax over the 10 digit classes
model.add(layers.Flatten(name='F6'))
model.add(layers.Dense(64, activation='relu', name='FC7'))
model.add(layers.Dense(10, activation='softmax', name='FC8'))
model.summary()
```
**Compile and train the Model**
```
# SGD + categorical cross-entropy; 30% of the training set is held out
# for validation during fitting
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(
    train_images, train_labels,
    epochs=10, batch_size=180,
    validation_split=0.3)
```
**Evaluate the Model**
```
# Held-out test-set evaluation (verbose=0 suppresses the progress bar)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=0)
test_acc
```
**Plot the Graph of Accuracy and Loss in case of Model**
```
import matplotlib.pyplot as plt
# Training vs validation accuracy per epoch
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Model Accuracy')
plt.legend(['Train', 'Val'],loc='lower right')
plt.show()
import matplotlib.pyplot as plt
# Training vs validation loss per epoch
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Model loss')
plt.legend(['Train','val'], loc='lower right')
plt.show()
```
**Save the Model for reuse**
```
# Persist the full model (architecture + weights + optimizer state) as HDF5
model.save('mnist_model.h5')
```
**Re-use the saved Model**
```
from tensorflow.keras.models import load_model
# Reload the persisted model and confirm its architecture
model = load_model('mnist_model.h5')
model.summary()
```
**Predict the class label of the test Images**
```
import matplotlib.pyplot as plt
import numpy as np
# Predict the first 25 test images and show them in a 5x5 grid,
# labeling each with the predicted class
test_images_ss = test_images[:25]
predicted = model.predict(test_images_ss)
i = 0
plt.figure(figsize=[8, 8])
for (image, label) in zip(test_images_ss, predicted):
    label = np.argmax(label)  # softmax vector -> class index
    image = image.reshape((28,28))
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(image, cmap=plt.cm.binary)
    plt.xlabel(label)
    i += 1
plt.suptitle("Predicted Labels")
plt.show()
```
**Visualizing the features learned by ConvNets**
```
from tensorflow.keras import models
# Build a model that exposes the outputs of the first five layers so the
# intermediate activations can be visualized for a single input image
layer_outputs = [layer.output for layer in model.layers[:5]]
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
activation_model.outputs
# NOTE(review): despite the variable name, this takes an image from the
# TRAINING set (train_images[10]) - confirm whether a test image was intended.
sample_test_image = np.expand_dims(train_images[10], axis=0)
sample_test_image.shape
activations = activation_model.predict(sample_test_image)
print(activations)
len(activations)
layer_names = [layer.name for layer in model.layers[:5]]
layer_names
# Plot every filter's activation map for each of the first five layers,
# 16 maps per row.
col_size = 16
for layer_name, activation in zip(layer_names, activations):
    # Normalize to [0, 1] for display; guard against an all-zero map
    # (ReLU layers can be completely inactive for a given input).
    peak = activation.max()
    if peak > 0:
        activation = activation / peak
    num_filters = activation.shape[-1]
    # BUG FIX: plt.subplot() requires integer rows/cols; the original float
    # division (num_filters / col_size) raises on modern Matplotlib.
    # Ceil division also keeps enough rows when num_filters is not a
    # multiple of col_size.
    row_size = max(1, -(-num_filters // col_size))
    print("\n" * 2)
    print(layer_name)
    plt.figure(figsize=[col_size * 2, row_size * 2])
    for index in range(num_filters):
        plt.subplot(row_size, col_size, index + 1)
        plt.imshow(activation[0, :, :, index])
        plt.axis("off")
    plt.show()
```
**Confusion Matrix**
```
!pip install scikit-plot
from scikitplot.metrics import plot_confusion_matrix
# Convert softmax outputs and one-hot labels back to class indices
logits = model.predict(test_images)
predicted = np.argmax(logits, axis=-1)
labels = np.argmax(test_labels, axis=-1)
plot_confusion_matrix(labels, predicted)
```
| github_jupyter |
# Tensorflow training
In this tutorial, you will train an MNIST model in TensorFlow.
## Prerequisites
* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)
* Go through the [configuration notebook](../../../configuration.ipynb) to:
* install the AML SDK
* create a workspace and its configuration file (`config.json`)
[//]: # * Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK
```
# Check core SDK version number (sanity check that azureml-core is installed)
import azureml.core
print("SDK version:", azureml.core.VERSION)
```
## Initialize workspace
Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
```
from azureml.core.workspace import Workspace
# Load the workspace details stored in config.json (created in the setup step)
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep='\n')
```
## Specify existing Kubernetes Compute
```
from azureml.core.compute import ComputeTarget, KubernetesCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your Kubernetes compute
compute_name = 'gpucluster-1x'
# Attach to an EXISTING compute target; raises ComputeTargetException if absent
compute_target = ComputeTarget(workspace=ws, name=compute_name)
compute_target
```
## Create a Dataset for Files
A Dataset can reference single or multiple files in your datastores or public urls. The files can be of any format. Dataset provides you with the ability to download or mount the files to your compute. By creating a dataset, you create a reference to the data source location. If you applied any subsetting transformations to the dataset, they will be stored in the dataset as well. The data remains in its existing location, so no extra storage cost is incurred. [Learn More](https://aka.ms/azureml/howto/createdatasets)
```
# initialize a file dataset from the public MNIST download URLs
from azureml.core.dataset import Dataset
# NOTE(review): yann.lecun.com download links are frequently unavailable;
# consider a mirrored copy if these URLs stop resolving.
web_paths = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
             'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
             'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
             'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
            ]
dataset = Dataset.File.from_files(path = web_paths)
```
you may want to register datasets using the register() method to your workspace so they can be shared with others, reused across various experiments, and referred to by name in your training script.
```
# register dataset to the workspace so it can be shared, versioned,
# and referenced by name from training scripts
dataset = dataset.register(workspace = ws,
                           name = 'mnist dataset for Arc',
                           description='training and test dataset',
                           create_new_version=True)
# list the files referenced by dataset
dataset
```
## Train model on the Kubernetes compute
### Create a project directory
Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on.
```
import os
# Project directory: everything placed here is uploaded to the remote compute
script_folder = './tf-resume-training'
os.makedirs(script_folder, exist_ok=True)
```
Copy the training script `tf_mnist_with_checkpoint.py` into this project directory.
```
import shutil
# the training logic is in the tf_mnist_with_checkpoint.py file;
# utils.py is a helper module it imports
shutil.copy('./tf_mnist_with_checkpoint.py', script_folder)
shutil.copy('./utils.py', script_folder)
```
### Create an experiment
Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed TensorFlow tutorial.
```
from azureml.core import Experiment
# An Experiment groups all runs of this training job within the workspace
experiment_name = 'akse-arc-tf-training1'
experiment = Experiment(ws, name=experiment_name)
```
### Create ScriptRun
```
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies
# Custom environment: TF 1.13 + Horovod on a CUDA/OpenMPI base image
env = Environment("env-tf")
cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults','tensorflow==1.13.1','horovod==0.16.1'])
env.docker.base_image='mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.0.3-cudnn8-ubuntu18.04'
env.python.conda_dependencies = cd
# Register environment to re-use later
env.register(workspace = ws)
from azureml.core import ScriptRunConfig
# Mount the registered MNIST dataset and pass the mount point to the script
args=['--data-folder', dataset.as_named_input('mnist').as_mount()]
src = ScriptRunConfig(source_directory=script_folder,
                      script='tf_mnist_with_checkpoint.py',
                      compute_target=compute_target,
                      environment=env,
                      arguments=args)
```
In the above code, we passed the registered MNIST file dataset to our script's `--data-folder` argument via `dataset.as_named_input('mnist').as_mount()`. This will 1) mount the dataset on the remote compute and 2) provide the script with the mount path of the MNIST data files.
### Submit job
### Run your experiment . Note that this call is asynchronous.
```
# Submit asynchronously; the returned Run object tracks status and logs
run = experiment.submit(src)
print(run)
run
```
### Monitor your run
You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes.
| github_jupyter |
```
import pandas as pd
from pathlib import Path
from sklearn.ensemble import GradientBoostingRegressor
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import learning_curve,RepeatedKFold
from sklearn.pipeline import make_pipeline
from yellowbrick.model_selection import LearningCurve
from yellowbrick.regressor import ResidualsPlot
from yellowbrick.regressor import PredictionError
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
from sklearn import metrics
from sklearn.externals import joblib
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from scipy import stats
from scipy.special import boxcox1p
from sklearn.linear_model import Lasso
from sklearn.feature_selection import SelectFromModel
from sklearn.compose import TransformedTargetRegressor
from sklearn.preprocessing import QuantileTransformer,PowerTransformer
from sklearn.preprocessing import RobustScaler,MinMaxScaler,StandardScaler
from sklearn.manifold import TSNE
%matplotlib inline
# Final feature dataframe: rows are PDB complexes, columns are features
# plus the experimental ddG target
df_final = pd.read_csv("../data/DF_train15noChangeContact_skempiAB_modeller_final.csv",index_col=0)
pdb_names = df_final.index
features_names = df_final.drop('ddG_exp',axis=1).columns
df_final.shape
# Range of the target variable
df_final["ddG_exp"].max() - df_final["ddG_exp"].min()
f, ax = plt.subplots(figsize=(10, 7))
sns.distplot(df_final['ddG_exp']);
plt.savefig("Train15_Distribution.png",dpi=300,bbox_inches="tight")
# Split train and independent test data (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(df_final.drop('ddG_exp',axis=1), df_final['ddG_exp'],
                                                    test_size=0.2, random_state=13)
f, ax = plt.subplots(figsize=(10, 7))
sns.distplot(y_train, color="red", label="ddG_exp_train");
sns.distplot(y_test, color="skyblue", label="ddG_exp_test");
sns.distplot(y_train, fit=stats.norm);
# Get the fitted normal parameters for the training target
(mu, sigma) = stats.norm.fit(y_train)
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
           loc='best')
plt.ylabel('Frequency')
plt.title('ddG distribution')
# Get also the QQ-plot (visual check for normality of the target)
fig = plt.figure()
res = stats.probplot(y_train, plot=plt)
plt.show()
```
# probably need to transform target variable
## Correlation
```
# join train data for exploratory analysis of training data
train = X_train.join(y_train)
sns.set(font_scale=0.6)
# correlation matrix of all features + target
corrmat = train.corr()
f, ax = plt.subplots(figsize=(14, 11))
sns.heatmap(corrmat, square=True,cbar_kws={"shrink": .8});
#plt.savefig("Train15_initCorr.png",dpi=300,bbox_inches="tight")
sns.set(font_scale=1.2)
# top-k correlation matrix against the target
k = 15 #number of variables for heatmap
cols = corrmat.nlargest(k, 'ddG_exp')['ddG_exp'].index
cm = np.corrcoef(train[cols].values.T)
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values);
#plt.savefig("Train15_initCorrTOP15.png",dpi=300,bbox_inches="tight")
sns.set(font_scale=1)
plt.subplots(figsize=(15, 5))
plt.subplot(1, 2, 1)
g = sns.regplot(x=train['van_der_waals_change'], y=train['ddG_exp'], fit_reg=False).set_title("Antes")
# Delete outliers (van_der_waals_change > 3) and re-plot for comparison
plt.subplot(1, 2, 2)
train = train.drop(train[(train['van_der_waals_change']>3)].index)
g = sns.regplot(x=train['van_der_waals_change'], y=train['ddG_exp'], fit_reg=False).set_title("Despues")
#plt.savefig("Train15_outlierVDWchange.png",dpi=600,bbox_inches="tight")
sns.set(font_scale=1)
plt.subplots(figsize=(15, 5))
plt.subplot(1, 2, 1)
g = sns.regplot(x=train['dg_change'], y=train['ddG_exp'], fit_reg=False).set_title("Antes")
# Delete outliers (|dg_change| > 8) and re-plot for comparison
plt.subplot(1, 2, 2)
train = train.drop(train[(train['dg_change'].abs()>8)].index)
g = sns.regplot(x=train['dg_change'], y=train['ddG_exp'], fit_reg=False).set_title("Despues")
#plt.savefig("Train15_outlierDgchange.png",dpi=600,bbox_inches="tight")
```
### No missing values, so this data-processing step is skipped
##
### Feature engeenering, checking interaction of sift contact with the highest correlated energetic feature
In order to treat this dataset, first I will check whether adding interactions between some features improves the correlation with the target; next I will check for skewed features. Finally I will write a custom transformer class for each step.
```
# Re-derive X_train / y_train after the outlier removal above
y_train = train['ddG_exp']
X_train = train.drop('ddG_exp',axis=1)
```
### Check corr of new features
```
# Interaction features: first 15 (contact) columns multiplied row-wise by
# the van_der_waals_change column  # works much better
features_interaction_contactVDW = X_train.iloc[:,:15].mul(X_train["van_der_waals_change"],axis=0)
features_interaction_contactVDW.columns = features_interaction_contactVDW.columns.str[:]+"_vdw_change_interaction"
corr_matrix = features_interaction_contactVDW.corrwith(y_train,axis=0)#.abs()
# NOTE(review): corrwith returns a Series (one value per new feature), so
# the former "upper triangle of a symmetric matrix" comment was stale.
print(corr_matrix.sort_values(ascending=False).round(6))
```
### Check skewness
```
# Skewness of every feature, most skewed first
skew_features = X_train.skew().sort_values(ascending=False)
skew_features
print(skew_features.to_csv())
```
### Check features by percentage of zero values
```
# Collect features whose dominant value covers more than 90% of the rows
overfit = []
for i in X_train.columns:
    counts = X_train[i].value_counts()
    # value_counts() is frequency-sorted, so iloc[0] is the count of the
    # MOST COMMON value (usually 0 for these sparse features)
    zeros = counts.iloc[0]
    if zeros / len(X_train) * 100 >90.:
        overfit.append(i)
print(overfit)
```
### make custom transformer for preprocess in pipeline
```
from sklearn.base import BaseEstimator, TransformerMixin
class FeaturesInteractions(BaseEstimator, TransformerMixin):
    """Stateless transformer that appends pairwise interaction features.

    Every column listed in ``interaction1`` is multiplied (row-wise) by
    the single column ``interaction2``; the products are joined onto X
    as new columns named ``"<col>/<interaction2>"``.
    """

    def __init__(self, interaction1, interaction2 ):
        # sklearn convention: store constructor arguments untouched
        self.interaction1 = interaction1
        self.interaction2 = interaction2

    def fit( self, X, y = None ):
        # Nothing to learn - the transform is fully determined by __init__
        return self

    def transform(self, X , y=None ):
        products = X.loc[:, self.interaction1].mul(X[self.interaction2], axis=0)
        suffix = '/{}'.format(self.interaction2)
        products.columns = [name + suffix for name in products.columns]
        return X.join(products)
class SkewTransformer(BaseEstimator, TransformerMixin):
    """Gaussianize skewed columns.

    ``fit`` finds the columns whose absolute skew exceeds ``threshold``
    and fits a QuantileTransformer (normal output) on them; ``transform``
    maps those columns through the fitted transformer.
    """

    def __init__(self, threshold=0.6, method='quantile'):
        self.threshold = threshold
        self.method = method

    def fit(self, X, y = None ):
        skewes_ = X.skew().sort_values(ascending=False)
        self.skew_features = skewes_[skewes_.abs() > self.threshold]
        if self.method == 'quantile':
            self.t = QuantileTransformer(output_distribution="normal",random_state=13)
            self.t.fit(X[self.skew_features.index])
        else:
            # FIX: the original silently skipped fitting for any other
            # method and later crashed in transform() with AttributeError.
            raise ValueError("Unsupported method: {!r}".format(self.method))
        return self

    def transform(self, X, y=None):
        # FIX: operate on a copy instead of mutating the caller's DataFrame
        X = X.copy()
        X[self.skew_features.index] = self.t.transform(X[self.skew_features.index])
        return X
class ZeroThreshold(BaseEstimator, TransformerMixin):
    """Drop near-constant columns.

    ``fit`` records every column whose most frequent value accounts for
    more than ``threshold`` percent of the rows (for sparse features that
    value is typically zero, hence the name); ``transform`` drops them.
    The learned state is exposed as ``feature_names`` (all columns seen
    in fit) and ``overfit`` (the columns to drop).
    """

    def __init__(self, threshold=90.):
        self.threshold = threshold

    def fit(self, X, y = None ):
        self.feature_names = X.columns
        self.overfit = []
        for col in X.columns:
            counts = X[col].value_counts()
            # NOTE(review): value_counts() is frequency-sorted, so iloc[0]
            # is the count of the MOST COMMON value, which is not
            # necessarily zero despite the class name.
            top_count = counts.iloc[0]
            if top_count / len(X) * 100 > self.threshold:
                self.overfit.append(col)
        return self

    def transform(self, X, y=None):
        # FIX: return a new frame instead of mutating the caller's
        # DataFrame in place (drop(..., inplace=True) made the
        # transformer destructively modify its input).
        return X.drop(self.overfit, axis=1)
```
# Modeling
```
X_train.shape, y_train.shape, X_test.shape, y_test.shape
#1) ORIGINAL
## Pipeline preprocessing
interactions = FeaturesInteractions(interaction1=X_train.columns[:15].tolist(),interaction2="van_der_waals_change")
skewness = SkewTransformer(threshold=0.6,method='quantile')
zeroth = ZeroThreshold(threshold=90.)
#2)
rf_model = GradientBoostingRegressor(random_state=13)
#3) Crear pipeline
#pipeline1 = make_pipeline(interactions,skewness, zeroth, rf_model)
pipeline1 = make_pipeline(interactions,skewness,zeroth, rf_model)
# Use transformed target regressor
# regr_trans = TransformedTargetRegressor(regressor=pipeline1,
# transformer=PowerTransformer(output_distribution='normal',random_state=13))
# # grid params
# param_grid = {
# 'regressor__gradientboostingregressor__max_depth': [9],
# 'regressor__gradientboostingregressor__max_features': ['sqrt'],
# 'regressor__gradientboostingregressor__min_samples_leaf': [21],
# 'regressor__gradientboostingregressor__min_samples_split': [2],
# 'regressor__gradientboostingregressor__n_estimators': [200],
# 'regressor__gradientboostingregressor__subsample':[0.7],
# 'regressor__gradientboostingregressor__learning_rate':[0.05],
# 'regressor__gradientboostingregressor__loss':["huber"],
# 'regressor__gradientboostingregressor__alpha':[0.4]}
param_grid = {
'gradientboostingregressor__max_depth': [6],
'gradientboostingregressor__max_features': ['sqrt'],
'gradientboostingregressor__min_samples_leaf': [30],
'gradientboostingregressor__min_samples_split': [2],
'gradientboostingregressor__n_estimators': [100],
'gradientboostingregressor__subsample':[0.8],
'gradientboostingregressor__learning_rate':[0.05],
'gradientboostingregressor__loss':["huber"],
'gradientboostingregressor__alpha':[0.9]}
cv = RepeatedKFold(n_splits=10,n_repeats=10,random_state=13)
# Instantiate the grid search model
grid1 = GridSearchCV(pipeline1, param_grid, verbose=5, n_jobs=-1,cv=cv,scoring=['neg_mean_squared_error','r2'],
refit='neg_mean_squared_error',return_train_score=True)
grid1.fit(X_train,y_train)
# index of best scores
rmse_bestCV_test_index = grid1.cv_results_['mean_test_neg_mean_squared_error'].argmax()
rmse_bestCV_train_index = grid1.cv_results_['mean_train_neg_mean_squared_error'].argmax()
r2_bestCV_test_index = grid1.cv_results_['mean_test_r2'].argmax()
r2_bestCV_train_index = grid1.cv_results_['mean_train_r2'].argmax()
# scores
rmse_bestCV_test_score = grid1.cv_results_['mean_test_neg_mean_squared_error'][rmse_bestCV_test_index]
rmse_bestCV_test_std = grid1.cv_results_['std_test_neg_mean_squared_error'][rmse_bestCV_test_index]
rmse_bestCV_train_score = grid1.cv_results_['mean_train_neg_mean_squared_error'][rmse_bestCV_train_index]
rmse_bestCV_train_std = grid1.cv_results_['std_train_neg_mean_squared_error'][rmse_bestCV_train_index]
r2_bestCV_test_score = grid1.cv_results_['mean_test_r2'][r2_bestCV_test_index]
r2_bestCV_test_std = grid1.cv_results_['std_test_r2'][r2_bestCV_test_index]
r2_bestCV_train_score = grid1.cv_results_['mean_train_r2'][r2_bestCV_train_index]
r2_bestCV_train_std = grid1.cv_results_['std_train_r2'][r2_bestCV_train_index]
print('CV test RMSE {:f} +/- {:f}'.format(np.sqrt(-rmse_bestCV_test_score),np.sqrt(rmse_bestCV_test_std)))
print('CV train RMSE {:f} +/- {:f}'.format(np.sqrt(-rmse_bestCV_train_score),np.sqrt(rmse_bestCV_train_std)))
print('DIFF RMSE {}'.format(np.sqrt(-rmse_bestCV_test_score)-np.sqrt(-rmse_bestCV_train_score)))
print('CV test r2 {:f} +/- {:f}'.format(r2_bestCV_test_score,r2_bestCV_test_std))
print('CV train r2 {:f} +/- {:f}'.format(r2_bestCV_train_score,r2_bestCV_train_std))
print(r2_bestCV_train_score-r2_bestCV_test_score)
print("",grid1.best_params_)
y_test_pred = grid1.best_estimator_.predict(X_test)
y_train_pred = grid1.best_estimator_.predict(X_train)
print("\nRMSE for test dataset: {}".format(np.round(np.sqrt(mean_squared_error(y_test, y_test_pred)), 2)))
print("RMSE for train dataset: {}".format(np.round(np.sqrt(mean_squared_error(y_train, y_train_pred)), 2)))
print("pearson corr test {:f}".format(np.corrcoef(y_test_pred,y_test.values.ravel())[0][1]))
print("pearson corr train {:f}".format(np.corrcoef(y_train_pred,y_train.values.ravel())[0][1]))
print('R2 test',r2_score(y_test,y_test_pred))
print('R2 train',r2_score(y_train,y_train_pred))
# --- captured output from the cell above (kept for reference; commented out
# --- so that it no longer breaks execution of this code cell) ---
# CV test RMSE 1.629430 +/- 0.799343
# CV train RMSE 1.278502 +/- 0.225925
# DIFF RMSE 0.35092797677433385
# CV test r2 0.243458 +/- 0.079707
# CV train r2 0.537599 +/- 0.009211
# 0.29414037095192286
# {'gradientboostingregressor__alpha': 0.9, 'gradientboostingregressor__learning_rate': 0.05, 'gradientboostingregressor__loss': 'huber', 'gradientboostingregressor__max_depth': 6, 'gradientboostingregressor__max_features': 'sqrt', 'gradientboostingregressor__min_samples_leaf': 30, 'gradientboostingregressor__min_samples_split': 2, 'gradientboostingregressor__n_estimators': 100, 'gradientboostingregressor__subsample': 0.8}
# RMSE for test dataset: 1.64
# RMSE for train dataset: 1.29
# pearson corr 0.591408
# R2 test 0.3178301850116666
# R2 train 0.5310086844583202
# Yellowbrick diagnostic plots for the best grid-search estimator.
# Residuals plot: fitted on train data, scored (residuals shown) on test data.
visualizer = ResidualsPlot(grid1.best_estimator_,title='Residuos para GradientBoostingRegressor',hist=False)
visualizer.fit(X_train, y_train.values.ravel()) # Fit the training data to the model
visualizer.score(X_test, y_test.values.ravel()) # Evaluate the model on the test data
visualizer.finalize()
visualizer.ax.set_xlabel('Valor Predicho')
visualizer.ax.set_ylabel('Residuos')
plt.savefig("GBT_R2_train15.png",dpi=600,bbox_inches="tight")
#visualizer.poof() # Draw/show/poof the data

# Prediction-error plot scored on the TRAINING data (training-fit quality).
perror = PredictionError(grid1.best_estimator_, title='Error de Entrenamiento para GradientBoostingRegressor')
perror.fit(X_train, y_train.values.ravel()) # Fit the training data to the visualizer
perror.score(X_train, y_train.values.ravel()) # Evaluate the model on the train data
perror.finalize()
plt.savefig("GBT_TrainingError_train15.png",dpi=300,bbox_inches="tight")

# Prediction-error plot scored on the TEST data.
# Fix: the title previously contained mojibake ("Predicciรณn"), a UTF-8 'ó'
# decoded with the wrong codec; restore the intended "Predicción".
perror = PredictionError(grid1.best_estimator_, title='Error de Predicción para GradientBoostingRegressor')
perror.fit(X_train, y_train.values.ravel()) # Fit the training data to the visualizer
perror.score(X_test, y_test.values.ravel()) # Evaluate the model on the test data
perror.finalize()
plt.savefig("GBT_PredictionError_train15.png",dpi=600,bbox_inches="tight")
#g = perror.poof()
# Learning curve for the tuned estimator over the full (train + test) dataset.
full_data = pd.concat([X_train, X_test])
y_full = pd.concat([y_train, y_test])
viz = LearningCurve(grid1.best_estimator_, cv=cv, n_jobs=-1,scoring='neg_mean_squared_error',
    train_sizes=np.linspace(0.2, 1.0, 10),title='Curva de aprendizaje para GradientBoostingRegressor')
viz.fit(full_data, y_full)
viz.finalize()
viz.ax.set_xlabel('Muestras de entrenamiento')
viz.ax.set_ylabel('Score')
plt.savefig("GBT_LearningCurve_train15.png",dpi=600,bbox_inches="tight")
#viz.poof()
# Scores are negative MSE, so negate before the square root to report RMSE.
print("RMSE CV Train {}".format(np.sqrt(-viz.train_scores_mean_[-1])))
print("RMSE CV Test {}".format(np.sqrt(-viz.test_scores_mean_[-1])))
np.sqrt(viz.test_scores_std_)
# Refit the best pipeline on ALL the data and persist it for later use.
final_gbt = grid1.best_estimator_.fit(full_data,y_full)
# save final model
joblib.dump(final_gbt, 'GBTmodel_train15skempiAB_FINAL.overf.pkl')
# Bar chart of feature importances, sorted from most to least important.
feature_importance = final_gbt.named_steps['gradientboostingregressor'].feature_importances_
#feature_importance = feature_importance * 100.0 # * (feature_importance / feature_importance.max())
# Indices that sort the importances in descending order.
idx_features = feature_importance.argsort()[::-1]
# Feature names kept by the custom 'zerothreshold' pipeline step
# (presumably a feature filter dropping overfit-prone columns — confirm).
fnames = final_gbt.named_steps.zerothreshold.feature_names.drop(final_gbt.named_steps.zerothreshold.overfit)
plt.figure(figsize=(15,4))
plt.bar(np.arange(len(fnames)), feature_importance[idx_features])
plt.xticks(range(len(fnames)),fnames[idx_features],rotation=90)
plt.autoscale(enable=True, axis='x', tight=True)
plt.xlabel(u"Importancia de caracteristicas")
plt.savefig("GBT_featureImportance.png",dpi=600,bbox_inches="tight")
# Print each feature with its importance, largest first.
importances = list(final_gbt.named_steps['gradientboostingregressor'].feature_importances_)
feature_list = fnames
# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 4)) for feature, importance in zip(feature_list, importances)]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
# Use a plain for-loop for printing: a list comprehension executed only for
# its side effects builds a throwaway list of Nones and obscures intent.
for pair in feature_importances:
    print('Variable: {:20} Importance: {}'.format(*pair))
```
## Salvar modelo final, entrenado con el total de los datos
```
# Binarize predictions and ground truth at a 0.5 threshold and report
# classification metrics for the refit model on the full dataset.
full_prediction = final_gbt.predict(full_data)
# NOTE(review): abs() is applied to the predictions but not to y_full below,
# so predictions of large magnitude in EITHER direction become class 1 while
# only large positive true values do — confirm this asymmetry is intended.
full_pred_bin = np.where(np.abs(full_prediction) > 0.5,1,0)
full_true_bin = np.where(y_full > 0.5,1,0)
from sklearn.metrics import accuracy_score,classification_report,roc_auc_score,confusion_matrix
print(classification_report(full_true_bin,full_pred_bin))
# Scatter + linear fit of predicted vs experimental ddG on the test set.
rmse_test = np.round(np.sqrt(mean_squared_error(y_test, y_test_pred)), 3)
df_pred = pd.DataFrame({"Predicted ddG(kcal/mol)": y_test_pred, "Actual ddG(kcal/mol)": y_test.values.ravel()})
pearsonr_test = round(df_pred.corr().iloc[0,1],3)
g = sns.regplot(x="Actual ddG(kcal/mol)", y="Predicted ddG(kcal/mol)",data=df_pred)
plt.title("Predicted vs Experimental ddG (Independent set: 123 complexes)")
plt.text(-2,3,"pearsonr = %s" %pearsonr_test)
plt.text(4.5,-0.5,"RMSE = %s" %rmse_test)
#plt.savefig("RFmodel_300_testfit.png",dpi=600)

# Same plot for the training set.
# Fix: the two columns were swapped here — the model predictions were stored
# under "Actual" and the observed values under "Predicted" — which mislabelled
# the axes of the regression plot. Match the test-set construction above.
# (pearsonr is symmetric and rmse_train is computed from the raw arrays,
# so only the plot axes were affected.)
df_train_pred = pd.DataFrame({"Predicted ddG(kcal/mol)": y_train_pred, "Actual ddG(kcal/mol)": y_train.values.ravel()})
pearsonr_train = round(df_train_pred.corr().iloc[0,1],3)
rmse_train = np.round(np.sqrt(mean_squared_error(y_train.values.ravel(), y_train_pred)), 3)
g = sns.regplot(x="Actual ddG(kcal/mol)", y="Predicted ddG(kcal/mol)",data=df_train_pred)
plt.text(-0.4,6.5,"pearsonr = %s" %pearsonr_train)
plt.text(3.5,-2.5,"RMSE = %s" %rmse_train)
plt.title("Predicted vs Experimental ddG (Train set: 492 complexes)")
#plt.savefig("RFmodel_300_trainfit.png",dpi=600)
# Feature-importance listing for the random-forest pipeline step.
rf_model = grid1.best_estimator_.named_steps["randomforestregressor"]
importances = list(rf_model.feature_importances_)
feature_list = df_final.columns
# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 4)) for feature, importance in zip(feature_list, importances)]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
# Use a plain for-loop for printing instead of a side-effect-only comprehension.
for pair in feature_importances:
    print('Variable: {:20} Importance: {}'.format(*pair))
RepeatedKFold?
# Algorithms used for modeling
from sklearn.linear_model import ElasticNetCV, LassoCV, BayesianRidge, RidgeCV
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVR
import xgboost as xgb

# Candidate regularization grids for the linear models.
e_alphas = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007]
e_l1ratio = [0.8, 0.85, 0.9, 0.95, 0.99, 1]
alphas_alt = [14.5, 14.6, 14.7, 14.8, 14.9, 15, 15.1, 15.2, 15.3, 15.4, 15.5]
alphas2 = [5e-05, 0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008]

# Every model is wrapped in a pipeline; scale-sensitive models get MinMax scaling.
ridge = make_pipeline(MinMaxScaler(), RidgeCV(alphas=alphas_alt, cv=cv))
lasso = make_pipeline(MinMaxScaler(), LassoCV(max_iter=1e7, alphas=alphas2, random_state=42, cv=cv))
elasticnet = make_pipeline(MinMaxScaler(), ElasticNetCV(max_iter=1e7, alphas=e_alphas, cv=cv, l1_ratio=e_l1ratio))
svr = make_pipeline(MinMaxScaler(), SVR(C= 20, epsilon= 0.008, gamma=0.0003,))
gb = make_pipeline(GradientBoostingRegressor())
bayesianridge = make_pipeline(MinMaxScaler(),BayesianRidge())
rf = make_pipeline(RandomForestRegressor())
xgbr = make_pipeline(xgb.XGBRegressor())

#Machine Learning Algorithm (MLA) Selection and Initialization
models = [ridge, elasticnet, lasso, gb, bayesianridge, rf, xgbr]

# Repeated K-fold cross-validation for more stable score estimates.
cvr = RepeatedKFold(n_splits=10,n_repeats=5,random_state=13)

#create table to compare MLA metrics
columns = ['Name', 'Parameters', 'Train Accuracy Mean', 'Test Accuracy']
before_model_compare = pd.DataFrame(columns = columns)

#index through models and save performance to table
row_index = 0
for alg in models:
    # Fix: every entry in `models` is a sklearn Pipeline, so
    # alg.__class__.__name__ was always "Pipeline" and every table row got the
    # same name; report the class of the FINAL estimator step instead.
    model_name = type(alg.steps[-1][1]).__name__
    before_model_compare.loc[row_index, 'Name'] = model_name
    before_model_compare.loc[row_index, 'Parameters'] = str(alg.get_params())
    alg.fit(X_train, y_train.values.ravel())
    # Cross-validated RMSE on the training data (scores are negative MSE).
    training_results = np.sqrt((-cross_val_score(alg, X_train, y_train.values.ravel(), cv = cvr, scoring= 'neg_mean_squared_error')).mean())
    #training_results = cross_val_score(alg, X_train, y_train, cv = shuff, scoring= 'r2').mean()
    # Hold-out RMSE on the test split.
    test_results = np.sqrt(((y_test.values.ravel()-alg.predict(X_test))**2).mean())
    #test_results = r2_score(y_pred=alg.predict(X_test),y_true=y_test)
    before_model_compare.loc[row_index, 'Train Accuracy Mean'] = (training_results)#*100
    before_model_compare.loc[row_index, 'Test Accuracy'] = (test_results)#*100
    row_index+=1
    print(row_index, model_name, 'trained...')

# Round for display; despite the column names these are RMSE values, not accuracies.
decimals = 3
before_model_compare['Train Accuracy Mean'] = before_model_compare['Train Accuracy Mean'].apply(lambda x: round(x, decimals))
before_model_compare['Test Accuracy'] = before_model_compare['Test Accuracy'].apply(lambda x: round(x, decimals))
before_model_compare
```
| github_jupyter |
## Image Cleaner Widget
fastai offers several widgets to support the workflow of a deep learning practitioner. The purpose of the widgets is to help you organize, clean, and prepare your data for your model. Widgets are separated by data type.
```
from fastai.vision import *
from fastai.widgets import DatasetFormatter, ImageCleaner, ImageDownloader, download_google_images
from fastai.gen_doc.nbdoc import *
%reload_ext autoreload
%autoreload 2
# Train a small baseline model on MNIST_SAMPLE so the widgets below have a
# fitted learner to inspect; the weights are saved for reuse.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = cnn_learner(data, models.resnet18, metrics=error_rate)
learn.fit_one_cycle(2)
learn.save('stage-1')
```
We create a databunch with all the data in the training set and no validation set (DatasetFormatter uses only the training set)
```
# Rebuild the data with no validation split (DatasetFormatter only uses the
# training set), then load the previously-saved weights into a fresh learner.
db = (ImageList.from_folder(path)
                   .split_none()
                   .label_from_folder()
                   .databunch())
learn = cnn_learner(db, models.resnet18, metrics=[accuracy])
learn.load('stage-1');
show_doc(DatasetFormatter)
```
The [`DatasetFormatter`](/widgets.image_cleaner.html#DatasetFormatter) class prepares your image dataset for widgets by returning a formatted [`DatasetTfm`](/vision.data.html#DatasetTfm) based on the [`DatasetType`](/basic_data.html#DatasetType) specified. Use `from_toplosses` to grab the most problematic images directly from your learner. Optionally, you can restrict the formatted dataset returned to `n_imgs`.
```
show_doc(DatasetFormatter.from_similars)
from fastai.gen_doc.nbdoc import *
from fastai.widgets.image_cleaner import *
show_doc(DatasetFormatter.from_toplosses)
show_doc(ImageCleaner)
```
[`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) is for cleaning up images that don't belong in your dataset. It renders images in a row and gives you the opportunity to delete the file from your file system. To use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) we must first use `DatasetFormatter().from_toplosses` to get the suggested indices for misclassified images.
```
ds, idxs = DatasetFormatter().from_toplosses(learn)
ImageCleaner(ds, idxs, path)
```
[`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) does not change anything on disk (neither labels nor existence of images). Instead, it creates a 'cleaned.csv' file in your data path, from which you need to load your new databunch for the changes to be applied.
```
# 'cleaned.csv' holds the corrected file list/labels produced by ImageCleaner.
df = pd.read_csv(path/'cleaned.csv', header='infer')
# We create a databunch from our csv. We include the data in the training set and we don't use a validation set (DatasetFormatter uses only the training set)
np.random.seed(42)
db = (ImageList.from_df(df, path)
                   .split_none()
                   .label_from_df()
                   .databunch(bs=64))
learn = cnn_learner(db, models.resnet18, metrics=error_rate)
learn = learn.load('stage-1')
```
You can then use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) again to find duplicates in the dataset. To do this, you can specify `duplicates=True` while calling ImageCleaner after getting the indices and dataset from `.from_similars`. Note that if you are using a layer's output which has dimensions <code>(n_batches, n_features, 1, 1)</code> then you don't need any pooling (this is the case with the last layer). The suggested use of `.from_similars()` with resnets is using the last layer and no pooling, like in the following cell.
```
ds, idxs = DatasetFormatter().from_similars(learn, layer_ls=[0,7,1], pool=None)
ImageCleaner(ds, idxs, path, duplicates=True)
show_doc(ImageDownloader)
```
[`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) widget gives you a way to quickly bootstrap your image dataset without leaving the notebook. It searches and downloads images that match the search criteria and resolution / quality requirements and stores them on your filesystem within the provided `path`.
Images for each search query (or label) are stored in a separate folder within `path`. For example, if you populate `tiger` with a `path` set to `./data`, you'll get a folder `./data/tiger/` with the tiger images in it.
[`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) will automatically clean up and verify the downloaded images with [`verify_images()`](/vision.data.html#verify_images) after downloading them.
```
path = Config.data_path()/'image_downloader'
os.makedirs(path, exist_ok=True)
ImageDownloader(path)
```
#### Downloading images in python scripts outside Jupyter notebooks
```
path = Config.data_path()/'image_downloader'
files = download_google_images(path, 'aussie shepherd', size='>1024*768', n_images=30)
len(files)
show_doc(download_google_images)
```
After populating images with [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader), you can get an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) by calling `ImageDataBunch.from_folder(path, size=size)`, or using the data block API.
```
# Setup path and labels to search for
path = Config.data_path()/'image_downloader'
labels = ['boston terrier', 'french bulldog']
# Download images: each label gets its own sub-folder under `path`.
for label in labels:
    download_google_images(path, label, size='>400*300', n_images=50)
# Build a databunch and train!
src = (ImageList.from_folder(path)
       .split_by_rand_pct()
       .label_from_folder()
       .transform(get_transforms(), size=224))
db = src.databunch(bs=16, num_workers=0)
learn = cnn_learner(db, models.resnet34, metrics=[accuracy])
learn.fit_one_cycle(3)
```
#### Downloading more than a hundred images
To fetch more than a hundred images, [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) uses `selenium` and `chromedriver` to scroll through the Google Images search results page and scrape image URLs. They're not required as dependencies by default. If you don't have them installed on your system, the widget will show you an error message.
To install `selenium`, just `pip install selenium` in your fastai environment.
**On a mac**, you can install `chromedriver` with `brew cask install chromedriver`.
**On Ubuntu**
Take a look at the latest Chromedriver version available, then something like:
```
wget https://chromedriver.storage.googleapis.com/2.45/chromedriver_linux64.zip
unzip chromedriver_linux64.zip
```
Note that downloading under 100 images doesn't require any dependencies other than fastai itself, however downloading more than a hundred images [uses `selenium` and `chromedriver`](/widgets.image_cleaner.html#Downloading-more-than-a-hundred-images).
`size` can be one of:
```
'>400*300'
'>640*480'
'>800*600'
'>1024*768'
'>2MP'
'>4MP'
'>6MP'
'>8MP'
'>10MP'
'>12MP'
'>15MP'
'>20MP'
'>40MP'
'>70MP'
```
## Methods
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(ImageCleaner.make_dropdown_widget)
show_doc(ImageCleaner.next_batch)
show_doc(DatasetFormatter.sort_idxs)
show_doc(ImageCleaner.make_vertical_box)
show_doc(ImageCleaner.relabel)
show_doc(DatasetFormatter.largest_indices)
show_doc(ImageCleaner.delete_image)
show_doc(ImageCleaner.empty)
show_doc(ImageCleaner.empty_batch)
show_doc(DatasetFormatter.comb_similarity)
show_doc(ImageCleaner.get_widgets)
show_doc(ImageCleaner.write_csv)
show_doc(ImageCleaner.create_image_list)
show_doc(ImageCleaner.render)
show_doc(DatasetFormatter.get_similars_idxs)
show_doc(ImageCleaner.on_delete)
show_doc(ImageCleaner.make_button_widget)
show_doc(ImageCleaner.make_img_widget)
show_doc(DatasetFormatter.get_actns)
show_doc(ImageCleaner.batch_contains_deleted)
show_doc(ImageCleaner.make_horizontal_box)
show_doc(DatasetFormatter.get_toplosses_idxs)
show_doc(DatasetFormatter.padded_ds)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
# MaterialsCoord benchmarking — symmetry of bonding algorithms
Several near neighbor methods do not produce symmetrical bonding. For example, if site A is bonded to site B, it is not guaranteed that site B will be bonded to site A. In the MaterialsCoord benchmark we enforce symmetrical bonding for all algorithms. In this notebook, we assess how unsymmetrical the bonding is for each near neighbor method.
*Written using:*
- MaterialsCoord==0.2.0
*Authors: Alex Ganose (05/20/20)*
---
First, lets initialize the near neighbor methods we are interested in.
```
# Near-neighbor (bonding) algorithms from pymatgen to be benchmarked.
from pymatgen.analysis.local_env import BrunnerNN_reciprocal, EconNN, JmolNN, \
    MinimumDistanceNN, MinimumOKeeffeNN, MinimumVIRENN, \
    VoronoiNN, CrystalNN

nn_methods = [
    MinimumDistanceNN(), MinimumOKeeffeNN(), MinimumVIRENN(), JmolNN(),
    EconNN(tol=0.5), BrunnerNN_reciprocal(), VoronoiNN(tol=0.5), CrystalNN()
]
```
Next, import the benchmark and choose which structures we are interested in.
```
from materialscoord.core import Benchmark

# Build the benchmark from several structure test groups.
structure_groups = ["common_binaries", "elemental", "A2BX4", "ABX3", "ABX4"]
bm = Benchmark.from_structure_group(structure_groups)
```
Enforcing symmetry always increases the number of assigned bonds. To assess the symmetry, we therefore calculate the number of additional bonds resulting from enforcing symmetrical bonding. Calculating the coordination number from a `StructureGraph` object (as returned by `NearNeighbors.get_bonded_structure()`) always enforces symmetry. In contrast, calculating the coordination number directly from the `NearNeighbors.get_cn()` method does not enforce symmetry.
```
import numpy as np
from tqdm.auto import tqdm

# For every near-neighbor method, record each site's coordination number both
# with enforced bond symmetry (via the StructureGraph) and without (get_cn).
symmetry_results = []
no_symmetry_results = []
for nn_method in tqdm(nn_methods):
    nn_symmetry_cns = []
    nn_no_symmetry_cns = []
    for structure in bm.structures.values():
        # get_bonded_structure() returns a StructureGraph, which symmetrises bonds.
        bs = nn_method.get_bonded_structure(structure)
        for site_idx in range(len(structure)):
            nn_symmetry_cns.append(bs.get_coordination_of_site(site_idx))
            # get_cn() reports the raw, possibly unsymmetrical coordination.
            nn_no_symmetry_cns.append(nn_method.get_cn(structure, site_idx))
    symmetry_results.append(nn_symmetry_cns)
    no_symmetry_results.append(nn_no_symmetry_cns)

# Rows: methods; columns: individual sites across all benchmark structures.
symmetry_results = np.array(symmetry_results)
no_symmetry_results = np.array(no_symmetry_results)
import pandas as pd

# Total coordination (bond count) per method, with and without symmetry.
symmetry_totals = symmetry_results.sum(axis=1)
no_symmetry_totals = no_symmetry_results.sum(axis=1)

# Fraction of bonds already assigned without enforcing symmetry, plus the
# extra fraction gained when symmetry is enforced (the two sum to 1).
no_symmetry_norm = no_symmetry_totals / symmetry_totals
symmetry_extra = 1 - no_symmetry_norm

symmetry_df = pd.DataFrame(
    columns=[n.__class__.__name__ for n in nn_methods],
    data=[no_symmetry_norm, symmetry_extra],
    index=["without symmetry", "with symmetry"]
)
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path

# Stacked bar chart: per-method fraction of bonds assigned without symmetry,
# topped up by the extra fraction gained by enforcing symmetry.
sns.set(font="Helvetica", font_scale=1.3, rc={"figure.figsize": (7, 7)})
sns.set_style("white", {"axes.edgecolor": "black", "axes.linewidth": 1.3})
plt.style.use({"mathtext.fontset": "custom", "mathtext.rm": "Arial", "axes.grid.axis": "x"})

# Shorten the long class name for display.
symmetry_df = symmetry_df.rename(columns={"BrunnerNN_reciprocal": "BrunnerNN"})
ax = symmetry_df.T.plot(kind='bar', stacked=True)
ax.set_xticklabels(symmetry_df.columns, rotation=60)
ax.legend(frameon=False, loc="upper left", bbox_to_anchor=(1, 1))
ax.set(ylabel="Fraction of bonds assigned", xlabel="", ylim=(0, 1))
ax.tick_params(axis='y', which='major', size=10, width=1, color='k', left=True, direction="in")
plt.savefig(Path("plots/symmetry.pdf"), bbox_inches="tight")
plt.show()
! open .
```
| github_jupyter |
# Python Data Analytics
<img src="images/pandas_logo.png" alt="pandas" style="width: 400px;"/>
Pandas is a numerical package used extensively in data science. You can install the ``pandas`` package by
```
pip install pandas
```
Like ``numpy``, the underlying routines are written in C with improved performance
<a href="https://colab.research.google.com/github/ryan-leung/PHYS4650_Python_Tutorial/blob/master/notebooks/04-Introduction-to-Pandas.ipynb"><img align="right" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory">
</a>
```
import pandas
pandas.__version__
import pandas as pd
import numpy as np
```
# Built-In Documentation in jupyter
For example, to display all the contents of the pandas namespace, you can type
```ipython
In [3]: pd.<TAB>
```
And to display Pandas's built-in documentation, you can use this:
```ipython
In [4]: pd?
```
# The Pandas Series Object
A Pandas Series is a one-dimensional array of indexed data.
```
data = pd.Series([1., 2., 3., 4.])
data
data = pd.Series([1, 2, 3, 4])
data
```
To retrieve back the underlying numpy array, we have the values attribute
```
data.values
```
The ``index`` is an array-like object of type ``pd.Index``.
```
data.index
```
Slicing and indexing just like Python standard ``list``
```
data[1]
data[1:3]
```
# The Pandas Index
The index is useful to denote each record, the datatypes of the index can be varied. You can think of another numpy array binded to the data array.
```
# A Series with explicit string labels instead of the default integer index.
data = pd.Series([1, 2, 3, 4], index=list("abcd"))
data
```
If we supply a dictionary to the series, it will be constructed with an index.
By default, a ``Series`` will be created where the index is drawn from the dictionary's keys (in insertion order in modern pandas; older versions sorted the keys).
```
# Build a Series of (lat, lon) tuples indexed by city name.
cities = ['Berlin', 'London', 'Sydney', 'Tokyo', 'Paris', 'Moscow']
coords = [
    (52.5170365, 13.3888599),
    (51.5073219, -0.1276474),
    (-33.8548157, 151.2164539),
    (34.2255804, 139.294774527387),
    (48.8566101, 2.3514992),
    (46.7323875, -117.0001651),
]
location = pd.Series(coords, index=cities)
location
# Dictionary-style access by label.
location['Berlin']
```
Unlike a dictionary, though, the Series also supports array-style operations such as slicing
```
location['London':'Paris']
```
# The Pandas DataFrame Object
The pandas dataframe object is a very powerful ``table`` like object.
```
# Same city -> (lat, lon) mapping, now fed to a DataFrame: each dict key
# becomes a COLUMN, with the tuple elements running down the rows.
location = {
    'Berlin': (52.5170365, 13.3888599),
    'London': (51.5073219, -0.1276474),
    'Sydney': (-33.8548157, 151.2164539),
    'Tokyo': (34.2255804, 139.294774527387),
    'Paris': (48.8566101, 2.3514992),
    'Moscow': (46.7323875, -117.0001651)
}
location = pd.DataFrame(location)
location
# Switching rows to columns is as easy as a transpose
location.T
# Change the columns by .columns attribute
location = location.T
location.columns = ['lat', 'lon']
location
location.index
location.columns
```
# Read Data
pandas has built-in data readers, you can type ``pd.read<TAB>`` to see what data format does it support:

we will focus in csv file which is widely used
We have some data downloaded from airbnb, you can find it in the folder, you may also download the file by executing the following code:
```
import urllib.request
urllib.request.urlretrieve(
'http://data.insideairbnb.com/taiwan/northern-taiwan/taipei/2018-11-27/visualisations/listings.csv',
'airbnb_taiwan_listing.csv'
)
urllib.request.urlretrieve(
'http://data.insideairbnb.com/china/hk/hong-kong/2018-11-12/visualisations/listings.csv',
'airbnb_hongkong_listing.csv'
)
```
# Read CSV files
```
airbnb_taiwan = pd.read_csv('airbnb_taiwan_listing.csv')
airbnb_taiwan
airbnb_hongkong = pd.read_csv('airbnb_hongkong_listing.csv')
airbnb_hongkong
```
# Filter data
```
mask = airbnb_hongkong['price'] > 1000
airbnb_hongkong[mask]
# In one line :
airbnb_taiwan[airbnb_taiwan['price'] > 4000]
```
# Missing Data in Pandas
Missing data is very important in pandas dataframe/series operations. Pandas performs element-to-element operations based on the index. If the indices do not match, it will produce not-a-number (NaN) results.
```
# Two Series with partially overlapping indices: only labels 1 and 2 are shared.
A = pd.Series({0: 2, 1: 4, 2: 6})
B = pd.Series({1: 1, 2: 3, 3: 5})
# Plain addition aligns on the index; non-overlapping labels yield NaN.
A + B
# fill_value treats a missing operand as 0 instead of producing NaN.
A.add(B, fill_value=0)
```
The following table lists the upcasting conventions in Pandas when NA values are introduced:
|Typeclass | Conversion When Storing NAs | NA Sentinel Value |
|--------------|-----------------------------|------------------------|
| ``floating`` | No change | ``np.nan`` |
| ``object`` | No change | ``None`` or ``np.nan`` |
| ``integer`` | Cast to ``float64`` | ``np.nan`` |
| ``boolean`` | Cast to ``object`` | ``None`` or ``np.nan`` |
Pandas treats ``None`` and ``NaN`` as essentially interchangeable for indicating missing or null values. They are convention functions to replace and find these values:
- ``isnull()``: Generate a boolean mask indicating missing values
- ``notnull()``: Opposite of ``isnull()``
- ``dropna()``: Return a filtered version of the data
- ``fillna()``: Return a copy of the data with missing values filled or imputed
```
# Fill Zero
(A + B).fillna(0)
# forward-fill: use Series.ffill(); fillna(method='ffill') is deprecated in
# modern pandas (2.x) in favour of the dedicated ffill/bfill methods, and the
# result is identical.
(A + B).ffill()
# back-fill
(A + B).bfill()
```
# Data Aggregations
we will use the airbnb data again to demonstrate data aggregations
```
airbnb_hongkong['price'].describe()
```
The following table summarizes some other built-in Pandas aggregations:
| Aggregation | Description |
|--------------------------|---------------------------------|
| ``count()`` | Total number of items |
| ``first()``, ``last()`` | First and last item |
| ``mean()``, ``median()`` | Mean and median |
| ``min()``, ``max()`` | Minimum and maximum |
| ``std()``, ``var()`` | Standard deviation and variance |
| ``mad()`` | Mean absolute deviation |
| ``prod()`` | Product of all items |
| ``sum()`` | Sum of all items |
```
data_grouped = airbnb_hongkong.groupby(['neighbourhood'])
data_mean = data_grouped['price'].mean()
data_mean
data_mean = airbnb_taiwan.groupby(['neighbourhood'])['price'].mean()
data_mean
airbnb_taiwan.groupby(['room_type']).id.count()
airbnb_hongkong.groupby(['room_type']).id.count()
airbnb_taiwan.groupby(['room_type'])['price'].describe()
airbnb_hongkong.groupby(['room_type'])['price'].describe()
```
# Combining Two or more dataframe
```
airbnb = pd.concat([airbnb_taiwan, airbnb_hongkong], keys=['taiwan', 'hongkong'])
airbnb
airbnb.index
airbnb.index = airbnb.index.droplevel(level=1)
airbnb.index
airbnb.groupby(['room_type', airbnb.index])['price'].describe()
```
# Easy Plotting in pandas
```
airbnb_taiwan.groupby(['room_type']).id.count()
%matplotlib inline
c = airbnb_taiwan.groupby(['room_type']).id.count()
c.plot.bar()
c = airbnb_taiwan.groupby(['room_type']).id.count().rename("count")
d = airbnb_taiwan.id.count()
(c / d * 100).plot.bar()
```
# Time series data
Time series data refers to metrics that have a time dimension, such as stock data and weather. In this example, we will look at some random time-series data:
```
import numpy as np
ts = pd.Series(np.random.randn(1000), index=pd.date_range('2016-01-01', periods=1000))
ts.plot()
ts = ts.cumsum()
ts.plot()
```
# Datetime index filtering
```
ts.index
ts['2016-02-01':'2016-05-01'].plot()
```
# Summary
Pandas is a very helpful package in data science; it helps you check and visualize data very quickly. This file covers only a very small portion of pandas' functionality. Please read other materials for more information.
| github_jupyter |
# Effective Data Visualization
## PyCon 2020
## Husni Almoubayyed [https://husni.space]
## Intro on packages:
- **Matplotlib and Seaborn**: The main plotting package in Python is called matplotlib. Matplotlib is the base for another package which builds on top of it called Seaborn. We will use Seaborn when possible as it makes most things a lot easier and allows us to achieve plots with sensible choices and significantly fewer lines of code. We will still use matplotlib for some things, and it is important to understand that every time Seaborn creates a plot it is calling Matplotlib in the background (it is also sometimes calling other things like statsmodels in the background to do some statistical calculations)
Matplotlib and Seaborn syntax is usually used as follows: plt. or sns.*typeofgraph*(arguments)
arguments are usually X and Y coordinates (or names of X and Y columns in a dataframe), colors, sizes, etc.
- **Pandas** is a library that handles [Pan]el [Da]ta. Basically it allows us to manipulate data in tables a lot more easily.
- **Numpy** is a python library that contains all the standard numerical operations you might wanna do
- **Sci-Kit Learn (sklearn)** is a widely used library that you can use to do most common non-deep machine learning methods.
## Intro to datasets:
We will use a few standard datasets throughout this tutorial. These can be imported from seaborn as will be shown later:
- **diamonds**: data on diamonds with prices, carats, color, clarity, cut, etc.
- **flights**: number of passengers in each month for each year for a few years in the ~50s
- **iris**: famous biology dataset that quantifies the morphologic variation of Iris flowers of three related species
- **titanic**: data on all titanic passengers including survival, age, ticket price paid, etc.
- **anscombe**: this is compiled of 4 different datasets that have the same first and second moments but look dramatically different
- **digits**: handwritten data of digits, used widely in machine learning
Other datasets that are not directly imported from seaborn:
- **financial data**: this will be requested in real time from yahoo finance using pandas.
- **CoViD-19 data**: https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv, credits to usafacts.org
## Installation Instructions
Install pip https://pip.pypa.io/en/stable/installing/
In the command line: $pip install notebook
or
Install conda https://www.anaconda.com/distribution/ (for Python >3.7)
and then run:
```
!pip install --upgrade matplotlib numpy scipy sklearn pandas seaborn plotly plotly-geo pandas_datareader
```
You might need to restart the kernel at this point to use any newly installed packages
Alternatively, you can go to bit.ly/PyConViz2020 to use a Colab hosted version of this notebook.
```
# import numpy and matplotlib and setup inline plots by running:
%pylab inline
import seaborn as sns
import pandas as pd
sns.set_style('darkgrid')
sns.set_context('notebook', font_scale=1.5)
sns.set_palette('colorblind')
# set the matplotlib backend to a higher-resolution option, on macOS, this is:
%config InlineBackend.figure_format = 'retina'
# set larger figure size for the rest of this notebook
matplotlib.rcParams['figure.figsize'] = 12, 8
```
## Data Exploration
```
anscombe = sns.load_dataset('anscombe')
for dataset in ['I','II','III','IV']:
print(anscombe[anscombe['dataset']==dataset].describe())
sns.lmplot(x='x', y='y', data=anscombe[anscombe['dataset']=='I'], height=8)
iris = sns.load_dataset('iris')
iris.head()
sns.scatterplot('petal_length', 'petal_width', hue='species', data=iris)
```
## Exercise:
On the same plot, fit 3 linear models for the 3 different iris species with the same x and y axes
```
sns.jointplot('petal_length', 'petal_width', data=iris, height=8, kind='kde')
sns.pairplot(iris, height=8, hue='species')
```
How about categorical data?
We can make boxplots and violin plots simply by running:
```
sns.catplot()
```
**Exercise:** Load up the flights dataset, plot a linear model of the passengers number as a function of year, one for each month of the year.
**Exercise:** Load up the diamonds dataset from seaborn. Plot the price as a function of carat, with different color grades colored differently. choose a small marker size and change the transparency (alpha agrument) to a smaller value than 1. Add some jitter to the x values to make them clearer.
**Exercise:** Load up the Titanic dataset from seaborn. Make a boxplot of the fare of the ticket paid against whether a person survived or not.
## Polar coordinates
```
plt.quiver??
# Cartesian quiver: 100 random anchor points, all arrows pointing along (1, 1);
# headlength=0 / headaxislength=0 render plain sticks instead of arrows.
# (cos/sin below come from the %pylab namespace loaded earlier.)
X = np.random.uniform(0, 10, 100)
Y = np.random.uniform(0, 1, 100)
U = np.ones_like(X)
V = np.ones_like(Y)
f = plt.figure()
ax = f.add_subplot(111)
ax.quiver(X, Y, U, V, headlength=0, headaxislength=0, color='steelblue')

# Polar quiver: sticks on a spiral (r grows with theta), oriented radially.
# quiver's U/V components are Cartesian, hence the rotation by theta; with
# dt = 0 the tangential term vanishes and only the radial component remains.
theta = np.linspace(0,2*np.pi,100)
r = np.linspace(0, 1, 100)
dr = 1
dt = 0
U = dr * cos(theta) - dt * sin (theta)
V = dr * sin(theta) + dt * cos(theta)
f = plt.figure()
ax = f.add_subplot(111, polar=True)
ax.quiver(theta, r, U, V, headlength=0, headaxislength=0, color='steelblue')

# Same radially-oriented sticks, but anchored at random radii.
theta = np.linspace(0,2*np.pi,100)
r = np.random.uniform(0, 1, 100)
U = dr * cos(theta)
V = dr * sin(theta)
f = plt.figure()
ax = f.add_subplot(111, polar=True)
ax.quiver(theta, r, U, V, headlength=0, headaxislength=0, color='steelblue')
```
**Exercise 1:** radial plot with all sticks starting at a radius of 1
**Exercise 2:** all sticks are horizontal
**Exercise 3:** Use a 'mollweide' projection using the projection argument of add_subplot(). Use horizontal sticks now but make sure your sticks span the entire space.
# 2. Density Estimation
Often when we are making plots, we are trying to estimate the underlying distribution from which it was randomly drawn, this is known as Density Estimation in statistics. The simplest density estimator that does not make particular assumptions on the distribution of the data (we call this nonparametric) is the histogram.
## Histograms
```
# import out first dataset, an example from biology
iris = sns.load_dataset('iris')
iris.head()
data = iris['sepal_length']
plt.
data = iris['sepal_length']
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.
ax2.
```
Formally, The histogram estimator is $$ \hat{p}(x) = \frac{\hat{\theta_j}}{h} $$ where $$ \hat{\theta_j} = \frac{1}{n} \sum_{i=1}^n I(X_i \in B_j ) $$
We can calculate the mean squared error, which is a metric that tells us how well our estimator is, it turns out to be: $$MSE(x) = bias^2(x) + Var(x) = Ch^2 + \frac{C}{nh} $$
minimized by choosing $h = (\frac{C}{n})^{1/3}$, resulting in a risk (the expected value of the MSE) of:
$$ R = \mathcal{O}(\frac{1}{n})^{2/3}$$
This means that
- There is a bias-variance tradeoff when it comes to choosing the width of the bins, lower width ($h$), means more bias and less variance. There is no choice of $h$ that optimizes both.
- The risk goes down at a pretty slow rate as the number of datapoints increases, which begs the question, is there a better estimator that converges more quickly? The answer is yes, this is achieved by:
## Kernel Density Estimation
Kernels follow the conditions:
$$ K(x) \geq 0, \int K(x) dx = 1, \int x K(x) dx = 0$$
```
sns.
```
So how is this better than the histogram?
We can again calculate the MSE, which turns out to be:
$$MSE(x) = bias^2(x) + Var(x) = C_1h^4 + \frac{C_2}{nh}$$
minimized by choosing $ h = (\frac{C_1}{4nC_2})^{1/5} $, giving a risk of:
$$ R_{KDE} = \mathcal{O}(\frac{1}{n})^{4/5} < R_{histogram}$$
This still has a bias-variance tradeoff, but the estimator converges faster than in the case of histograms. Can we do even better? The answer is no, due to something in statistics called the minimax theorem.
**Exercise**: Instead of using just petal length, consider a 2D distribution with the two axes being petal length and petal width. Plot the distribution, the Histogram of the distribution and the KDE of the distribution. Make sure you play around with bin numbers and bandwidth to get a reasonably satisfying plot
```
data=iris[['petal_length', 'petal_width']]
sns.scatterplot('petal_length', 'petal_width', data=iris)
sns.distplot(iris['petal_length'])
```
# 3. Visualizing High Dimensional Datasets
```
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits
from sklearn.datasets import make_swiss_roll
import mpl_toolkits.mplot3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
from sklearn.manifold import TSNE
digits = load_digits()
# Bare `shape(...)` is not defined in this notebook (it is a pylab-ism);
# use the array's .shape attribute instead: (1797, 64) -- 1797 images of
# 8x8 = 64 pixels each.
digits['data'].shape
```
## Principal Component Analysis
PCA computes the linear projections of greatest variance from the top eigenvectors of the data covariance matrix
Check out some more cool visualization of PCA at https://setosa.io/ev/principal-component-analysis/ and read more about the math and applications at https://www.cs.cmu.edu/~bapoczos/other_presentations/PCA_24_10_2009.pdf
**Exercise:** Use PCA to reduce the dimensionality of the digits dataset. Plot them color-coded by the different classes of digits.
### Failures of PCA
```
# Swiss roll: a 2D sheet rolled up in 3D -- intrinsically 2-dimensional data.
X, t = make_swiss_roll(1000, 0.05)
# Ward-linkage clustering constrained to the k-NN connectivity graph, so
# clusters follow the manifold instead of cutting across the roll.
ward = AgglomerativeClustering(n_clusters=5,
                               connectivity=kneighbors_graph(X, n_neighbors=5, include_self=False),
                               linkage='ward').fit(X)
labels = ward.labels_
# 3D view of the roll, colored by cluster
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for label in np.unique(labels):
    ax.scatter(X[labels == label, 0], X[labels == label, 1], X[labels == label, 2])
# Linear projection to 2D: PCA flattens the roll and mixes distant parts of
# the manifold -- this is the "failure" the section title refers to.
pca = PCA(2)
projected = pca.fit_transform(X)
for label in np.unique(labels):
    sns.scatterplot(projected[labels == label, 0], projected[labels == label, 1],
                    color=plt.cm.jet(float(label) / np.max(labels + 1)), marker='.')
```
## t-Distributed Stochastic Neighbor Embedding
Converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data.
First, t-SNE constructs a probability distribution over pairs of high-dimensional objects in such a way that similar objects have a high probability of being picked while dissimilar points have an extremely small probability of being picked. Second, t-SNE defines a similar probability distribution over the points in the low-dimensional map, and it minimizes the Kullback–Leibler divergence (KL divergence) between the two distributions with respect to the locations of the points in the map.
For more details on t-SNE, check out the original paper http://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf
```
# Non-linear embedding: t-SNE preserves local neighborhoods, so the roll's
# cluster structure survives the projection to 2D.
tSNE = TSNE(learning_rate=10,
            perplexity=30)  # perplexity ~ effective number of neighbors
projected = tSNE.fit_transform(X)
plt.scatter(projected[:, 0], projected[:, 1],
            c=labels, alpha=0.3,
            cmap=plt.cm.get_cmap('Paired', 5))
#plt.colorbar()
```
**Exercise:** Do this again for the digits dataset. Does this look better than PCA?
# 4. Interactive Visualization
```
# import libraries we're gonna use
import pandas_datareader.data as web
import datetime
import plotly.figure_factory as ff
import plotly.graph_objs as go
start = datetime.datetime(2008, 1, 1)
end = datetime.datetime(2018, 1, 1)
# This fetches the stock prices for the S&P 500 (SPY ETF) for the dates we
# selected from Yahoo Finance. The original cell left this assignment blank.
# reset_index() moves the DatetimeIndex into a 'Date' column so that the
# spy_df.Date access below works.
spy_df = web.DataReader('SPY', 'yahoo', start, end).reset_index()
data = go.Scatter(x=spy_df.Date, y=spy_df.Close)
go.Figure(data)
```
**Exercise:** A candlestick chart is a powerful chart in finance that shows the starting price, closing price, highest price and lowest price of a trading day. Create a candlestick chart of the first 90 days of the data. You can find Candlestick in the 'go' module.
**Exercise:** It's hard to compare AAPL to SPY when viewed as is. Can you plot this again in a way that makes the returns of AAPL more easily comparable to the returns of the benchmark SPY?
```
# County-level confirmed COVID-19 cases from USAFacts; keep FIPS codes as
# strings so leading zeros survive (the choropleth needs 5-digit codes).
covidf = pd.read_csv('~/Downloads/covid_confirmed_usafacts.csv',
                     dtype={"countyFIPS": str})
covidf.head()
values=covidf['4/5/20']
# Sequential blues; log-spaced bin edges because case counts span
# several orders of magnitude across counties.
colorscale = ["#f7fbff","#deebf7","#c6dbef","#9ecae1",
              "#6baed6","#4292c6","#2171b5","#08519c","#08306b"]
endpts = list(np.logspace(1, 5, len(colorscale) - 1))
fig = ff.create_choropleth(
    fips=covidf['countyFIPS'], values=covidf['4/9/20'],# scope=['usa'],
    binning_endpoints=endpts, colorscale=colorscale,
    title_text = 'CoViD-19 Confirmed cases as of 4/9/20',
    legend_title = '# of cases'
)
go.Figure(fig)
```
Many more types of plotly charts are available with examples here https://plotly.com/python/
# Effective Communication through Plotting
```
# A 10x100 horizontal gradient rendered with the (infamous) 'jet' colormap,
# used below to motivate the discussion of colormap choice.
image = [[i for i in range(100)]]*10
sns.heatmap(image, cmap='jet', square=True)
```
## Color
```
# code snippet from Jake Vandeplas https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/
def grayify_cmap(cmap):
    """Return a grayscale version of the colormap.

    Each entry's RGB channels are replaced by their perceived luminance
    (HSP model, cf. http://alienryderflex.com/hsp.html); alpha is kept.
    """
    source = plt.cm.get_cmap(cmap)
    rgba = source(np.arange(source.N))
    # Perceived luminance: sqrt of the weighted sum of squared RGB channels.
    weights = np.array([0.299, 0.587, 0.114])
    luminance = np.sqrt((rgba[:, :3] ** 2) @ weights)
    rgba[:, :3] = luminance[:, np.newaxis]
    return source.from_list(source.name + "_grayscale", rgba, source.N)
# Month x year matrix of airline passenger counts.
flights = sns.load_dataset("flights").pivot("month", "year", "passengers")
# 'jet' looks informative in color...
sns.heatmap(flights, cmap='jet')
# ...but its uneven luminance creates false bands, visible in grayscale.
sns.heatmap(flights, cmap=grayify_cmap('jet'))
```
## 3 Types of Viable Color palettes/colormaps:
### 1. Perceptually uniform sequential
```
# Perceptually uniform (viridis) and single-hue sequential (Purples) maps:
# luminance changes monotonically, so value order is encoded faithfully.
sns.heatmap(flights, cmap='viridis')
sns.heatmap(flights, cmap='Purples')
```
## 2. Diverging
```
import pandas as pd
# Average monthly high/low temperatures (deg C) for Pittsburgh.
pit_climate_df = pd.DataFrame(
    dict(Month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
         High = [2, 3, 10, 16, 22, 27, 28, 28, 24, 17, 10, 5],
         Low = [-7, -5, 0, 5, 11, 16, 18, 18, 14,7, 3, -2])
)
pit_climate_df.head()
# Diverging map centered at 0 deg C: blue = below freezing, red = above.
sns.heatmap(pit_climate_df[['High', 'Low']].T,
            cmap='coolwarm',
            center=0,#np.mean(pit_climate_df[['High', 'Low']].mean().mean()),
            square=True,
            xticklabels=pit_climate_df['Month'])
```
## 3. Categorical
example from before:
```
# Categorical (qualitative) palette: distinct hues for the 10 digit classes.
plt.scatter(projected[:, 0], projected[:, 1],
            c=digits.target, alpha=0.3,
            cmap=plt.cm.get_cmap('Paired', 10))
plt.colorbar()
# Slide images illustrating good/bad palette choices
from IPython.display import Image
Image('Resources/51417489_2006270206137719_6713863014199590912_n.png')
Image('Resources/50283372_1999138550184218_5288878489854803968_o.png')
```
You can also specify a color palette to use for the rest of a notebook or script by running
Other things to consider:
* Use salient marker types, full list at https://matplotlib.org/3.2.1/api/markers_api.html
```
d1 = np.random.uniform(-2.5, 2.5, (100, 100))
d2 = np.random.randn(5,5)
# '+' markers stay distinguishable even where many points overlap
sns.scatterplot(d1[:,0], d1[:,1], marker='+', color='steelblue')
sns.scatterplot(d2[:,0], d2[:,1], color='steelblue')
# One marker shape per species: categorical variable encoded by shape + hue
sns.lmplot('petal_length', 'petal_width', iris,
           height=10,
           hue='species',
           markers=['1','2','3'],
           fit_reg=False)
sns.scatterplot(d1[:,0], d1[:,1], marker='+', color='steelblue')
```
There are more than 2 axes on a 2-dimensional screen. Can you think of ways to include more axes?
We can use each of the following to map to an axis:
- color
- size (for numerical data)
- shape (for categorical data)
- literally making a 3D plot (as in the swiss roll, useful in the case of 3 spatial dimensions)
```
# Apply the colorblind-safe categorical palette to all subsequent seaborn plots
sns.set_palette('colorblind')
```
Read more on choosing colors at:
* https://seaborn.pydata.org/tutorial/color_palettes.html
* https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
One of my favorite resources on clarity in plotting:
* http://blogs.nature.com/methagora/2013/07/data-visualization-points-of-view.html
New interesting package that we don't have time for today but is definitely worth mentioning. Makes visualization more intuitive by making it declarative is Altair https://altair-viz.github.io
| github_jupyter |
# Mask R-CNN - Train on Shapes Dataset
This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
```
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
%matplotlib inline
# Root directory of the project
ROOT_DIR = os.getcwd()

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

# Download COCO trained weights from Releases if needed
# (skipped when the .h5 file is already present locally)
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
```
## Configurations
```
class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "shapes"

    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8

    # Number of classes (including background)
    NUM_CLASSES = 1 + 3  # background + 3 shapes

    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 128

    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 5
# Instantiate and print the full (derived + inherited) configuration
config = ShapesConfig()
config.display()
```
## Notebook Preferences
```
def get_ax(rows=1, cols=1, size=8):
    """Create a Matplotlib Axes (or array of Axes) for notebook figures.

    Funnelling figure creation through this helper gives one central
    place to control the size of every rendered image: change the
    default `size` to rescale all plots at once.
    """
    figure_size = (size * cols, size * rows)
    fig, ax = plt.subplots(rows, cols, figsize=figure_size)
    return ax
```
## Dataset
Create a synthetic dataset
Extend the Dataset class and add a method to load the shapes dataset, `load_shapes()`, and override the following methods:
* load_image()
* load_mask()
* image_reference()
```
class ShapesDataset(utils.Dataset):
    """Generates the shapes synthetic dataset. The dataset consists of simple
    shapes (triangles, squares, circles) placed randomly on a blank surface.
    The images are generated on the fly. No file access required.
    """

    def load_shapes(self, count, height, width):
        """Generate the requested number of synthetic images.
        count: number of images to generate.
        height, width: the size of the generated images.
        """
        # Add classes
        self.add_class("shapes", 1, "square")
        self.add_class("shapes", 2, "circle")
        self.add_class("shapes", 3, "triangle")

        # Add images
        # Generate random specifications of images (i.e. color and
        # list of shapes sizes and locations). This is more compact than
        # actual images. Images are generated on the fly in load_image().
        for i in range(count):
            bg_color, shapes = self.random_image(height, width)
            self.add_image("shapes", image_id=i, path=None,
                           width=width, height=height,
                           bg_color=bg_color, shapes=shapes)

    def load_image(self, image_id):
        """Generate an image from the specs of the given image ID.
        Typically this function loads the image from a file, but
        in this case it generates the image on the fly from the
        specs in image_info.
        """
        info = self.image_info[image_id]
        bg_color = np.array(info['bg_color']).reshape([1, 1, 3])
        # Start from a solid background, then paint each shape on top
        image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)
        image = image * bg_color.astype(np.uint8)
        for shape, color, dims in info['shapes']:
            image = self.draw_shape(image, shape, dims, color)
        return image

    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            # Defer to the base class for images from other sources
            super(self.__class__).image_reference(self, image_id)

    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.
        """
        info = self.image_info[image_id]
        shapes = info['shapes']
        count = len(shapes)
        # One binary mask channel per shape instance
        mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
        for i, (shape, _, dims) in enumerate(info['shapes']):
            mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),
                                                shape, dims, 1)
        # Handle occlusions: shapes are drawn in list order, so later shapes
        # cover earlier ones. Walk backwards, erasing occluded pixels from
        # each earlier instance mask.
        occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
        for i in range(count-2, -1, -1):
            mask[:, :, i] = mask[:, :, i] * occlusion
            occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
        # Map class names to class IDs.
        class_ids = np.array([self.class_names.index(s[0]) for s in shapes])
        return mask, class_ids.astype(np.int32)

    def draw_shape(self, image, shape, dims, color):
        """Draws a shape from the given specs."""
        # Get the center x, y and the size s
        x, y, s = dims
        if shape == 'square':
            cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)
        elif shape == "circle":
            cv2.circle(image, (x, y), s, color, -1)
        elif shape == "triangle":
            # Equilateral triangle with apex at (x, y-s)
            points = np.array([[(x, y-s),
                                (x-s/math.sin(math.radians(60)), y+s),
                                (x+s/math.sin(math.radians(60)), y+s),
                                ]], dtype=np.int32)
            cv2.fillPoly(image, points, color)
        return image

    def random_shape(self, height, width):
        """Generates specifications of a random shape that lies within
        the given height and width boundaries.
        Returns a tuple of three values:
        * The shape name (square, circle, ...)
        * Shape color: a tuple of 3 values, RGB.
        * Shape dimensions: A tuple of values that define the shape size
          and location. Differs per shape type.
        """
        # Shape
        shape = random.choice(["square", "circle", "triangle"])
        # Color
        color = tuple([random.randint(0, 255) for _ in range(3)])
        # Center x, y -- kept `buffer` pixels away from the image border
        buffer = 20
        y = random.randint(buffer, height - buffer - 1)
        x = random.randint(buffer, width - buffer - 1)
        # Size
        s = random.randint(buffer, height//4)
        return shape, color, (x, y, s)

    def random_image(self, height, width):
        """Creates random specifications of an image with multiple shapes.
        Returns the background color of the image and a list of shape
        specifications that can be used to draw the image.
        """
        # Pick random background color
        bg_color = np.array([random.randint(0, 255) for _ in range(3)])
        # Generate a few random shapes and record their
        # bounding boxes
        shapes = []
        boxes = []
        N = random.randint(1, 4)
        for _ in range(N):
            shape, color, dims = self.random_shape(height, width)
            shapes.append((shape, color, dims))
            x, y, s = dims
            boxes.append([y-s, x-s, y+s, x+s])
        # Apply non-max suppression with 0.3 threshold to avoid
        # shapes covering each other
        keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)
        shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]
        return bg_color, shapes
# Training dataset
dataset_train = ShapesDataset()
dataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()

# Validation dataset
dataset_val = ShapesDataset()
dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()

# Load and display random samples to sanity-check the generated data
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
    image = dataset_train.load_image(image_id)
    mask, class_ids = dataset_train.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
```
## Create Model
```
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)

# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last

if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights(model.find_last()[1], by_name=True)
```
## Training
Train in two stages:
1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.
2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers="all"` to train all layers.
```
# Stage 1: train the head branches.
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=1,
            layers='heads')

# Stage 2: fine tune all layers.
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern. Note the 10x smaller learning rate
# for fine-tuning.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=2,
            layers="all")

# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
# model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
# model.keras_model.save_weights(model_path)
```
## Detection
```
class InferenceConfig(ShapesConfig):
    # Detect one image at a time: effective batch size is
    # GPU_COUNT * IMAGES_PER_GPU = 1.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
inference_config = InferenceConfig()

# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)

# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()[1]

# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)

# Test on a random image from the validation set
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset_val, inference_config,
                           image_id, use_mini_mask=False)

log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)

# Ground truth...
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                            dataset_train.class_names, figsize=(8, 8))

# ...versus the model's predictions on the same image
results = model.detect([original_image], verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                            dataset_val.class_names, r['scores'], ax=get_ax())
```
## Evaluation
```
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset_val, inference_config,
                               image_id, use_mini_mask=False)
    # Run object detection. (model.detect molds the raw image internally,
    # so the explicit mold_image/expand_dims step the original computed
    # was never used and has been removed.)
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP for this image
    AP, precisions, recalls, overlaps =\
        utils.compute_ap(gt_bbox, gt_class_id,
                         r["rois"], r["class_ids"], r["scores"])
    APs.append(AP)

print("mAP: ", np.mean(APs))
```
| github_jupyter |
# CME 193: Introduction to Scientific Python
## Spring 2018
## Lecture 1
# Lecture 1 Contents
* Course Outline
* Introduction
* Python Basics
* Installing Python
---
# Quick Poll
## Who has written one line of code?
## ...a for loop?
## ...a function?
## Who has heard of recursion?
## ...object oriented programming?
## ...unit testing?
## ... generators ?
## Who has programmed in Python before?
## Who has programmed in R or Stata?
## Anyone written tensorflow?
# Course Outline
## Instructor
## Jacob Perricone
##### jacobp2@stanford.edu
* 2nd year MS Student in ICME
* Raised in New York City
* BSE in Operations Research and Financial Engineering with Certificates in Computer Science, Statistics and Machine Learning from Princeton University
* Python is almost always my language of choice (I use MATLAB for its solvers, C if I really care about speed).
## Remmelt Ammerlaan
##### remmelt@stanford.edu
* First year MS student in the data science track at ICME.
* Born in the Netherlands; did my Bachelor's at McGill in Math and Physics.
* My first introduction to coding was in Python and I still use it today for most of my work.
# Course Content
- Variables Functions Data types
- Strings, Lists, Tuples, Dictionaries
- File input and output (I/O)
- Classes, object oriented programming
- Exception handling, Recursion
- Numpy, Scipy, Pandas and Scikit-learn
- Jupyter, Matplotlib and Seaborn
- Unit tests, multithreading
- List subject to change depending on time, interests
# Course setup
- 8 total sessions
- 45 min lecture
- 5 min break
- 30 min interactive - demos and exercises
- First part of class will be traditional lecture, second part will give you time to work on exercises in class
- **Required deliverables:**
- Exercises
- Two homework assignments
# More abstract setup of course
- My job is to show and explain to you the possibilities and resources of python
- Ultimately, though, it is your job to teach yourself Python
- In order for it to stick, you will need to put in considerable effort
# Exercises
We will work on exercises second half of the class. **Try to finish as much as possible during class time**. They will help you understand topics we just talked about. If you do not finish in class, please try to look at and understand them prior to the next class meeting.
<br>
Feel free (or: you are strongly encouraged) to work in pairs on the exercises. It's acceptable to hand in the same copy of code for your exercise solutions if you work in pairs, but you must mention your partner.
# Exercises
#### (continued)
- At the end of the course, you will be required to hand in your solutions for the exercises you attempted.
- This is to show your active participation in class.
- You are expected to do at least 70% of the assigned exercises. Feel free to skip some problems, for example if you lack some required math background knowledge.
- Don't worry about this now, just save all the code you write.
# Feedback
- If you have comments or would like things to be done differently, please let me know as you think of them. Can tell me in person, via email or Canvas.
- Questionnaires at the end of the quarter are nice, but they won't help you.
# Workload
- The only way to learn Python, is by writing a lot of Python.
- Good news: Python is fun to write. Put in the effort these 4 weeks and reap the rewards.
- From past experience: If you are new to programming, consider this a hard 3 unit class where you will have to figure out quite a bit on your own. However, if you have a solid background in another language, this class should be pretty easy.
# To new programmers
- If you have never programmed before, be warned that this will be difficult.
- The problem: 4 weeks, 8 lectures, 1 unit. We will simply go too fast.
- Alternative: spend some time learning on your own (Codecademy / Udacity etc). There are so many excellent resources online these days. We offer this class every quarter.
# Important course information
- **Website**: https://web.stanford.edu/~jacobp2/src/html/cme193.html
- **Canvas**: Use Canvas for discussing problems, turning in homework, etc. Also, I will view participation of Canvas discussion as participation on the course.
- **Office hours**: Directly after class, or by appointment in Huang basement
# References
- The internet is an excellent source, and Google is a perfect starting point.
- Stackoverflow is the most popular online community of coding QA.
- The official documentation is also good: https://docs.python.org/3/.
- Course website and Canvas has a list of useful references - I will also try to update them with specific material with each lecture.
# Last words before we get to it
- Do the work - utilize the class time
- Make friends
- Fail often
- Fail gently
- Be resourceful
# Python Versions
- The most commonly used versions of Python are 2.7 and 3.4 (or 3.6)
- The first version of Python 3 was released 10 years ago in 2008
- The final version of Python 2.X (2.7) came out mid-2010.
- The 2.x branch will see no new major releases
- 3.x is under active development and has already seen over five years of stable releases, with 3.6 in 2016
- All improvements to the standard library are only available in Python 3
# So what's the deal here ?
## Which one to use?
- The release of Python 3.0 caused a ruckus in the developer world since Python 3 is not backward compatible.
- For a long time many large libraries did not support Python 3, so people continued on in Python 2.7
# ... That was 9 years ago
- Many companies and I'm sure some courses at Stanford still use 2.7 (I still use it too), but I guarantee that all of them, including me, want to move to Python 3.
- Python 3 is the future, and so we will be using it (release 3.6)
# Introduction

```
# The canonical first program: print() writes its argument to stdout.
print("Hello, world!")
```
# Python Basics
## Values
- A value is the fundamental thing that a program manipulates.
- Values can be ```"Hello, world!", 42, 12.34, True```
- Values have types. . .
## Types
1. Numeric Types:
- Integer Types: ``` 92, 12, 0, 1 ```
- Floats (Floating point numbers): ``` 3.1415```
- Complex Numbers: ``` a + b*i ``` (composed of real and imaginary component, both of which are floats ``` a + b*i ```)
- Booleans: ```True/False ``` are a subtype of integers (0 is false, 1 is true)
2. Sequence types:
- Lists : ``` [1,2,3,4,5]```
- Tuples: ``` (1, 2) ```
- range objects: More on this later
3. Strings:
- ``` "Hello World"``` (strings in python are actually immutable sequences but more on this later)
4. There are more...
## Types continued
- Use type to find out the type of a variable, as in
- ``` python
type("Hello, Word")
```
which returns ``` <class 'str'>```
<br>
- Unlike C/C++ and Java, variables can change types. Python keeps track of the type internally (strongly-typed).
## Variables
- One of the most basic and powerful concepts is that of a variable.
- A variable assigns a name to a value.
- Variables are nothing more than reserved memory locations that store values.
- Python variables does not need explicit declaration to reserve memory.
```
message = "Hello, world!"  # str
n = 42                     # int
e = 2.71                   # float
# note we can print variables:
print(n) # yields 42
# note: everything after pound sign is a comment
```
## Variables
- Almost always preferred to use variables over values:
- Easier to update code
- Easier to understand code (useful naming)
- What does the following code do:
``` python
print(4.2 * 3.5)
```
```
# Named variables make the computation self-documenting
length = 4.2
height = 3.5
area = length * height
print(area)
```
## Keywords
- Not allowed to use keywords for naming, they define structure and rules of a language.
- Python has 29 keywords, they include:
- ```True```
- ```False```
- ```continue```
- ```def```
- ```for```
- ```and ```
- ```return ```
- ```is```
- ```in ```
- ```class```
## Integers
- Basic Operators for integers:
- ```a + b```: addition of a and b
- ```a - b```: subtraction of a and b
- ```a * b```: multiplication of a and b
- ``` a / b```: division of and b
- ``` a % b ```: Modulus (divides a by b and returns the remainder)
- ```a**b```: a to the power of b
- ``` a // b``` : Floor division. Divides a by b and truncates the decimals. If ``` a/b ``` is negative, rounds away from zero toward negative infinity
- Note: ```a / b``` differs in versions of python:
- Python 3 : ```5 / 2``` yields ```2.5```
- Python 2 : ```5 / 2``` yields ```2```
- In Python 2 if one of the operands is a float, the return value is a float: ```5 / 2.0``` yields ```2.5```
- Note: Python automatically uses long integers for very large integers.
- Bitwise operators on integer types:
- ``` x | y ``` bitwise or of x and y
- ``` x ^ y ``` exclusive or of x and y
- ``` x & y ``` bitwise and of x and y
- ``` x << n ``` x shifted to left by n bits
- ``` x >> n ``` x shifted to right by n bits
## Floats
- A floating point number approximates a real number. Note: only finite precision, and finite range (overflow)!
- Basic Operators for floats:
- ```a + b```: addition of a and b
- ```a - b```: subtraction of a and b
- ```a * b```: multiplication of a and b
- ``` a / b```: division of and b
- ``` a % b ```: Modulus (divides a by b and returns the remainder)
- ```a**b```: a to the power of b
- ``` a // b``` : Floor division. Divides a by b and truncates the decimals. If ``` a/b ``` is negative, rounds away
```
import sys
# sys.float_info reports the platform's float characteristics:
# max/min representable values, machine epsilon, digits of precision, etc.
print((sys.float_info))
```
## Comparison Operators
#### Comparisons:
1. Equals: ``` == ```
- ``` 5 == 5 ``` yields ```True```
2. Does not equal: ```!=```
- ``` 5 != 5``` yields ```False```
3. Comparison of object identity: ``` is ```
- ``` x is y ``` yields True if x and y are the same object
4. Negated objected identity: ``` is not ```
- ``` x is not y ``` yields False if x and y are the same object
5. Greater than: ``` > ```
- ```5 > 4 ``` yields ```True```
6. Greater than or equal to: ``` >= ``` (greater than or equal to)
- ```5 >= 5``` yields ```True```
7. Similarly, we have ```<``` and ```<=```.
#### Logical Operators:
There are three logical boolean operations that are used to compare values. They evaluate expressions down to Boolean values, returning either ``` True ``` or ``` False ```
- ``` x or y ```: if x is false, then y, else x (only evaluates y if x is false)
- ``` x and y ```: if x is false, then x, else y (only evaluates second if x is true)
- ``` not x ```: if x is false, then True, else False
Typically used to evaluate whether two or more expressions are true: ``` (5 < 7) and (7 <= 10)```, which is equivalent to ``` 5 < 7 <= 10 ```
## Statements, expressions and operators
- A statement is an instruction that Python can execute, such as
``` x=3 ```
- Python also supports statements such as
* ```x += 3```
- x = x + 3
* ```x -= 3```
- x = x - 3
* ```x *= 3```
- x = x*3
* ```x /= 3```
- x = x/3
* ``` x //= 3```
- x = x//3
- **Note**: the statements above combine an operation and assignment, and thereby only work if the variable x is defined (i.e. ``` x += 3 ``` will fail unless it is preceeded by ``` x = some value ```)
- Operators are special symbols that represent computations, like addition, the values they *operate* on are called operands
- An expression is a combination of values, variable and operators, like ``` x+3 ```
## Modules
- Not all functionality available comes automatically when starting Python, and with good reasons.
- We can add extra functionality by importing modules:
```import math```
- Then we can use things like ```math.pi```
- Useful modules: math, os, random, and as we will see later numpy, scipy and matplotlib.
- More on modules later!
```
import math
# Names from the module are accessed with dot notation:
math.pi
```
## Control-flow
## Control statements
Control statements allow you to do more complicated tasks:
- ```if```
- ```for```
- ```while```
```
# State variable used by the if-statement examples below
traffic_light = 'green'
```
Using if, we can execute part of a program conditional on some statement being true.
```python
if traffic_light == 'green':
move()
```
## Indentation
In Python, blocks of code are defined using indentation. The indentation within the block needs to be consistent.
This means that everything indented after an if statement is only executed if the statement is True.
If the statement is False, the program skips all indented code and resumes at the first line of unindented code
```
statement = True # Changing this to False will change the control flow!
if statement:
    # if statement is True, then all code here
    # gets executed but not if statement is False
    print("The statement is true")
    print("Else, this would not be printed")
# the next lines get executed either way
print("Hello, world,")
print("Bye, world!")
```
## Indentation
- Whitespace is meaningful in Python: especially indentation and placement of newlines.
- Use a newline to end a line of code.
- Use a backslash when must go to next line prematurely.
- No braces to mark blocks of code in Python...
- Use consistent indentation instead.
- The first line with less indentation is outside of the block.
- The first line with more indentation starts a nested block
- Often a colon appears at the start of a new block. (E.g. for function and class definitions.)
- The preferred method of indendation is spaces (not tabs). Python 3 now support inconsistent use of spaces and tabs, but stick to one.
## ```if-else``` statement
We can add more conditions to the ```if``` statement using ```else``` and ```elif``` (short for else if).
Consider the following example
```
# Skipping the cell below as to not introduce functions yet.
def drive():
    print("Drive")

def accelerate():
    print("Accelerate")

def stop():
    print("Stop!")

x = 5
print(x)

# 'green' matches the first branch
traffic_light = 'green'
if traffic_light == 'green':
    drive()
elif traffic_light == 'orange':
    accelerate()
else:
    stop()

# 'orange' falls through to the elif
traffic_light = 'orange'
if traffic_light == 'green':
    drive()
elif traffic_light == 'orange':
    accelerate()
else:
    stop()

# any other color is caught by the else
traffic_light = 'blue'
if traffic_light == 'green':
    drive()
elif traffic_light == 'orange':
    accelerate()
else:
    stop()
```
## ```for``` loops
- Very often, one wants to repeat some action. This can be achieved by a for loop
```
for i in range(5):
print(i**2, end='')
print("hello world")
print("hello world", end = ' ')
print("I am on the same line")
```
## ```for``` loops
- ```range(n)``` yields an immutable ```sequence``` of integers $0, \ldots, n-1$. More on this later!
## ```while``` loops
- When we do not know how many iterations are needed, we can use while.
```
i = 1
while i < 100:
print(i, end=' ')
i += i**2 # a += b is short for a = a + b
```
## ```continue```
- The keyword ```continue``` continues with the next iteration of the smallest enclosing loop.
```
for num in range(2, 10):
if num % 2 == 0:
print("Found an even number", num)
continue
print("Found an odd number", num)
```
## ```break```
- The keyword ```break``` allows us to jump out of the smallest enclosing for or while loop.
```
# Check each integer in 2..max_n-1 for primality by trial division.
max_n = 10
for n in range(2, max_n):
    is_prime = True  # assume prime until a factor is found
    for x in range(2, n):
        if n % x == 0: # n divisible by x
            is_prime = False
            print(n, 'equals', x, '*', n/x)
            break  # stop at the first factor; break exits only the inner loop
    # is_prime is still True only if the inner loop never hit `break`,
    # i.e. no factor of n was found
    if is_prime:
        print(n, 'is a prime number')
```
## ```pass```
- The pass statement does nothing, which can come in handy when you are working on something and want to implement some part of your code later.
```
traffic_light = 'green'
if traffic_light == 'green':
pass
else:
stop()
```
# Installing Python
# Anaconda
* Mac OS and Windows
* Anaconda Python is a Python distribution maintained by Continuum Analytics
* The distribution contains the most widely used Python implementation and
up-to-date versions of the most important modules for data science, analytics,
and scientific computing.
* Anaconda is easy to download and install and is free to use in personal,
academic, and commercial environments.
* By default, Anaconda Python installs into a user local directory and usually
does not impact a system provided version of Python. In some cases, having
multiple version of Python installed causes problems.
* Check out this link to download python 3.6 through conda https://anaconda.org/anaconda/python
* I'd recommend setting up an environment for your python using https://conda.io/docs/user-guide/tasks/manage-python.html. Come see me if you need some help
# Jupyter Notebook
```python
# this is a code cell
print("hello from Jupyter notebook")
# hit ctrl-return to execute code block
# hit shift-return to execute code block and move to next cell
```
## This is a Markdown cell
* Markdown is a light weight way to annotate text for nice presentation on the
web or in PDF
* For example the `*` will create a bulletted list
* We can easily do *italics*
## Resources
* <http://jupyter.org/>
* <https://try.jupyter.org/>
```
x = 5
print(x)
```
## This is a markdown cell
# Using the Python interpreter
* An *interpreter* is a program that reads and executes commands
* It is also sometimes called a REPL or read-evaluate-print-loop
* One way to interact with Python is to use the interpreter
* This is useful for interactive work, learning, and simple testing
## Start the Python interpreter on Mac OS
* Open `Terminal.app`. This is located at
`/Applications/Utilities/Terminal.app` or may be found using Spotlight Search.
* This is the Bash prompt where commands are entered after `$`
* Type `python` and hit enter to start the interpreter (The default for Mac is python 2.7, if you see this come see me)
* This a great way to experiment and learn Python
* To exit the interpreter and return to bash:
* Enter `>>> exit()`
* Use the keyboard command `ctrl-d`
## Start the Python interpreter from Jupyter
It is possible to access a Python interpreter from inside of the Jupyter
notebook. This can be a very quick and handy way to experiment with small bits
of Python code.
* From the Jupyter home screen, select the "Terminal" from the "New" dropdown
menu.
# Scripting model
* A Python script is a text file containing Python code
* Python script file names typically end in `.py`
## Let's create our first script
1. Create a text file named `firstscript.py` with your favorite text editor
2. Insert the following Python code into `firstscript.py`:
```python
print("Hello from Python.")
print("I am your first script!")
```
3. Open your favorite terminal emulator (`Terminal.app` on Mac OS)
4. Navigate to the directory containing `firstscript.py` with the `cd` command.
5. Execute the command `$ python firstscript.py`
## Why scripts?
Let's write a simple Python script to compute the first `n` numbers in the
Fibonacci series. As a reminder, each number in the Fibonacci series is the sum
of the two previous numbers. Let `F(i)` be the `i`th number in the series. We
define `F(0) = 0` and `F(1) = 1`, then `F(i) = F(i-1) + F(i-2)` for `i >= 2`.
Numbers `F(0)` to `F(n)` can be computed with the following Python code:
```python
n = 10
if n >= 0:
fn2 = 0
print(fn2,end=',')
if n >= 1:
fn1 = 1
print(fn1,end=',')
for i in range(2,n+1):
fn = fn1 + fn2
print(fn,end=',')
fn2 = fn1
fn1 = fn
print()
```
**Note, the above code is a preview of Python syntax that we will review in this
course.**
## Fibonacci (continued)
Now, paste this code into a file named `fib.py`. Execute the file with
the command `$ python fib.py`. The result should look like:
```
$ python fib.py
0,1,1,2,3,5,8,13,21,34,55,
```
To see the utility of scripts, we need to add a bit more code. Change the first
line of `fib.py` to be:
```
import sys
n = int(sys.argv[1])
```
This will instruct the script to obtain the value of `n` from the command line:
```
$ python fib.py 0
0,
$ python fib.py 5
0,1,1,2,3,5,
$ python fib.py 21
0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765,10946,
```
We have increased the utility of our program by making it simple to run from the
command line with different input arguments.
# Fibonacci (continue):
1. When one types ``` python my_script.py ``` all code at indentation level 0 gets run
2. Unlike other languages, there's no ``` main()``` function that gets run automatically
3. However, the interpreter will define a few special variables. If the script is being run directly, the interpreter will set the variable ``` __name__ = '__main__' ```. Now, the proper way to write the fib.py script includes material that will be presented in subsequent slides.
```python
import sys
def fib(n):
    """Print Fibonacci numbers F(0)..F(n), comma-separated, on one line.

    Prints nothing for n < 0; no trailing newline is emitted.
    """
    prev, curr = 0, 1
    if n >= 0:
        print(prev, end=',')
    if n >= 1:
        print(curr, end=',')
    for _ in range(2, n + 1):
        # Advance the pair (F(i-2), F(i-1)) -> (F(i-1), F(i)).
        prev, curr = curr, prev + curr
        print(curr, end=',')
if __name__ == '__main__':
n = int(sys.argv[1])
fib(n) ```
| github_jupyter |
```
! nvidia-smi
```
# Introduction
This notebook holds the code for the [Involution](https://arxiv.org/abs/2103.06255) layer in TensorFlow. The idea behind this layer is to invert the inherent properties of convolution. Where convolution is spatial-agnostic and channel-specific, involution is spatial-specific and channel-agnostic.
# Imports
```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
```
# Convolution
To understand involution we need to first understand convolution. Let us consider $X\in\mathbb{R}^{H\times W\times C_{inp}}$ denote the input feature map where $H, W$ represent its height and width and $C_{inp}$ be its channel size. A collection of $C_{out}$ number of convolution filters with fixed kernel size of $K \times K$ is denoted as $\mathcal{F}\in\mathbb{R}^{C_{out}\times C_{inp}\times K\times K}$.
The filters perform a Multiply-Add operation on the input feature map in a sliding-window manner to yield the output feature map $Y\in \mathbb{R}^{H\times W\times C_{out}}$.
# Involution
Involution kernels $\mathcal{H}\in \mathbb{R}^{H\times W\times K\times K\times G}$ are devised to operate in a symmetrically opposite manner to convolution kernels. Observing the shape of the involution kernels, we note the following:
- Each pixel of the input feature map is entitled to get its own involution kernel.
- Each kernel is of the shape of $K\times K\times G$.
- The output $Y$ will be of the same shape as that of the input feature map $X$.
The problem with involution is that we cannot define a fixed shaped kernel, that would hurt resolution independence in the neural network. This thought led the researchers to conceptualize a generation function $\phi$ that generates the involution kernels conditioned on the original input tensor.
$$
\mathcal{H}_{ij}=\phi{(X_{ij})}\\
\mathcal{H}_{ij}=W_{1}\sigma{(W_{0}X_{ij})}\\
$$
```
class Involution(tf.keras.layers.Layer):
    """Involution layer (Li et al., https://arxiv.org/abs/2103.06255).

    Spatial-specific, channel-agnostic counterpart of convolution: a
    per-pixel K x K kernel is generated from the input feature map itself
    (via a small bottleneck conv stack) and applied to the unfolded input
    patches with a multiply-add.
    """

    def __init__(self, channel, group_number, kernel_size, stride, reduction_ratio):
        """
        Args:
            channel: channel count C of the input feature map.
            group_number: number of kernel-sharing groups G.
            kernel_size: spatial extent K of each generated kernel.
            stride: spatial stride; the output is (H//stride, W//stride).
            reduction_ratio: bottleneck reduction of the kernel-generating
                network; must not exceed `channel`, else the bottleneck
                Conv2D would be created with 0 filters.
        """
        super().__init__()
        # Fix: the original passed print(...) (which returns None) as the
        # assert message, so a failing assert carried no message; use a
        # plain string so AssertionError reports it properly.
        assert reduction_ratio <= channel, \
            "Reduction ratio must be less than or equal to channel size"
        self.channel = channel
        self.group_number = group_number
        self.kernel_size = kernel_size
        self.stride = stride
        self.reduction_ratio = reduction_ratio
        # For stride > 1 the kernel-generating branch sees a pooled input so
        # that one kernel is produced per *output* pixel; identity otherwise.
        self.o_weights = tf.keras.layers.AveragePooling2D(
            pool_size=self.stride,
            strides=self.stride,
            padding="same") if self.stride > 1 else tf.identity
        # phi: 1x1 bottleneck conv -> BN -> ReLU -> 1x1 conv emitting
        # K*K*G kernel values per spatial location.
        self.kernel_gen = tf.keras.Sequential([
            tf.keras.layers.Conv2D(
                filters=self.channel//self.reduction_ratio,
                kernel_size=1),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.ReLU(),
            tf.keras.layers.Conv2D(
                filters=self.kernel_size*self.kernel_size*self.group_number,
                kernel_size=1)
        ])

    def call(self, x):
        """Apply involution to `x` of shape (B, H, W, C); returns (B, H', W', C)."""
        _, H, W, C = x.shape
        H = H//self.stride
        W = W//self.stride
        # Extract input feature blocks (im2col-style unfolding).
        unfolded_x = tf.image.extract_patches(
            images=x,
            sizes=[1,self.kernel_size,self.kernel_size,1],
            strides=[1,self.stride,self.stride,1],
            rates=[1,1,1,1],
            padding="SAME") # B, H, W, K*K*C
        unfolded_x = tf.keras.layers.Reshape(
            target_shape=(H,
                          W,
                          self.kernel_size*self.kernel_size,
                          C//self.group_number,
                          self.group_number)
        )(unfolded_x) # B, H, W, K*K, C//G, G
        # Generate the per-pixel kernels from the (possibly pooled) input.
        kernel_inp = self.o_weights(x)
        kernel = self.kernel_gen(kernel_inp) # B, H, W, K*K*G
        kernel = tf.keras.layers.Reshape(
            target_shape=(H,
                          W,
                          self.kernel_size*self.kernel_size,
                          1,
                          self.group_number)
        )(kernel) # B, H, W, K*K, 1, G
        # Multiply-Add: broadcast each kernel over the C//G channels of its
        # group, then sum over the K*K patch axis.
        out = tf.math.multiply(kernel, unfolded_x) # B, H, W, K*K, C//G, G
        out = tf.math.reduce_sum(out, axis=3) # B, H, W, C//G, G
        out = tf.keras.layers.Reshape(
            target_shape=(H,
                          W,
                          C)
        )(out) # B, H, W, C
        return out
```
# Comparison
In this section we will try to emulate [TensorFlow's tutorial on CIFAR classification](https://www.tensorflow.org/tutorials/images/cnn). Here we build one model with convolutional layers, while the other is based on involutional layers.
```
# Load CIFAR-10 and build shuffled/batched tf.data pipelines (batch size 256).
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(256).batch(256)
test_ds = tf.data.Dataset.from_tensor_slices( (test_images, test_labels)).batch(256)
# Human-readable names for the 10 CIFAR-10 integer labels, in label order.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Sanity check: show the first 25 training images with their class names.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    # The CIFAR labels happen to be arrays,
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
```
## Convolutional Neural Network
```
# Baseline CNN (TF CIFAR-10 tutorial): three 3x3 conv stages, the first two
# followed by 2x2 max-pooling, then a small dense head emitting 10 logits.
convolution_model = tf.keras.models.Sequential()
convolution_model.add(tf.keras.layers.Conv2D(
    32, (3, 3), activation='relu', input_shape=(32, 32, 3), padding="same"))
convolution_model.add(tf.keras.layers.MaxPooling2D((2, 2)))
convolution_model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding="same"))
convolution_model.add(tf.keras.layers.MaxPooling2D((2, 2)))
convolution_model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding="same"))
convolution_model.add(tf.keras.layers.Flatten())
convolution_model.add(tf.keras.layers.Dense(64, activation='relu'))
convolution_model.add(tf.keras.layers.Dense(10))
convolution_model.summary()

# The head outputs raw logits, so the loss applies the softmax itself.
convolution_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
conv_history = convolution_model.fit(train_ds, epochs=10, validation_data=test_ds)
```
### Loss plot
```
plt.plot(conv_history.history["loss"], label="loss")
plt.plot(conv_history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
```
### Accuracy plot
```
plt.plot(conv_history.history["accuracy"], label="acc")
plt.plot(conv_history.history["val_accuracy"], label="val_acc")
plt.legend()
plt.show()
```
# Involutional Neural Network
```
# Same head as the conv model, but feature extraction uses Involution layers.
# NOTE(review): channel=3 keeps the channel count fixed at 3 through every
# involution stage, so this model has far fewer parameters than the CNN;
# accuracy numbers are therefore not an apples-to-apples comparison.
involution_model = tf.keras.models.Sequential([
    Involution(channel=3,group_number=1,kernel_size=3,stride=1,reduction_ratio=2),
    tf.keras.layers.ReLU(name="relu1"),
    tf.keras.layers.MaxPooling2D((2, 2)),
    Involution(channel=3,group_number=1,kernel_size=3,stride=1,reduction_ratio=2),
    tf.keras.layers.ReLU(name="relu2"),
    tf.keras.layers.MaxPooling2D((2, 2)),
    Involution(channel=3,group_number=1,kernel_size=3,stride=1,reduction_ratio=2),
    tf.keras.layers.ReLU(name="relu3"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10),
])
# Raw logits out, so the loss applies the softmax internally.
involution_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy']
)
inv_history = involution_model.fit(
    train_ds,
    epochs=10,
    validation_data=test_ds
)
involution_model.summary()
```
### Loss Plot
```
plt.plot(inv_history.history["loss"], label="loss")
plt.plot(inv_history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
```
### Accuracy Plot
```
plt.plot(inv_history.history["accuracy"], label="acc")
plt.plot(inv_history.history["val_accuracy"], label="val_acc")
plt.legend()
plt.show()
```
### Observation
A fun little experiment is to see the activation maps of the involution kernel.
```
# Build a multi-output model exposing the three named ReLU activations, then
# visualise them next to the input for the first 10 test images.
layer_names = ["relu1","relu2","relu3"]
outputs = [involution_model.get_layer(name).output for name in layer_names]
vis_model = tf.keras.Model(involution_model.input, outputs)
fig, axes = plt.subplots(nrows=10, ncols=4, figsize=(10, 20))
[ax.axis("off") for ax in np.ravel(axes)]  # hide ticks/frames on every panel
for (ax_orig, ax_relu1, ax_relu2, ax_relu3), test_image in zip(axes, test_images[:10]):
    # expand_dims adds the batch dimension the model expects.
    relu_images_list = vis_model.predict(tf.expand_dims(test_image,0))
    ax_orig.imshow(tf.clip_by_value(test_image, 0, 1))
    ax_orig.set_title("Input Image")
    ax_relu1.imshow(tf.clip_by_value(relu_images_list[0].squeeze(), 0, 1))
    ax_relu1.set_title("ReLU 1")
    ax_relu2.imshow(tf.clip_by_value(relu_images_list[1].squeeze(), 0, 1))
    ax_relu2.set_title("ReLU 2")
    ax_relu3.imshow(tf.clip_by_value(relu_images_list[2].squeeze(), 0, 1))
    ax_relu3.set_title("ReLU 3")
```
| github_jupyter |
# Section 4.3 : CYCLICAL MOMENTUM
## Summary
- Learning rate and momentum are closely dependent, and both must be optimised
- Momentum should be set as high as possible without causing instabilities in training
- Momentum cannot be optimised in a similar way to LR, by using a momentum finder
- Optimum settings found to be use of cyclical LR (initially increasing) and cyclical momentum (initially decreasing)
- If constant LR is used, a large (but not too large), constant momentum should be used
- Too large a constant momentum results in instabilities, which are visible in early training
## Momentum in SGD
SGD parameter updates:
$\theta_{iter+1} = \theta_{iter} - \epsilon\delta L(F(x,\theta),\theta),$
where $\theta$ are the parameters, $\epsilon$ is the learning rate, and $L(F(x,\theta),\theta)$ is the gradient.
Momentum modifies the update rule to:
$\nu_{iter+1} = \alpha\nu_{iter} - \epsilon\delta L(F(x,\theta),\theta)$
$\theta_{iter+1} = \theta_{iter}+\nu_{iter+1},$
where $\nu$ is velocity, and $\alpha$ is the momentum coefficient, i.e. the effect of $\alpha$ on the update is of the same scale as $\epsilon$.
## Cyclical momentum Example
Let's take the same model and train a few different configurations of learning rate and momentum:
```
%matplotlib inline
from __future__ import division
import sys
import os
sys.path.append('../')
from Modules.Basics import *
from Modules.Class_Basics import *
data, features = importData()
nFolds = 5
preprocParams = {'normIn':True, 'pca':False}
compileArgs = {'loss':'binary_crossentropy', 'optimizer':'sgd', 'depth':3, 'width':128, 'lr':5e2}
trainParams = {'epochs':20, 'batch_size':256, 'verbose':0}
```
### Constant LR, Constant Momentum
```
from pathlib import Path
import os

def _train_const_lr_const_mom(momentum):
    """Cross-validated training with fixed LR=500 and the given fixed momentum."""
    model_args = {'version': 'modelRelu', 'nIn': len(features),
                  'compileArgs': {**compileArgs, 'momentum': momentum}}
    return cvTrainClassifier(data, features, nFolds, preprocParams, model_args,
                             trainParams, useEarlyStop=False, plot=False)

results_ConstLR_ConstMom85, history_ConstLR_ConstMom85 = _train_const_lr_const_mom(0.85)
results_ConstLR_ConstMom90, history_ConstLR_ConstMom90 = _train_const_lr_const_mom(0.90)
results_ConstLR_ConstMom95, history_ConstLR_ConstMom95 = _train_const_lr_const_mom(0.95)
results_ConstLR_ConstMom99, history_ConstLR_ConstMom99 = _train_const_lr_const_mom(0.99)

getModelHistoryComparisonPlot(
    [history_ConstLR_ConstMom85, history_ConstLR_ConstMom90,
     history_ConstLR_ConstMom95, history_ConstLR_ConstMom99],
    ['LR=500, Mom=0.85', 'LR=500, Mom=0.90', 'LR=500, Mom=0.95', 'LR=500, Mom=0.99'],
    cv=True)
```
Similar to the paper, we see that using a constant learning rate requires high values of momentum to converge quickly: as the coefficient is increased, the networks reach their minima in fewer and fewer epochs. At very high momenta (<span style="color:red">red</span>), the network eventually overfits and starts diverging. However, it shows slight instability in its early stages of training, which (as the paper suggests) could be used to catch the eventual overfitting early and adjust the coefficient.
### Constant LR, Cyclical Momentum
```
stepScale = 4

def _train_const_lr_cyc_mom(max_mom, min_mom):
    """Cross-validated training with fixed LR=500 and momentum cycled in [min_mom, max_mom]."""
    model_args = {'version': 'modelRelu', 'nIn': len(features), 'compileArgs': compileArgs}
    callbacks = {'LinearCMom': {'maxMom': max_mom, 'minMom': min_mom,
                                'scale': stepScale, 'plotMom': False}}
    return cvTrainClassifier(data, features, nFolds, preprocParams, model_args,
                             trainParams, useEarlyStop=False, plot=False,
                             useCallbacks=callbacks)

results_ConstLR_CycMom95_85, history_ConstLR_CycMom95_85 = _train_const_lr_cyc_mom(0.95, 0.85)
results_ConstLR_CycMom99_90, history_ConstLR_CycMom99_90 = _train_const_lr_cyc_mom(0.99, 0.90)
results_ConstLR_CycMom99_95, history_ConstLR_CycMom99_95 = _train_const_lr_cyc_mom(0.99, 0.95)

getModelHistoryComparisonPlot(
    [history_ConstLR_CycMom95_85, history_ConstLR_CycMom99_90,
     history_ConstLR_CycMom99_95, history_ConstLR_ConstMom99],
    ['LR=500, Cyclical mom [0.95-0.85]', 'LR=500, Cyclical mom [0.99-0.90]',
     'LR=500, Cyclical mom [0.99-0.95]', 'LR=500, Mom=0.99'],
    cv=True)
```
Here we can see that using a cyclical momentum schedule can be quite unstable (the loss fluctuates, possibly an artifact of the step size), but it does provide some resistance to overfitting (late test loss is slow to rise).
Comparing a constant momentum of 0.99 (<span style="color:red">red</span>) to a cyclical momentum between 0.99 and 0.95 (<span style="color:green">green</span>), we can see that the cycling suppresses the rise in test loss in late training and achieves a better loss minimum. Initial training is also better; however, the artifacts of the scheduling cause mild divergence around epochs 7 and 15, preventing the network from converging earlier than might otherwise be possible.
As the width of the cycle is increased (<span style="color:green">green</span> to <span style="color:orange">orange</span>), these artifacts become more apparent as the mild divergences become sharp spikes.
### Cyclical LR, Constant Momentum
```
stepScale = 4

def _train_cyc_lr_const_mom(momentum):
    """Cross-validated training with LR cycled in [50, 500] and the given fixed momentum."""
    model_args = {'version': 'modelRelu', 'nIn': len(features),
                  'compileArgs': {**compileArgs, 'momentum': momentum}}
    callbacks = {'LinearCLR': {'maxLR': 5e2, 'minLR': 5e1,
                               'scale': stepScale, 'plotLR': False}}
    return cvTrainClassifier(data, features, nFolds, preprocParams, model_args,
                             trainParams, useEarlyStop=False, plot=False,
                             useCallbacks=callbacks)

results_CycLR_ConstMom85, history_CycLR_ConstMom85 = _train_cyc_lr_const_mom(0.85)
results_CycLR_ConstMom90, history_CycLR_ConstMom90 = _train_cyc_lr_const_mom(0.90)
results_CycLR_ConstMom95, history_CycLR_ConstMom95 = _train_cyc_lr_const_mom(0.95)
results_CycLR_ConstMom99, history_CycLR_ConstMom99 = _train_cyc_lr_const_mom(0.99)

getModelHistoryComparisonPlot(
    [history_CycLR_ConstMom85, history_CycLR_ConstMom90, history_CycLR_ConstMom95,
     history_CycLR_ConstMom99, history_ConstLR_ConstMom99],
    ['Cyclical LR [50-500], mom=0.85', 'Cyclical LR [50-500], mom=0.90',
     'Cyclical LR [50-500], mom=0.95', 'Cyclical LR [50-500], mom=0.99',
     'LR=500, Mom=0.99'],
    cv=True)
```
Here we see that moving to a cyclical LR schedule might help reduce the instability of using very high momenta. Comparing <span style="color:red">red</span> to <span style="color:purple">purple</span>, we find that the cyclical LR gives a slightly smoother loss evolution, reaches a better loss, and suppresses the late-stage overfitting.
### Cyclical LR, cyclical Momentum
```
stepScale = 4

def _train_cyc_lr_cyc_mom(max_mom, min_mom):
    """Cross-validated training with LR cycled in [50, 500] and momentum cycled in [min_mom, max_mom]."""
    model_args = {'version': 'modelRelu', 'nIn': len(features), 'compileArgs': compileArgs}
    callbacks = {'LinearCLR': {'maxLR': 5e2, 'minLR': 5e1,
                               'scale': stepScale, 'plotLR': False},
                 'LinearCMom': {'maxMom': max_mom, 'minMom': min_mom,
                                'scale': stepScale, 'plotMom': False}}
    return cvTrainClassifier(data, features, nFolds, preprocParams, model_args,
                             trainParams, useEarlyStop=False, plot=False,
                             useCallbacks=callbacks)

results_CycLR_CycMom95_85, history_CycLR_CycMom95_85 = _train_cyc_lr_cyc_mom(0.95, 0.85)
results_CycLR_CycMom99_90, history_CycLR_CycMom99_90 = _train_cyc_lr_cyc_mom(0.99, 0.90)
results_CycLR_CycMom99_95, history_CycLR_CycMom99_95 = _train_cyc_lr_cyc_mom(0.99, 0.95)

getModelHistoryComparisonPlot(
    [history_CycLR_CycMom95_85, history_CycLR_CycMom99_90, history_CycLR_CycMom99_95,
     history_ConstLR_CycMom99_95],
    ['Cyclical LR [50-500], Cyclical Mom [0.95-0.85]',
     'Cyclical LR [50-500], Cyclical Mom [0.99-0.90]',
     'Cyclical LR [50-500], Cyclical Mom [0.99-0.95]',
     'LR=500, Cyclical Mom [0.99-0.95]'],
    cv=True)
```
Comparing the best CLR+CM setup (<span style="color:green">green</span>) to the fixed LR+CM setup (<span style="color:red">red</span>), it seems that cycling the LR degrades the performance of the network (the best loss is higher); however, the network never overfits: unlike the <span style="color:red">red</span> line, it reaches its minimum after 7 epochs and then plateaus. It's possible that the stability is actually a consequence of underfitting, in which case the learning rate could perhaps be increased.
### Comparison
```
# Compare the best run from each schedule configuration.
# Fix: the last legend label previously read 'Cyclical LR [0.99-0.95]', but the
# history it labels (history_CycLR_CycMom99_95) cycles *momentum* in 0.99-0.95.
getModelHistoryComparisonPlot(
    [history_ConstLR_ConstMom99, history_ConstLR_CycMom99_95,
     history_CycLR_ConstMom99, history_CycLR_CycMom99_95],
    ['LR=500, Mom=0.99', 'LR=500, Cyclical Mom [0.99-0.95]',
     'Cyclical LR [50-500], Mom=0.99',
     'Cyclical LR [50-500], Cyclical Mom [0.99-0.95]'],
    cv=True)
```
Comparing the best-performing setups from each schedule configuration, it seems that of the hyperparameters tested, for this dataset and architecture, a cycled LR with a constant momentum (<span style="color:green">green</span>) provides the lowest loss, but eventually overfits.
Cycling the momentum and keeping the LR constant (<span style="color:orange">orange</span>) reaches almost as good a loss, but after 40% more epochs, and although it later provides less overfitting, it does suffer from regular peaks and troughs due to the cycling.
Cycling both the LR and the momentum (<span style="color:red">red</span>) causes convergence in the same number of epochs as <span style="color:green">green</span>, but at a higher loss. Having reached its minimum, the test loss then remains flat, possibly indicating that with further adjustments of the hyperparameters it might provide superior performance to <span style="color:green">green</span>.
| github_jupyter |
```
import torch
from torch.autograd import Variable
import torch.nn as nn
import captcha_setting
import operator
import torchvision.transforms as transforms
from PIL import Image
import cv2 as cv
import os
from matplotlib import pyplot as plt
import numpy as np
import copy
#for eachimg in filter_containor:
class CNN(nn.Module):
    """Captcha classifier: three conv blocks followed by a three-layer MLP head.

    The head emits MAX_CAPTCHA * ALL_CHAR_SET_LEN logits (constants from the
    project-local captcha_setting module), i.e. one score block per
    character position.
    """
    def __init__(self):
        super(CNN, self).__init__()
        # Each block: 3x3 conv -> BN -> dropout -> ReLU -> 2x2 max-pool
        # (each pool halves the spatial size).
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.Dropout(0.1), # drop 10% of activations (p=0.1)
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.Dropout(0.1), # drop 10% of activations (p=0.1)
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.Dropout(0.1), # drop 10% of activations (p=0.1)
            nn.ReLU(),
            nn.MaxPool2d(2))
        # After three /2 poolings the feature map is (W//8) x (H//8) x 64.
        self.fc = nn.Sequential(
            nn.Linear((captcha_setting.IMAGE_WIDTH//8)*(captcha_setting.IMAGE_HEIGHT//8)*64, 1024),
            nn.Dropout(0.1), # drop 10% of activations (p=0.1)
            nn.ReLU())
        self.rfc = nn.Sequential(
            nn.Linear(1024, 256),#captcha_setting.MAX_CAPTCHA*captcha_setting.ALL_CHAR_SET_LEN),
            nn.ReLU()
        )
        # Final logits; no activation here (argmax / logit-based loss downstream).
        self.rfc2 = nn.Sequential(
            nn.Linear(256, captcha_setting.MAX_CAPTCHA*captcha_setting.ALL_CHAR_SET_LEN),
        )
    def forward(self, x):
        """Run a (B, 1, H, W) grayscale batch through the net; returns logits."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = out.view(out.size(0), -1)  # flatten per sample
        out = self.fc(out)
        #print(out.shape)
        out = self.rfc(out)
        out = self.rfc2(out)
        #out = out.view(out.size(0), -1)
        #print(out.shape)
        return out
# Evaluation loop: segment each captcha image into 6 character crops with
# OpenCV, classify each crop with the trained CNN, and report the overall
# exact-match accuracy over the directory.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# cnn = CNN()
cnn = CNN()
cnn.eval()  # inference mode: disables dropout, freezes BN statistics
cnn.load_state_dict(torch.load('model_final_mix_2.pkl'))
cnn.to(device)
transform = transforms.Compose([
    # transforms.ColorJitter(),
    transforms.Grayscale(),
    transforms.ToTensor(),
    # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
root_path = '/home/ning_a/Desktop/CAPTCHA/dark_web_captcha/mania_data/'
img_list = os.listdir(root_path)
correct = 0
total = 0
for img_cur_path in img_list:
    total += 1
    label_predicted = ''
    img = cv.imread(root_path+img_cur_path)
    img_temp = cv.imread(root_path+img_cur_path)
    # Contrast-normalize, grayscale, then Otsu-threshold (inverted) so that
    # characters become white blobs for contour detection.
    n_img = np.zeros((img.shape[0],img.shape[1]))
    img_aft = cv.normalize(img, n_img, 0,255,cv.NORM_MINMAX)
    gray = cv.cvtColor(img_aft,cv.COLOR_BGR2GRAY)
    ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
    im2,contours,hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
    filter_containor = []
    temp_img = copy.deepcopy(img)
    # Running bounding box over all character contours; initialised to the
    # opposite extremes (image assumed to be 160x60 at most).
    max_x = 160
    max_y = 60
    max_x2 = 0
    max_y2 = 0
    for i in range(0,len(contours)):
        x, y, w, h = cv.boundingRect(contours[i])
        newimage=img[y:y+h,x:x+w] # y selects rows (height), x selects columns (width)
        nrootdir=("cut_image/")
        if h<5 or w<5:
            continue  # skip tiny noise contours
        filter_containor.append([x, y, w, h])
        cv.rectangle(temp_img, (x,y), (x+w,y+h), (153,153,0), 1)
        if not os.path.isdir(nrootdir):
            os.makedirs(nrootdir)
        cv.imwrite( nrootdir+str(i)+".jpg",newimage)
        # Grow the union bounding box to cover this contour.
        if(x<max_x):
            max_x = x
        if(y<max_y):
            max_y = y
        if(x+w>max_x2):
            max_x2 = x+w
        if(y+h>max_y2):
            max_y2 = y+h
    # Crop the union box and split it into 6 equal-width character slices
    # (the captcha is assumed to contain exactly 6 characters).
    cv.imwrite( "temp3.png",img_temp[max_y:max_y2, max_x:max_x2])
    seg_img = cv.imread("temp3.png")
    filter_containor = []
    w = (max_x2-max_x)//6
    # print(max_x, max_x2)
    # print(max_y, max_y2)
    # print(img_temp[9:45,27:130].shape)
    for i in range(6):
        # print(seg_img[:,i*w:w+i*w].shape)
        cv.imwrite( "temp3.png",seg_img[:,i*w:w+i*w])
        filter_containor.append(Image.open("temp3.png"))
    # print(img_temp.shape)
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Classify each character slice independently and append its char.
    for eachimg in filter_containor:
        #print(eachimg)
        fix_size = (30, 60)  # (width, height) expected by the CNN input
        eachimg = eachimg.resize(fix_size)
        image = transform(eachimg).unsqueeze(0)
        plt.imshow(eachimg)
        plt.show()
        print(image.shape)
        image = torch.tensor(image, device=device).float()
        image = Variable(image).to(device)
        #print(image.shape)
        #image, labels = image.to(device), labels.to(device)
        # vimage = generator(image)
        predict_label = cnn(image)
        #labels = labels.cpu()
        predict_label = predict_label.cpu()
        _, predicted = torch.max(predict_label, 1)
        label_predicted+= captcha_setting.ALL_CHAR_SET[predicted]
        # print(captcha_setting.ALL_CHAR_SET[predicted])
    # print(captcha_setting.ALL_CHAR_SET[predicted])
    # Ground-truth label is encoded in the filename (before '_' or '.').
    parsed_label = ''
    if('_' in img_cur_path):
        parsed_label = img_cur_path.split('_')[0]
    else:
        parsed_label = img_cur_path.split('.')[0]
    if(parsed_label == label_predicted):
        correct +=1
    print(label_predicted, parsed_label)
    # break
print(correct/total)
```
| github_jupyter |
```
__author__ = 'Tilii: https://kaggle.com/tilii7'
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.cm as cm
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
```
Simply loading the files without any transformation. If you wish to manipulate the data in any way, it should be done here before doing dimensionality reduction in subsequent steps.
```
# Load the raw competition files; any feature engineering should happen here,
# before the dimensionality-reduction steps below.
print('\nLoading files ...')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
# Feature matrix without the identifier/label columns, plus the binary target.
X = train.drop(['id', 'target'], axis=1).values
y = train['target'].values.astype(np.int8)
target_names = np.unique(y)
# Fixed typo in the printed message: 'valuess' -> 'values'.
print('\nThere are %d unique target values in this dataset:' % (len(target_names)), target_names)
```
Principal Component Analysis (**[PCA](https://en.wikipedia.org/wiki/Principal_component_analysis)**) identifies the combination of components (directions in the feature space) that account for the most variance in the data.
```
# Project the training matrix onto its first n_comp principal components and
# report how much variance each component explains.
n_comp = 20
print('\nRunning PCA ...')
pca = PCA(n_components=n_comp, svd_solver='full', random_state=1001)
X_pca = pca.fit_transform(X)
print('Explained variance: %.4f' % pca.explained_variance_ratio_.sum())
print('Individual variance contributions:')
for ratio in pca.explained_variance_ratio_:
    print(ratio)
```
Better than 90% of the data is explained by a single principal component. Just a shade under 99% of variance is explained by 15 components, which means that this dataset can be safely reduced to ~15 features.
Here we plot our 0/1 samples on the first two principal components.
```
# One colour per class: blue for target 0, red for target 1.
colors = ['blue', 'red']
plt.figure(1, figsize=(10, 10))
# Overlay one scatter per class so each gets its own colour and legend entry.
# NOTE(review): class 1 is drawn last, so red points sit on top of blue ones
# and visually over-represent the positive class (acknowledged in the text below).
for color, i, target_name in zip(colors, [0, 1], target_names):
    plt.scatter(X_pca[y == i, 0], X_pca[y == i, 1], color=color, s=1,
                alpha=.8, label=target_name, marker='.')
plt.legend(loc='best', shadow=False, scatterpoints=3)
plt.title(
    "Scatter plot of the training data projected on the 1st "
    "and 2nd principal components")
# Axis labels carry the explained-variance share of each component.
plt.xlabel("Principal axis 1 - Explains %.1f %% of the variance" % (
    pca.explained_variance_ratio_[0] * 100.0))
plt.ylabel("Principal axis 2 - Explains %.1f %% of the variance" % (
    pca.explained_variance_ratio_[1] * 100.0))
plt.savefig('pca-porto-01.png', dpi=150)
plt.show()
```
There is a nice separation between various groups of customers, but not so between 0/1 categories within each group. This is somewhat exaggerated by the fact that "0" points (blue) are plotted first and "1" points (red) are plotted last. There seems to be more red than blue in that image, even though there are >25x "0" points in reality. I'd be grateful if someone knows how to plot this in a way that would not create this misleading impression.
Regardless, 0/1 points are not separated well at all. That means that they will not be easy to classify, which we all know by now.
**[t-SNE](https://lvdmaaten.github.io/tsne/)** could potentially lead to better data separation/visualization, because unlike PCA it preserves the local structure of data points. The problem with sklearn implementation of t-SNE is its lack of memory optimization. I am pretty sure that the t-SNE code at the very bottom will lead to memory errors on most personal computers, but I leave it commented out if anyone wants to try.
Instead, I ran t-SNE using a much faster and more memory-friendly commandline version, which can be found at the link above.
Here is the output of that exercise:

Again, we can see clear separation between different groups of customers. Some groups even have a nice "coffee bean" structure where two subgroups can be identified (gender?). Alas, there is no clear separation between 0/1 categories.
In strictly technical terms, we are screwed :D
```
# tsne = TSNE(n_components=2, init='pca', random_state=1001, perplexity=30, method='barnes_hut', n_iter=1000, verbose=1)
# X_tsne = tsne.fit_transform(X) # this will either fail or take a while (most likely overnight)
# plt.figure(2, figsize=(10, 10))
# for color, i, target_name in zip(colors, [0, 1], target_names):
# plt.scatter(X_tsne[y == i, 0], X_tsne[y == i, 1], color=color, s=1,
# alpha=.8, label=target_name, marker='.')
# plt.legend(loc='best', shadow=False, scatterpoints=3)
# plt.title('Scatter plot of t-SNE embedding')
# plt.xlabel('X')
# plt.ylabel('Y')
# plt.savefig('t-SNE-porto-01.png', dpi=150)
# plt.show()
```
It was kindly brought up to me that a strange-looking PCA plot above is probably because of categorical variables in this dataset. I leave the original plot up there for posterity.
Let's encode the categorical variables and try again.
```
from sklearn.preprocessing import MinMaxScaler
def scale_data(X, scaler=None):
    """Scale X into the [-1, 1] range.

    Fits a fresh MinMaxScaler on X when no scaler is supplied; otherwise the
    given (already fitted) scaler is reused. Returns the transformed array
    together with the scaler so the same transform can be applied elsewhere.
    """
    if not scaler:
        scaler = MinMaxScaler(feature_range=(-1, 1)).fit(X)
    return scaler.transform(X), scaler
# Rebuild the feature matrix as a DataFrame (column names are needed below).
X = train.drop(['id', 'target'], axis=1)
test.drop(['id'], axis=1, inplace=True)
# Remember where the train rows end so the combined frame can be split back.
n_train = X.shape[0]
train_test = pd.concat((X, test)).reset_index(drop=True)
# Columns ending in '_cat' are categorical and get one-hot encoded.
# NOTE(review): col_to_drop duplicates col_to_dummify and is never used afterwards.
col_to_drop = X.columns[X.columns.str.endswith('_cat')]
col_to_dummify = X.columns[X.columns.str.endswith('_cat')].astype(str).tolist()
for col in col_to_dummify:
    # One-hot encode this column, prefixing every dummy column with its name.
    dummy = pd.get_dummies(train_test[col].astype('category'))
    columns = dummy.columns.astype(str).tolist()
    columns = [col + '_' + w for w in columns]
    dummy.columns = columns
    train_test = pd.concat((train_test, dummy), axis=1)
# Drop the original categorical columns, then scale everything to [-1, 1].
train_test.drop(col_to_dummify, axis=1, inplace=True)
train_test_scaled, scaler = scale_data(train_test)
# Split the scaled array back into the train and test parts.
X = np.array(train_test_scaled[:n_train, :])
test = np.array(train_test_scaled[n_train:, :])
print('\n Shape of processed train data:', X.shape)
print(' Shape of processed test data:', test.shape)
```
Repeating PCA and making another plot of the first two principal components.
```
# Re-run PCA on the one-hot-encoded, scaled features and plot again.
print('\nRunning PCA again ...')
pca = PCA(n_components=n_comp, svd_solver='full', random_state=1001)
X_pca = pca.fit_transform(X)
print('Explained variance: %.4f' % pca.explained_variance_ratio_.sum())
print('Individual variance contributions:')
for j in range(n_comp):
    print(pca.explained_variance_ratio_[j])
# Scatter of the first two components, one colour per class.
# (`colors` was defined for the previous PCA plot above.)
plt.figure(1, figsize=(10, 10))
for color, i, target_name in zip(colors, [0, 1], target_names):
    plt.scatter(X_pca[y == i, 0], X_pca[y == i, 1], color=color, s=1,
                alpha=.8, label=target_name, marker='.')
plt.legend(loc='best', shadow=False, scatterpoints=3)
plt.title(
    "Scatter plot of the training data projected on the 1st "
    "and 2nd principal components")
plt.xlabel("Principal axis 1 - Explains %.1f %% of the variance" % (
    pca.explained_variance_ratio_[0] * 100.0))
plt.ylabel("Principal axis 2 - Explains %.1f %% of the variance" % (
    pca.explained_variance_ratio_[1] * 100.0))
plt.savefig('pca-porto-02.png', dpi=150)
plt.show()
```
I think that's a better plot visually and there is a good number of well-defined clusters, but still no clear separation between 0/1 points.
We can re-do the t-SNE plot as well using the modified dataset. **Don't try this at home** - it takes 24+ hours using a commandline version of bh_tsne.
Anyway, here is the new t-SNE plot:

Again, lots of interesting clusters, but blue and red dots overlap for the most part.
This just happens to be a difficult classification problem, so maybe it is not a big surprise that raw data does not contain enough info for t-SNE to distinguish clearly between the classes.
Unfortunately, it is not much better even after training. Below is a t-SNE plot of activations from the last hidden layer (3rd) of a neural network that was trained on this dataset for 80 epochs. If you download the full version (it is roughly 10.5 x 10.5 inches), you may be able to see better that lots of red dots are concentrated in the lower left quadrant (6-9 on a clock dial), and that there are clearly fewer red dots in the upper right quadrant (0-3 on a clock dial). So the network has succeeded somewhat in sequestering the red dots, but they still overlap quite a bit with blue ones.

Later I will have more t-SNE plots from neural network activations in [__this kernel__](https://www.kaggle.com/tilii7/keras-averaging-runs-gini-early-stopping).
| github_jupyter |
```
# Grid-search driver: launches one training run per (dih, dm, warmup)
# combination for each parameter grid, blocking on each subprocess and
# redirecting its stdout into a per-run log file.
import torch
import pandas as pd
import matplotlib.pyplot as plt
import os
import subprocess
import numpy as np
os.chdir("/home/jok120/sml/proj/attention-is-all-you-need-pytorch/")
# Command template; {data}/{name}/{warmup}/{dm}/{dih} are filled in per run.
basic_train_cmd = "/home/jok120/build/anaconda3/envs/pytorch_src2/bin/python " +\
    "~/sml/proj/attention-is-all-you-need-pytorch/train.py " +\
    "{data} {name} -e 1000 -b 1 -nws {warmup} -cl " +\
    "-dm {dm} -dih {dih} --early_stopping 20 --train_only --combined_loss"
# params = {"warmup": [1, 10, 100, 250, 500, 1000 ],
#           "dm": [128, 256, 512, 1024, 2048],
#           "dih": [512, 1024, 2048],
#           "dwv": [8, 20, 24, 48, 128, 256]}
params_repeat = {"warmup": [5, 100, 500, 1000],
                 "dm": [128, 256, 512, 1024, 2048],
                 "dih": [512, 1024, 2048]}
new_params = {"warmup": [500, 1000, 2000, 4000],
              "dm": [56, 128, 256, 512, 1024, 2048],
              "dih": [512, 1024, 2048]}
# data_path = "data/data_190529_multi_helix_turns.tch"
data_path = "data/data_190530_3p7k.tch"
# NOTE(review): this `name` is immediately shadowed by the loop variable below.
name = "0530-3p7k-{:03}"
for name, param in zip(["0530-sh-msea-{:03}", "0530-sh-mseb-{:03}"], [params_repeat, new_params]):
    i = 0
    cmds = []
    for dih in param["dih"]:
        for dm in param["dm"]:
            for warmup in param["warmup"]:
                cmd = basic_train_cmd.format(data=data_path, name=name.format(i),
                                             warmup=warmup, dm=dm, dih=dih)
                cmds.append(cmd)
                # `name` still contains '{:03}' after f-string insertion,
                # so the trailing .format(i) fills in the run index.
                print(cmd[164:] + f" logs/{name}.log".format(i))
                with open("logs/" + name.format(i) + ".log", "w") as log:
                    # shell=True with a string command; blocks until the run ends.
                    subprocess.call(cmd, stdout=log, shell=True)
                i += 1
```
# Analysis
```
from glob import glob
import seaborn as sns
sns.set(style="darkgrid")
# One CSV of per-step training metrics per run.
result_files = sorted(glob("logs/0530-3p7k*.train"))
dfs = [pd.read_csv(f) for f in result_files]
# Strip the 'logs/' prefix and '.train' suffix to recover the run title.
titles = [f[5:-6] for f in result_files]
dfes = []
for df in dfs:
    # Derive RMSE from the logged MSE, then keep only end-of-epoch rows.
    df["rmse"] = np.sqrt(df["mse"])
    dfes.append(df[df["is_end_of_epoch"]].reset_index())
def do_plot(df, title):
    """Plot the per-epoch loss curves (drmsd, rmsd, rmse, combined) for one run."""
    curves = (("drmsd", "drmsd"), ("rmsd", "rmsd"),
              ("rmse", "rmse"), ("combined", "drmsd+mse"))
    for column, label in curves:
        sns.lineplot(x=df.index, y=column, data=df, label=label)
    plt.ylabel("Loss Value")
    plt.xlabel("Epoch")
    plt.legend()
    plt.title("{} Training".format(title))
do_plot(dfes[0], titles[0])
# For every run, keep the epoch row with the lowest combined loss.
mins = []
for df, title in zip(dfes, titles):
    row = df[df["combined"] == df["combined"].min()]
    row["title"] = title[:]
    mins.append(row)
mins_df = pd.concat(mins)
mins_df.sort_values("combined", inplace=True)
do_plot(dfes[232], titles[232])
mins_df
# Run indices of the ten best runs (title suffix is the zero-padded index).
names = [int(t[-3:]) for t in mins_df["title"][:10]]
print(names)
for n in names:
    do_plot(dfes[n], titles[n])
    plt.show()
# Log-file paths for the ten best runs, parsed for hyper-parameters below.
train_logs = [f"logs/{t}.log" for t in mins_df["title"][:10]]
train_logs
def get_arg(namespace, arg):
    """Return the string value of *arg* parsed from an argparse Namespace repr.

    Expects the repr form 'Namespace(a=1, b=2, ...)' where the entry being
    extracted is followed by a comma.
    """
    import re  # `re` is never imported at notebook level; keep the helper self-contained
    pattern = f"{arg}=(.+?),"
    return re.search(pattern, namespace).group(1)
# Enrich each best-epoch row with the hyper-parameters parsed from the
# argparse Namespace printed on the first line of its run log.
for row in mins:
    t = row["title"].item()
    with open(f"logs/{t}.log", "r") as f:
        args = f.readline()
    for a in ["d_model", "n_warmup_steps", "d_inner_hid"]:
        row[a] = get_arg(args, a)
# NOTE(review): `mins` is rebound here from a list of rows to a DataFrame.
mins = pd.concat(mins)
mins.sort_values("combined", inplace=True)
mins
def do_loss_plots_on_var(d, var):
    """Box + swarm plots of each loss metric against one hyper-parameter.

    Draws the combined, rmsd and rmse losses in three separate figures.
    """
    for idx, metric in enumerate(("combined", "rmsd", "rmse")):
        if idx:
            plt.figure()
        sns.boxplot(x=var, y=metric, data=d)
        sns.swarmplot(x=var, y=metric, data=d, color="black")
# Visualise how each hyper-parameter affects the best achievable losses.
do_loss_plots_on_var(mins, "d_model")
do_loss_plots_on_var(mins, "n_warmup_steps")
do_loss_plots_on_var(mins, "d_inner_hid")
# Compact leaderboard of the best runs and their hyper-parameters.
summary = mins[["title", "combined", "drmsd", "rmsd", "rmse", "d_model", "n_warmup_steps", "d_inner_hid"]]
" ".join(summary["title"][:10].to_list())
summary
```
| github_jupyter |
# <center>Value Function Approximation</center>
## <center>Part II</center>
## <center>Reference: Sutton and Barto, Chapter 9-11</center>
## <center>Table of Contents</center>
<br>
* **Batch Reinforcement Methods**<br><br>
* **Least Squares Policy Iteration(LSPI)**<br><br>
# <center>Batch Reinforcement Methods</center>
## <center>Batch Reinforcement Methods</center>
<br>
* Gradient descent is simple and appealing<br><br>
* But it is not sample efficient<br><br>
* Batch methods seek to find the best fitting value function<br><br>
* Given the agent's experience ("training data")<br><br>
## <center>Least Squares Prediction</center>
<center><img src="img/fa2_slides1.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Stochastic Gradient Descent with Experience Replay</center>
<center><img src="img/fa2_slides3.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Experience Replay in Deep Q-Networks (DQN)</center>
<center><img src="img/fa2_slides4.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
# <center>DQN in ATARI</center>
## The model
<center><img src="img/fa2_ex1.JPG" alt="Multi-armed Bandit" style="width: 700px;"/></center>
## Performance
<center><img src="img/fa2_ex2.JPG" alt="Multi-armed Bandit" style="width: 700px;"/></center>
## Benefits of Experience Replay and Double DQN
<center><img src="img/fa2_ex3.JPG" alt="Multi-armed Bandit" style="width: 700px;"/></center>
## DQN Example and Code
<center><img src="img/ex.png" alt="Multi-armed Bandit" style="width: 300px;"/></center>
#### CartPole Example
The agent has to decide between two actions - moving the cart left or right - so that the pole attached to it stays upright.
##### State Space
State is the difference between the current screen patch and the previous one. This will allow the agent to take the velocity of the pole into account from one image.
##### Q-network
* Our model will be a convolutional neural network that takes in the difference between the current and previous screen patches.
* It has two outputs, representing Q(s,left) and Q(s,right) (where s is the input to the network).
* In effect, the network is trying to predict the quality of taking each action given the current input.
<center><img src="img/2.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
##### Replay Memory
* Experience replay memory is used for training the DQN.
* It stores the transitions that the agent observes, allowing us to reuse this data later.
* By sampling from it randomly, the transitions that build up a batch are decorrelated.
* It has been shown that this greatly stabilizes and improves the DQN training procedure.
<center><img src="img/1.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
##### Input Extraction
How do we get the crop of the cart?
<center><img src="img/3.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
##### Selecting an Action
This is done based on $\epsilon$ greedy policy.
<center><img src="img/4.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
##### Training
<center><img src="img/5.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
<center><img src="img/6.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
<center><img src="img/7.png" alt="Multi-armed Bandit" style="width: 600px;"/></center>
# <center>Linear Least Squares Prediction</center>
## <center>Linear Least Squares Prediction</center>
<br><br>
* Experience replay finds least squares solution<br><br>
* But it may take many iterations<br><br>
* Using linear value function approximation $\hat{v}(s, w) = x(s)^Tw$<br><br>
* We can solve the least squares solution directly
## <center>Linear Least Squares Prediction</center>
<center><img src="img/fa2_slides5.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Linear Least Squares Prediction Algorithms</center>
<center><img src="img/fa2_slides6.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Linear Least Squares Prediction Algorithms</center>
<center><img src="img/fa2_slides7.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Least Squares Policy Iteration(LSPI)</center>
<center><img src="img/fa2_slides8.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Least Squares Action-Value Function Approximation</center>
<center><img src="img/fa2_slides9.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Least Squares Control</center>
<center><img src="img/fa2_slides10.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Least Squares Q-Learning</center>
<center><img src="img/fa2_slides11.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
## <center>Least Squares Policy Iteration(LSPI) Algorithm</center>
<center><img src="img/fa2_slides12.JPG" alt="Multi-armed Bandit" style="width: 600px;"/></center>
| github_jupyter |
**This notebook is an exercise in the [Pandas](https://www.kaggle.com/learn/pandas) course. You can reference the tutorial at [this link](https://www.kaggle.com/residentmario/summary-functions-and-maps).**
---
# Introduction
Now you are ready to get a deeper understanding of your data.
Run the following cell to load your data and some utility functions (including code to check your answers).
```
import pandas as pd
# Truncate DataFrame display so large frames stay readable in the notebook.
pd.set_option("display.max_rows", 5)
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
# Bind the answer-checking utilities (q1..q7) for this exercise set.
from learntools.core import binder; binder.bind(globals())
from learntools.pandas.summary_functions_and_maps import *
print("Setup complete.")
reviews.head()
```
# Exercises
## 1.
What is the median of the `points` column in the `reviews` DataFrame?
```
# Median score across all reviews.
median_points = reviews.points.median()
# Check your answer
q1.check()
#q1.hint()
q1.solution()
```
## 2.
What countries are represented in the dataset? (Your answer should not include any duplicates.)
```
# Distinct countries, duplicates removed.
countries = reviews.country.unique()
# Check your answer
q2.check()
#q2.hint()
q2.solution()
```
## 3.
How often does each country appear in the dataset? Create a Series `reviews_per_country` mapping countries to the count of reviews of wines from that country.
```
# Review counts per country (value_counts sorts descending by default).
reviews_per_country = reviews.country.value_counts()
# Check your answer
q3.check()
#q3.hint()
q3.solution()
```
## 4.
Create variable `centered_price` containing a version of the `price` column with the mean price subtracted.
(Note: this 'centering' transformation is a common preprocessing step before applying various machine learning algorithms.)
```
# Mean-center the price column (a common preprocessing step).
centered_price = reviews.price-reviews.price.mean()
# Check your answer
q4.check()
q4.hint()
q4.solution()
```
## 5.
I'm an economical wine buyer. Which wine is the "best bargain"? Create a variable `bargain_wine` with the title of the wine with the highest points-to-price ratio in the dataset.
```
# Row index of the best points-to-price ratio, then that row's title.
bargain_idx = (reviews.points / reviews.price).idxmax()
bargain_wine = reviews.loc[bargain_idx, 'title']
# Check your answer
q5.check()
#q5.hint()
q5.solution()
```
## 6.
There are only so many words you can use when describing a bottle of wine. Is a wine more likely to be "tropical" or "fruity"? Create a Series `descriptor_counts` counting how many times each of these two words appears in the `description` column in the dataset.
```
# Count descriptions containing each keyword (substring match, at most one
# hit counted per review).
n_trop = reviews.description.map(lambda desc: "tropical" in desc).sum()
n_fruity = reviews.description. map (lambda desc: "fruity" in desc).sum()
descriptor_counts = pd.Series([n_trop, n_fruity], index = ['tropical', 'fruity'])
# Check your answer
q6.check()
#q6.hint()
q6.solution()
```
## 7.
We'd like to host these wine reviews on our website, but a rating system ranging from 80 to 100 points is too hard to understand - we'd like to translate them into simple star ratings. A score of 95 or higher counts as 3 stars, a score of at least 85 but less than 95 is 2 stars. Any other score is 1 star.
Also, the Canadian Vintners Association bought a lot of ads on the site, so any wines from Canada should automatically get 3 stars, regardless of points.
Create a series `star_ratings` with the number of stars corresponding to each review in the dataset.
```
def stars(row):
    """Map a review row to a 1-3 star rating.

    Canadian wines always get 3 stars; otherwise 95+ points is 3 stars,
    85-94 is 2 stars, and anything lower is 1 star.
    """
    if row.country == 'Canada' or row.points >= 95:
        return 3
    if row.points >= 85:
        return 2
    return 1
# Apply the rating rule row-wise to produce one star rating per review.
star_ratings = reviews.apply(stars, axis ='columns' )
# Check your answer
q7.check()
q7.hint()
q7.solution()
```
# Keep going
Continue to **[grouping and sorting](https://www.kaggle.com/residentmario/grouping-and-sorting)**.
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/pandas/discussion) to chat with other learners.*
| github_jupyter |
<h1><center>Report 12</center></h1>
<h3><center>Jiachen Tian</center><h3>
# Introduction
This week's task is primarily glint detection. Right now there are three potential ways: KCF, Hough Transform, and Ratio Comparison. Even though Ratio Comparison works well under certain conditions within a certain displacement of the glint as illustrated in the last report, its noise-sensitive nature guarantees its failure under uncertain inputs (thousands of different frames with different noise patterns). The KCF tracker works a little better compared to ratio comparison but it tends to fail with too many inputs. In comparison, the Hough transform might still work the best with proper parameter settings. All the above algorithms will be illustrated.
# Setup
Setup path to include our files. import them. use `autoreload` to get changes in as they are made.
```
import os
import sys
import cv2
from matplotlib import pyplot as plt
import statistics
import numpy as np
# load our code
sys.path.insert(0, os.path.abspath('../'))
from plotting import auto_draw
# specific to jupyter notebook
from jupyter_help import cvplt, cvplt_sub
#Import image processing function from optimization
# load any changes as we make them
%load_ext autoreload
%autoreload 2
```
# Ratio comparison
The previous report illustrates how to map the rectangle back to the original place without taking into account two crucial parameters: ratio differences between each glint caused by noise (present even with noise filtering), and variation in terms of displacements. To diminish errors caused by ratio difference, one could find the standard deviation between ratios for each part (top left, top right, bottom left, bottom right) and get the smallest std. However, using standard deviation would leave the program more vulnerable to errors caused by displacement changes.
```
#Read in the original image
image = cv2.imread("../input/chosen_pic.png")
keep1 = image.copy()
keep2 = image.copy()
#Run auto threshold on the original image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#Don't blur it (a 1x1 kernel leaves the image unchanged)
blurred = cv2.blur(gray,(1, 1))
# Fixed binary threshold at 153; maxval is also 153, so foreground pixels
# end up with value 153 rather than the usual 255.
_,proc = cv2.threshold(blurred,153,153,cv2.THRESH_BINARY)
#Draw the rendered program
cvplt(proc)
#Set the color
color = (255, 0, 0)
#When calculating standard deviation, the individual ratio is based upon number of non-zero pixels.
sample1 = np.array(proc[158:170, 125:137]) #User chosen coordinates
show1 = cv2.rectangle(keep1, (125, 158), (137, 170), color, 2)
number1 = np.count_nonzero(sample1)
cvplt(show1)
print(number1)
#Move it right 10 unit(Standard displacement I set on the previous report)
sample2 = np.array(proc[158:170, 135:147])
show2 = cv2.rectangle(keep2, (135, 158), (147, 170), color, 2)
number2 = np.count_nonzero(sample2)
cvplt(show2)
print(number2)
#Even when it moves to the border, we could still get roughly 27 false pixel that interferes the results.
```
# Hough Transform
Previously thought that this method is infeasible on glint due to the small size of glint. However, with the help of OTSU's method and results from the pupil detection, it might just work.
```
#Read in the original image
image = cv2.imread("../input/chosen_pic.png")
result = image.copy()
#BGR to grey to eliminate extra layers
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#First parameter: blur(set to 10)
blur = (10, 10)
image = cv2.blur(image,blur)
#second parameter: canny(set to 40 to 50)
#Any reasonable parameter would suffice. OTSU would handle the neutralization.
#Third parameter: threshold(determined by OTSU)
#We want the threshold to be exclusively on glint. So first crop the frame
cropped = image[158:170, 125:137]
#Run OTSU on the glint crop to pick the threshold automatically
thre,proc = cv2.threshold(cropped,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#Threshold we need
print(thre)
#Apply the OTSU-derived threshold to the full image
_, image =cv2.threshold(image,thre,thre,cv2.THRESH_BINARY)
#Canny the image
edges = cv2.Canny(image, 40, 50)
cvplt(edges)
#If we just run the hough transform like this
circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1,150,\
    param1 = 200, param2 = 20,\
    minRadius = 0, maxRadius = 0)
# HoughCircles returns [[[x, y, r], ...]]; keep only the first detection.
circles = circles[0][0]
#Draw the circle
# NOTE(review): the centre here is (circles[0], circles[2]) -- that is
# (x, radius), not (x, y). The corrected mapping below uses circles[1]
# for y, so this looks like an indexing bug; confirm whether the "circle
# on top" result described next is the algorithm's fault or this line's.
circle = cv2.circle(result, (circles[0], circles[2]), circles[2], color, 2)
#Obviously, the algorithm thinks the circle is on the top
cvplt(circle)
#Way out: search in the cropped area cropped out by user in the first place
small_edges = edges[157:171, 125:139]
cvplt(small_edges)
circles = cv2.HoughCircles(small_edges, cv2.HOUGH_GRADIENT, 1,150,\
    param1 = 200, param2 = 10,\
    minRadius = 0, maxRadius = 0)
color_true = (0, 255, 0)
circles = circles[0][0]
print(circles)
#Map the circle centre from crop coordinates back to the big picture
y = 157 + int(circles[1])
x = 125 + int(circles[0])
#Plot the fixed circle
circle = cv2.circle(result, (x, y), circles[2], color_true, 2)
cvplt(circle)
#As shown in the green circle, it correctly maps to the correct position.
```
# Optimization
What if the glint moves beyond the cell?
- Make the original cell bigger
- Update the cell position based on pupil
# Analysis
As shown above, for glint detection, I will use the Hough transform as the main algorithm and KCF as well as ratio differences as complementary analysis to get a more precise result.
# Conclusion
Both Pupil tracking and glint tracking is about to finish. The next step would be to further improve precision and find the angle of staring based upon values from both glint and pupil.
| github_jupyter |
This demo provides examples of `ImageReader` class from `niftynet.io.image_reader` module.
What is `ImageReader`?
The main functionality of `ImageReader` is to search a set of folders, return a list of image files, and load the images into memory in an iterative manner.
A `tf.data.Dataset` instance can be initialised from an `ImageReader`, this makes the module readily usable as an input op to many tensorflow-based applications.
Why `ImageReader`?
- designed for medical imaging formats and applications
- works well with multi-modal input volumes
- works well with `tf.data.Dataset`
## Before the demo...
First make sure the source code is available, and import the module.
For NiftyNet installation, please checkout:
http://niftynet.readthedocs.io/en/dev/installation.html
```
import sys
# Make a local NiftyNet checkout importable (adjust the path to your clone).
niftynet_path = '/Users/bar/Documents/Niftynet/'
sys.path.append(niftynet_path)
from niftynet.io.image_reader import ImageReader
```
For demonstration purpose we download some demo data to `~/niftynet/data/`:
```
from niftynet.utilities.download import download
# Fetch the BRATS demo volumes into ~/niftynet/data/.
download('anisotropic_nets_brats_challenge_model_zoo_data')
## Use case: loading 3D volumes
```
from niftynet.io.image_reader import ImageReader
# Search this folder for image files; 'MR' names the resulting input section.
data_param = {'MR': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG'}}
reader = ImageReader().initialise(data_param)
reader.shapes, reader.tf_dtypes
# read data using the initialised reader (idx selects a specific subject)
idx, image_data, interp_order = reader(idx=0)
image_data['MR'].shape, image_data['MR'].dtype
# calling the reader without idx randomly samples the list of images
for _ in range(3):
    idx, image_data, _ = reader()
    print('{} image: {}'.format(idx, image_data['MR'].shape))
```
The images are always read into a 5D-array, representing:
`[height, width, depth, time, channels]`
## User case: loading pairs of image and label by matching filenames
(In this case the loaded arrays are not concatenated.)
```
from niftynet.io.image_reader import ImageReader
# Pair each T2 image with its label by matching filename substrings.
data_param = {'image': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                        'filename_contains': 'T2'},
              'label': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                        'filename_contains': 'Label'}}
reader = ImageReader().initialise(data_param)
# image file information (without loading the volumes)
reader.get_subject(0)
idx, image_data, interp_order = reader(idx=0)
image_data['image'].shape, image_data['label'].shape
```
## User case: loading multiple modalities of image and label by matching filenames
The following code initialises a reader with four modalities, and the `'image'` output is a concatenation of arrays loaded from these files. (The files are concatenated at the fifth dimension)
```
from niftynet.io.image_reader import ImageReader
# One source section per modality; 'filename_not_contains' keeps T1 and T1c apart.
data_param = {'T1': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                     'filename_contains': 'T1', 'filename_not_contains': 'T1c'},
              'T1c': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                      'filename_contains': 'T1c'},
              'T2': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                     'filename_contains': 'T2'},
              'Flair': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                        'filename_contains': 'Flair'},
              'label': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                        'filename_contains': 'Label'}}
# Concatenate the four modalities into a single 'image' input (along the 5th dimension,
# as described in the text above).
grouping_param = {'image': ('T1', 'T1c', 'T2', 'Flair'), 'label':('label',)}
reader = ImageReader().initialise(data_param, grouping_param)
_, image_data, _ = reader(idx=0)
image_data['image'].shape, image_data['label'].shape
```
## More properties
The input specification supports additional properties include
```python
{'csv_file', 'path_to_search',
'filename_contains', 'filename_not_contains',
'interp_order', 'pixdim', 'axcodes', 'spatial_window_size',
'loader'}
```
see also: http://niftynet.readthedocs.io/en/dev/config_spec.html#input-data-source-section
## Using ImageReader with image-level data augmentation layers
```
from niftynet.io.image_reader import ImageReader
from niftynet.layer.rand_rotation import RandomRotationLayer as Rotate
data_param = {'MR': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG'}}
reader = ImageReader().initialise(data_param)
# Random rotation with angles drawn uniformly from [-10, 10], applied to
# every volume the reader loads.
rotation_layer = Rotate()
rotation_layer.init_uniform_angle([-10.0, 10.0])
reader.add_preprocessing_layers([rotation_layer])
_, image_data, _ = reader(idx=0)
image_data['MR'].shape
# import matplotlib.pyplot as plt
# plt.imshow(image_data['MR'][:, :, 50, 0, 0])
# plt.show()
```
## Using ImageReader with `tf.data.Dataset`
```
import tensorflow as tf
from niftynet.io.image_reader import ImageReader
# initialise multi-modal image and label reader
data_param = {'T1': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                     'filename_contains': 'T1', 'filename_not_contains': 'T1c'},
              'T1c': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                      'filename_contains': 'T1c'},
              'T2': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                     'filename_contains': 'T2'},
              'Flair': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                        'filename_contains': 'Flair'},
              'label': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                        'filename_contains': 'Label'}}
# Four modalities concatenated into 'image'; 'label' stays separate.
grouping_param = {'image': ('T1', 'T1c', 'T2', 'Flair'), 'label':('label',)}
reader = ImageReader().initialise(data_param, grouping_param)
def image_label_pair_generator():
    """Wrap the initialised reader as an endless generator.

    :yield: a dictionary of images (numpy arrays), one entry per input section.
    """
    while True:
        yield reader()[1]
# tensorflow dataset fed by the generator; dtypes come from the reader.
dataset = tf.data.Dataset.from_generator(
    image_label_pair_generator,
    output_types=reader.tf_dtypes)
    #output_shapes=reader.shapes)
dataset = dataset.batch(1)
iterator = dataset.make_initializable_iterator()
# run the tensorflow graph: pull three batches and print their shapes
with tf.Session() as sess:
    sess.run(iterator.initializer)
    for _ in range(3):
        data_dict = sess.run(iterator.get_next())
        print(data_dict.keys())
        print('image: {}, label: {}'.format(
            data_dict['image'].shape,
            data_dict['label'].shape))
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#MSVO-3,-70" data-toc-modified-id="MSVO-3,-70-1"><span class="toc-item-num">1 </span>MSVO 3, 70</a></span></li><li><span><a href="#Text-Fabric" data-toc-modified-id="Text-Fabric-2"><span class="toc-item-num">2 </span>Text-Fabric</a></span></li><li><span><a href="#Installing-Text-Fabric" data-toc-modified-id="Installing-Text-Fabric-3"><span class="toc-item-num">3 </span>Installing Text-Fabric</a></span><ul class="toc-item"><li><span><a href="#Prerequisites" data-toc-modified-id="Prerequisites-3.1"><span class="toc-item-num">3.1 </span>Prerequisites</a></span></li><li><span><a href="#TF-itself" data-toc-modified-id="TF-itself-3.2"><span class="toc-item-num">3.2 </span>TF itself</a></span></li></ul></li><li><span><a href="#Pulling-up-a-tablet-and-its-transliteration-using-a-p-number" data-toc-modified-id="Pulling-up-a-tablet-and-its-transliteration-using-a-p-number-4"><span class="toc-item-num">4 </span>Pulling up a tablet and its transliteration using a p-number</a></span></li><li><span><a href="#Non-numerical-quads" data-toc-modified-id="Non-numerical-quads-5"><span class="toc-item-num">5 </span>Non-numerical quads</a></span></li><li><span><a href="#Generating-a-list-of-sign-frequency-and-saving-it-as-a-separate-file" data-toc-modified-id="Generating-a-list-of-sign-frequency-and-saving-it-as-a-separate-file-6"><span class="toc-item-num">6 </span>Generating a list of sign frequency and saving it as a separate file</a></span></li></ul></div>
# Primer 1
This notebook is meant for those with little or no familiarity with
[Text-Fabric](https://github.com/annotation/text-fabric) and will focus on several basic tasks, including calling up an individual proto-cuneiform tablet using a p-number, the coding of complex proto-cuneiform signs using what we will call "quads" and the identification of one of the numeral systems, and a quick look at the frequency of a few sign clusters. Each primer, including this one, will focus on a single tablet and explore three or four analytical possibilities. In this primer we look at MSVO 3, 70, which has the p-number P005381 at CDLI.
## MSVO 3, 70
The proto-cuneiform tablet known as MSVO 3, 70, is held in the British Museum, where it has the museum number BM 140852. The tablet dates to the Uruk III period, ca. 3200-3000 BCE, and is slated for publication in the third volume of Materialien zu den frühen Schriftzeugnissen des Vorderen Orients (MSVO). Up to now it has only appeared as a photo in Frühe Schrift (Nissen, Damerow and Englund 1990), p. 38.
We'll show the lineart for this tablet and its ATF transcription in a moment, including a link to this tablet on CDLI.
## Text-Fabric
Text-Fabric (TF) is a model for textual data with annotations that is optimized for efficient data analysis. As we will begin to see at the end of this primer, when we check the totals on the reverse of our primer tablet, Text-Fabric also facilitates the creation of new, derived data, which can be added to the original data.
Working with TF is a bit like buying from IKEA. You get all the bits and pieces in a box, and then you assemble it yourself. TF decomposes any dataset into its components, nicely stacked, with every component uniquely labeled. And then we use short reusable bits of code to do specific things. TF is based on a model proposed by [Doedens](http://books.google.nl/books?id=9ggOBRz1dO4C) that focuses on the essential properties of texts such sequence and embedding. For a description of how Text-Fabric has been used for work on the Hebrew Bible, see Dirk Roorda's article [The Hebrew Bible as Data: Laboratory - Sharing - Experiences](https://doi.org/10.5334/bbi.18).
Once data is transformed into Text-Fabric, it can also be used to build rich online interfaces for specific groups of ancient texts. For the Hebrew Bible, have a look at [SHEBANQ](https://shebanq.ancient-data.org/hebrew/text).
The best environment for using Text-Fabric is in a [Jupyter Notebook](http://jupyter.readthedocs.io/en/latest/). This primer is in a Jupyter Notebook: the snippets of code can only be executed if you have installed Python 3, Jupyter Notebook, and Text-Fabric on your own computer.
## Installing Text-Fabric
### Prerequisites
You need to have Python on your system. Most systems have it out of the box,
but alas, that is python2 and we need at least python 3.6.
Install it from [python.org]() or from [Anaconda]().
If you got it from python.org, you also have to install [Jupyter]().
### TF itself
```
pip install text-fabric
```
if you have installed Python with the help of Anaconda, or
```
sudo -H pip3 install text-fabric
```
if you have installed Python from [python.org](https://www.python.org).
###### Execute: If all this is done, the following cells can be executed.
```
import os, sys, collections
from IPython.display import display
from tf.extra.cunei import Cunei
# (The original cell re-imported sys and os here; that duplicate import of
# already-imported modules has been removed.)

# (base dir, org/repo, local checkout subdir) locating the Uruk corpus data.
LOC = ("~/github", "Nino-cunei/uruk", "primer1")
A = Cunei(*LOC)  # load the corpus and its high-level helper API
A.api.makeAvailableIn(globals())  # expose the TF API (F, N, ...) as globals
```
## Pulling up a tablet and its transliteration using a p-number
Each cuneiform tablet has a unique "p-number" and we can use this p-number in Text-Fabric to bring up any images and the transliteration of a tablet, here P005381.
There is a "node" in Text-Fabric for this tablet. How do we find it and display the transliteration?
* We *search* for the tablet by means of a template;
* we use functions `A.lineart()` and `A.getSource()` to bring up the lineart and transliterations of tablets.
```
pNum = "P005381"
query = f"""
tablet catalogId={pNum}
"""
results = A.search(query)
```
The `results` is a list of "records".
Here we have only one result: `results[0]`.
Each result record is a tuple of nodes mentioned in the template.
Here we only mentioned a single thing: `tablet`.
So we find the node of the matched tablets as the first member of the result records.
Hence the result tablet node is `results[0][0]`.
```
tablet = results[0][0]
A.lineart(tablet, width=300)
A.getSource(tablet)
```
Now we want to view the numerals on the tablet.
```
query = f"""
tablet catalogId={pNum}
sign type=numeral
"""
results = A.search(query)
```
It is easy to show them at a glance:
```
A.show(results)
```
Or we can show them in a table.
```
A.table(results)
```
There are a few different types of numerals here, but we are just going to look at the numbers belonging to the "shin prime prime" system, abbreviated here as "shinPP," which regularly adds two narrow horizontal wedges to each number. N04, which is the basic unit in this system, is the fourth, fifth and ninth of the preceding numerals: in the fourth occurrence repeated twice, in the fifth, three times and, unsurprisingly, in the ninth, which is the total on the reverse, five times. (N19, which is the next bundling unit in the same system, also occurs in the text.)
```
# Arithmetic value of each numeral sign in the shinPP system, expressed in
# multiples of the base unit N04 (N19 is the next bundling unit, 6 x N04, etc.).
shinPP = dict(
    N41=0.2,
    N04=1,
    N19=6,
    N46=60,
    N36=180,
    N49=1800,
)
```
First, let's see if we can locate one of the occurrences of shinPP numerals, namely the set of 3(N04) in the first case of the second column on the obverse, using Text-Fabric.
```
query = f"""
tablet catalogId={pNum}
face type=obverse
column number=2
line number=1
=: sign
"""
results = A.search(query)
A.table(results)
```
Note the `=:` in `=: sign`. This is a device to require that the sign starts at the same position
as the `line` above it. Effectively, we ask for the first sign of the line.
Now the result records are tuples `(tablet, face, column, line, sign)`, so if we want
the sign part of the first result, we have to say `results[0][4]` (Python counts from 0).
```
num = results[0][4]
A.pretty(num, withNodes=True)
```
This number is the "node" in Text-Fabric that corresponds to the first sign in the first case of column 2. It is like a bar-code for that position in the entire corpus. Now let's make sure that this node, viz. 106602, is actually a numeral. To do this we check the feature "numeral" of the node 106602. And then we can use A.atfFromSign to extract the transliteration.
```
print(F.type.v(num) == "numeral")
print(A.atfFromSign(num))
```
Let's get the name of the numeral, viz. N04, and the number of times that it occurs. This amounts to splitting apart "3" and "(N04)" but since we are calling features in Text-Fabric rather than trying to pull elements out of the transliteration, we do not need to tweak the information.
```
grapheme = F.grapheme.v(num)
print(grapheme)
iteration = F.repeat.v(num)
print(iteration)
```
Now we can replace "N04" with its value, using the shinPP dictionary above, and then multiple this value by the number of iterations to arrive at the value of the numeral as a whole. Since each occurrence of the numeral N04 has a value of 1, three occurrences of it should have a value of 3.
```
valueFromDict = shinPP.get(grapheme)
value = iteration * valueFromDict
print(value)
```
Just to make sure that we are calculating these values correctly, let's try it again with a numeral whose value is not 1. There is a nice example in case 1b in column 1 on the obverse, where we have 3 occurrences of N19, each of which has a value of 6, so we expect the total value of 3(N19) to be 18.
```
query = f"""
tablet catalogId={pNum}
face type=obverse
column number=1
case number=1b
=: sign
"""
results = A.search(query)
A.table(results)
sign = results[0][4]
grapheme = F.grapheme.v(sign)
iteration = F.repeat.v(sign)
valueFromDict = shinPP.get(grapheme, 0)
value = iteration * valueFromDict
print(value)
```
The next step is to walk through the nodes on the obverse, add up the total of the shinPP system on the obverse, and then do the same for the reverse and see if the obverse and the total on the reverse add up. We expect the 3(N19) and 5(N04) on the obverse to add up to 23, viz. 18 + 5 = 23.
```
shinPPpat = "|".join(shinPP)  # alternation pattern matching any shinPP grapheme
# Find every shinPP numeral sign on the tablet, together with its face.
# NOTE(review): TF search templates express nesting through indentation;
# the nesting below (tablet > face > sign) is restored from context -- verify.
query = f"""
tablet catalogId={pNum}
  face
    sign grapheme={shinPPpat}
"""
results = A.search(query)
A.show(results)

# Accumulate the arithmetic value of every shinPP numeral, per tablet face.
sums = collections.Counter()
for (tablet, face, num) in results:
    grapheme = F.grapheme.v(num)      # numeral sign name, e.g. "N04"
    iteration = F.repeat.v(num)       # how many times the sign is repeated
    valueFromDict = shinPP[grapheme]  # unit value of the sign
    value = iteration * valueFromDict
    sums[F.type.v(face)] += value     # keyed by face type: obverse/reverse
for faceType in sums:
    print(f"{faceType}: {sums[faceType]}")
```
It adds up!
## Non-numerical quads
Now that we have identified the numeral system in the first case of column 2 on the obverse, let's also see what we can find out about the non-numeral signs in the same case.
We use the term "quad" to refer to all orthographic elements that occupy the space of a single proto-cuneiform sign on the surface of the tablet. This includes both an individual proto-cuneiform sign operating on its own as well as combinations of signs that occupy the same space. One of the most elaborate quads in the proto-cuneiform corpus is the following:
```
|SZU2.((HI+1(N57))+(HI+1(N57)))|
```
This quad has two sub-quads `SZU2`, `(HI+1(N57))+(HI+1(N57))`, and the second sub-quad also consists of two sub-quads `HI+1(N57)` and `HI+1(N57)`; both of these sub-quads are, in turn, composed of two further sub-quads `HI` and `1(N57)`.
First we need to pick this super-quad out of the rest of the line: this is how we get the transliteration of the entire line:
```
query = f"""
tablet catalogId={pNum}
face type=obverse
column number=2
line number=1
"""
results = A.search(query)
line = results[0][3]
A.pretty(line, withNodes=True)
```
We can just read off the node of the biggest quad.
```
bigQuad = 143015
```
Now that we have identified the "bigQuad," we can also ask Text-Fabric to show us what it looks like.
```
A.lineart(bigQuad)
```
This extremely complex quad, viz. |SZU2.((HI+1(N57))+(HI+1(N57)))|, is a hapax legomenon, meaning that it only occurs here, but there are three other non-numeral quads in this line besides |SZU2.((HI+1(N57))+(HI+1(N57)))|, namely |GISZ.TE|, GAR and GI4~a, so let's see how frequent these four non-numeral signs are in the proto-cuneiform corpus. We can do this sign by sign using the function "F.grapheme.s()".
```
GISZTEs = F.grapheme.s("|GISZ.TE|")
print(f"|GISZ.TE| {len(GISZTEs)} times")
GARs = F.grapheme.s("GAR")
print(f"GAR = {len(GARs)} times")
GI4s = F.grapheme.s("GI4")
print(f"GI4 = {len(GI4s)} times")
```
There are two problems here that we need to resolve in order to get good numbers: we have to get Text-Fabric to count |GISZ.TE| as a single unit, even though it is composed of two distinct graphemes, and we have to ask it to recognize and count the "a" variant of "GI4". In order to count the number of quads that consist of GISZ and TE, namely |GISZ.TE|, it is convenient to make a frequency index for all quads.
We walk through all the quads, pick up its ATF, and count the frequencies of ATF representations.
```
quadFreqs = collections.Counter()
for q in F.otype.s("quad"):
quadFreqs[A.atfFromQuad(q)] += 1
```
With this in hand, we can quickly count how many quads there are that have both signs `GISZ` and `TE` in them.
Added bonus: we shall also see whether there are quads with both of these signs but composed with other operators and signs as well.
```
for qAtf in quadFreqs:
if "GISZ" in qAtf and "TE" in qAtf:
print(f"{qAtf} ={quadFreqs[qAtf]:>4} times")
```
And we can also look at the set of quads in which GISZ co-occurs with another sign, and likewise, the set of quads in which TE co-occurs with another sign.
```
for qAtf in quadFreqs:
if "GISZ" in qAtf:
print(f"{quadFreqs[qAtf]:>4} x {qAtf}")
for qAtf in quadFreqs:
if "TE" in qAtf:
print(f"{quadFreqs[qAtf]:>4} x {qAtf}")
```
Most of the time, however, when we are interested in particular sign frequencies, we want to cast a wide net and get the frequency of any possibly related sign or quad. The best way to do this is to check the ATF of any sign or quad that might be relevant and add up the number of its occurrences in the corpus. This following script checks both signs and quads and casts the net widely. It looks for the frequency of our same three signs/quads, namely GAR, GI4~a and |GISZ.TE|.
```
# Frequency index over BOTH signs and quads, keyed by ATF representation.
# A sign that is part of a bigger quad is still counted as a sign occurrence.
quadSignFreqs = collections.Counter()
quadSignTypes = {"quad", "sign"}
for n in N():  # N() walks all nodes of the corpus in canonical order
    nType = F.otype.v(n)
    if nType not in quadSignTypes:
        continue
    # Quads and signs have different ATF serialisers.
    atf = A.atfFromQuad(n) if nType == "quad" else A.atfFromSign(n)
    quadSignFreqs[atf] += 1
```
We now have a frequency index for all signs and quads in their ATF representation.
Note that if a sign is part of a bigger quad, its occurrence there will be counted as an occurrence of the sign.
```
selectedAtfs = []
for qsAtf in quadSignFreqs:
if "GAR" in qsAtf or "GI4~a" in qsAtf or "|GISZ.TE|" in qsAtf:
selectedAtfs.append(qsAtf)
print(f"{quadSignFreqs[qsAtf]:>4} x {qsAtf}")
```
Let's draw all these quads.
```
for sAtf in selectedAtfs:
A.lineart(sAtf, width="5em", height="5em", withCaption="right")
```
Besides our three targets, 34 occurrences of GI4~a, 401 of GAR and 26 of |GISZ.TE|:
34 x GI4~a
401 x GAR
26 x |GISZ.TE|
it has also pulled in a number of quads that include either GAR or GI4~a, among others:
20 x |ZATU651xGAR|
3 x |NINDA2xGAR|
6 x |4(N57).GAR|
1 x |GI4~a&GI4~a|
1 x |GI4~axA|
There are also other signs, as well as signs that only resemble GAR in transliteration, such as LAGAR or GARA2; but as long as we know what we are looking for, this type of broader frequency count can be quite useful.
## Generating a list of sign frequency and saving it as a separate file
First, we are going to count the number of distinct signs in the corpus, look at the top hits in the list and finally save the full list to a separate file. Then we will do the same for the quads, and then lastly we are going to combine these two lists and save them as a single frequency list for both signs and quads.
```
# Count the distinct signs in the corpus by their full ATF representation.
fullGraphemes = collections.Counter()
for n in F.otype.s("sign"):
    grapheme = F.grapheme.v(n)
    # Skip empty graphemes and the placeholder grapheme.
    # NOTE(review): the second literal below looks mojibake-garbled;
    # presumably it was the Unicode ellipsis "…" -- verify against the corpus.
    if grapheme == "" or grapheme == "โฆ":
        continue
    fullGrapheme = A.atfFromSign(n)
    fullGraphemes[fullGrapheme] += 1
len(fullGraphemes)
```
So there are 1477 distinct proto-cuneiform signs in the corpus. The following snippet of code will show us the first 20 signs on that list.
```
for (value, frequency) in sorted(
fullGraphemes.items(),
key=lambda x: (-x[1], x[0]),
)[0:20]:
print(f"{frequency:>5} x {value}")
```
Now we are going to write the full set of sign frequency results to two files in your `_temp` directory, within this repo. The two files are called:
* `grapheme-alpha.txt`, an alphabetic list of graphemes, along with the frequency of each sign, and
* `grapheme-freq.txt`, which runs from the most frequent to the least.
```
def writeFreqs(fileName, data, dataName):
    """Write the (item, frequency) pairs of *data* (a Counter/dict) to two
    files under A.tempDir: '<fileName>-alpha.txt' sorted alphabetically and
    '<fileName>-freq.txt' sorted by descending frequency.  Empty items are
    skipped."""
    print(f"There are {len(data)} {dataName}s")
    for (sortName, sortKey) in (
        ("alpha", lambda x: (x[0], -x[1])),  # by item, ties: most frequent first
        ("freq", lambda x: (-x[1], x[0])),   # by frequency, ties: alphabetical
    ):
        with open(f"{A.tempDir}/{fileName}-{sortName}.txt", "w") as fh:
            # BUG FIX: iterate key/value pairs.  Iterating the Counter/dict
            # directly yields only the keys, so unpacking (item, freq) would
            # fail; the later copy of this function (which uses data.items())
            # has the correct form.
            for (item, freq) in sorted(data.items(), key=sortKey):
                if item != "":
                    fh.write(f"{freq:>5} x {item}\n")
```
Now let's go through some of the same steps for quads rather than individual signs, and then export a single frequency list for both signs and quads.
```
quadFreqs = collections.Counter()
for q in F.otype.s("quad"):
quadFreqs[A.atfFromQuad(q)] += 1
print(len(quadFreqs))
```
So there are 740 quads in the corpus, and now we ask for the twenty most frequently attested quads.
```
for (value, frequency) in sorted(
quadFreqs.items(),
key=lambda x: (-x[1], x[0]),
)[0:20]:
print(f"{frequency:>5} x {value}")
```
And for the final task in this primer, we ask Text-Fabric to export a frequency list of both signs and quads in a separate file.
```
reportDir = "reports"
os.makedirs(reportDir, exist_ok=True)

def writeFreqs(fileName, data, dataName):
    """Dump the (item, frequency) pairs of *data* to two files under
    reportDir: '<fileName>-alpha.txt' ordered alphabetically and
    '<fileName>-freq.txt' ordered by descending frequency.  Empty items
    are skipped."""
    print(f"There are {len(data)} {dataName}s")
    orderings = (
        ("alpha", lambda pair: (pair[0], -pair[1])),
        ("freq", lambda pair: (-pair[1], pair[0])),
    )
    for (sortName, sortKey) in orderings:
        outPath = f"{reportDir}/{fileName}-{sortName}.txt"
        with open(outPath, "w") as fh:
            for (item, freq) in sorted(data.items(), key=sortKey):
                if item == "":
                    continue
                fh.write(f"{freq:>5} x {item}\n")
```
This shows up as a pair of files named "quad-signs-alpha.txt" and "quad-signs-freq.txt" and if we copy a few pieces of the quad-signs-freq.txt file here, they look something like this:
29413 x ...
12983 x 1(N01)
6870 x X
3080 x 2(N01)
2584 x 1(N14)
1830 x EN~a
1598 x 3(N01)
1357 x 2(N14)
1294 x 5(N01)
1294 x SZE~a
1164 x GAL~a
Only much farther down the list do we see signs and quads interspersed; here are the signs/quads around 88 occurrences:
88 x NIMGIR
88 x NIM~a
88 x SUG5
86 x EN~b
86 x NAMESZDA
86 x |GI&GI|
85 x GU
85 x |GA~a.ZATU753|
84 x BAD~a
84 x NA2~a
84 x ZATU651
84 x |1(N58).BAD~a|
83 x ZATU759
| github_jupyter |
```
from resources.workspace import *
%matplotlib inline
```
## Dynamical systems
are systems (sets of equations) whose variables evolve in time (the equations contains time derivatives). As a branch of mathematics, its theory is mainly concerned with understanding the behaviour of solutions (trajectories) of the systems.
## Chaos
is also known as the butterfly effect: "a butterfly that flaps its wings in Brazil can 'cause' a hurricane in Texas".
As opposed to the opinions of Descartes/Newton/Laplace, chaos effectively means that even in a deterministic (non-stochastic) universe, we can only predict "so far" into the future. This will be illustrated below using two toy-model dynamical systems made by Edward Lorenz.
---
## The Lorenz (1963) attractor
The [Lorenz-63 dynamical system](https://en.wikipedia.org/wiki/Lorenz_system) can be derived as an extreme simplification of *Rayleigh-Bรฉnard convection*: fluid circulation in a shallow layer of fluid uniformly heated (cooled) from below (above).
This produces the following 3 *coupled* ordinary differential equations (ODE):
$$
\begin{aligned}
\dot{x} & = \sigma(y-x) \\
\dot{y} & = \rho x - y - xz \\
\dot{z} & = -\beta z + xy
\end{aligned}
$$
where the "dot" represents the time derivative, $\frac{d}{dt}$. The state vector is $\mathbf{x} = (x,y,z)$, and the parameters are typically set to
```
SIGMA = 10.0  # Prandtl number
BETA = 8/3    # geometric factor
RHO = 28.0    # Rayleigh number; 28 puts the system in the chaotic regime
```
The ODEs can be coded as follows
```
def dxdt(xyz, t0, sigma, beta, rho):
    """Right-hand side of the Lorenz-63 ODE system.

    Parameters
    ----------
    xyz : sequence of 3 floats -- current state (x, y, z).
    t0 : float -- time; unused, but required by odeint's call signature.
    sigma, beta, rho : floats -- model parameters.

    Returns
    -------
    list of 3 floats: [dx/dt, dy/dt, dz/dt].
    """
    x, y, z = xyz
    dx = sigma * (y - x)
    dy = x * (rho - z) - y
    dz = x * y - beta * z
    return [dx, dy, dz]
```
#### Numerical integration to compute the trajectories
Below is a function to numerically **integrate** the ODEs and **plot** the solutions.
<!--
This function also takes arguments to control ($\sigma$, $\beta$, $\rho$) and of the numerical integration (`N`, `T`).
-->
```
from scipy.integrate import odeint

# One-element list used as a mutable cell: the interactive callback stores
# its latest trajectories here so that later notebook cells can read them.
output_63 = [None]

@interact( sigma=(0.,50), beta=(0.,5), rho=(0.,50), N=(0,50), eps=(0.01,1), T=(0.,40))
def animate_lorenz(sigma=SIGMA, beta=BETA, rho=RHO , N=2, eps=0.01, T=1.0):
    """Integrate N perturbed copies of the Lorenz-63 system up to time T and
    plot their trajectories in 3D.  eps scales the initial perturbations."""
    # Initial conditions: perturbations around some "proto" state.
    # NOTE(review): seed/array/randn/linspace come from the star import of
    # resources.workspace; numpy's own randn takes separate ints rather than
    # a tuple, so randn((N, 3)) presumably relies on a workspace wrapper --
    # confirm.
    seed(1)
    x0_proto = array([-6.1, 1.2, 32.5])
    x0 = x0_proto + eps*randn((N, 3))

    # Compute trajectories
    tt = linspace(0, T, int(100*T)+1)             # Time sequence for trajectory
    dd = lambda x,t: dxdt(x,t, sigma,beta,rho)    # Define dxdt(x,t) with fixed params.
    xx = array([odeint(dd, xn, tt) for xn in x0]) # Integrate
    output_63[0] = xx                             # export for later cells

    # PLOTTING
    ax = plt.figure(figsize=(10,5)).add_subplot(111, projection='3d')
    ax.axis('off')
    colors = plt.cm.jet(linspace(0,1,N))
    for n in range(N):
        ax.plot(*(xx[n,:,:].T),'-' ,c=colors[n])
        ax.scatter3D(*xx[n,-1,:],s=40,c=colors[n])
```
**Exc 2**:
* Move `T` (use your arrow keys). What does it control?
* Set `T` to something small; move the sliders for `N` and `eps`. What do they control?
* Visually investigate the system's (i.e. the trajectories') sensitivity to initial conditions by moving `T`, `N` and `eps`. How long do you think it takes (on average) for two trajectories (or the estimation error) to grow twice as far apart as they started (alternatives: 0.03, 0.3, 3, 30)?
### Averages
Slide `N` and `T` to their upper bounds. Execute the code cell below.
```
# Compute the average location of the $m$-th component of the state in TWO ways:
# across the ensemble at the final time, and across time for one member.
m = 0   # state component index (must be 0,1,2)
nB = 20 # number of histogram bins
xx = output_63[0][:,:,m]
# `density=True` replaces the long-deprecated `normed=1`, which was removed
# from matplotlib (3.1+); both normalise the histogram to unit area.
plt.hist(xx[:,-1] ,density=True,bins=nB, label="ensemble dist.",alpha=1.0) # -1: last time
plt.hist(xx[-1,:] ,density=True,bins=nB, label="temporal dist.",alpha=0.5) # -1: last ensemble member
#plt.hist(xx.ravel(),density=True,bins=nB, label="total distribution",alpha=0.5)
plt.legend();
```
**Exc 6*:** Answer the questions below.
* (a) Do you think the samples behind the histograms are drawn from the same distribution?
* (b) The answer to the above question means that this dynamical system is [ergodic](https://en.wikipedia.org/wiki/Ergodic_theory#Ergodic_theorems).
Now, suppose we want to investigate which (DA) method is better at estimating the true state (trajectory) for this system, on average. Should we run several short experiments or one long one?
```
#show_answer("Ergodicity a")
#show_answer("Ergodicity b")
```
---
## The "Lorenz-95" model
The Lorenz-96 system
is a "1D" model, designed to resemble atmospheric convection. Each state variable $\mathbf{x}_m$ can be considered some atmospheric quantity at a grid point at a fixed latitude of the earth. The system
is given by the coupled set of ODEs,
$$
\frac{d \mathbf{x}_m}{dt} = (\mathbf{x}_{m+1} - \mathbf{x}_{m-2}) \mathbf{x}_{m-1} - \mathbf{x}_m + F
\, ,
\quad \quad m \in \{1,\ldots,M\}
\, ,
$$
where the subscript indices apply periodically.
This model is not derived from physics but has similar characterisics, such as
<ul>
<li> there is external forcing, determined by a parameter $F$;</li>
<li> there is internal dissipation, emulated by the linear term;</li>
<li> there is energy-conserving advection, emulated by quadratic terms.</li>
</ul>
[Further description in the very readable original article](http://eaps4.mit.edu/research/Lorenz/Predicability_a_Problem_2006.pdf).
**Exc 10:** Show that the "total energy" $\sum_{m=1}^{M} \mathbf{x}_m^2$ is preserved by the quadratic terms in the ODE.
```
#show_answer("Hint: Lorenz energy")
#show_answer("Lorenz energy")
```
The model is animated below.
```
# For all m, any n: s(x,n) := x[m+n], circularly.
def s(x, n):
    """Return x shifted circularly so that result[m] == x[(m + n) % len(x)]."""
    shifted = np.roll(x, -n)
    return shifted
# One-element list used as a mutable cell to export the latest trajectory.
output_95 = [None]

@interact( M=(5,60,1), Force=(0,40,1), eps=(0.01,3,0.1), T=(0.05,40,0.05))
def animate_lorenz_95(M=40, Force=8.0, eps=0.01,T=0):
    """Integrate the M-variable Lorenz-96 model with forcing `Force` up to
    time T, starting from a single perturbation of size eps on component 0,
    and plot the last few states."""
    # Initial conditions: perturbations
    x0 = zeros(M)
    x0[0] = eps

    def dxdt(x,t):
        # (x_{m+1} - x_{m-2}) x_{m-1} - x_m + F, circular indexing via s().
        return (s(x,1)-s(x,-2))*s(x,-1) - x + Force

    tt = linspace(0, T, int(40*T)+1)
    xx = odeint(lambda x,t: dxdt(x,t), x0, tt)
    output_95[0] = xx

    plt.figure(figsize=(7,4))
    # Plot last only
    #plt.plot(xx[-1],'b')
    # Plot multiple: the last `Lag` states, fading from dark to light.
    Lag = 8
    colors = plt.cm.cubehelix(0.1+0.6*linspace(0,1,Lag))
    for k in range(Lag,0,-1):
        plt.plot(xx[max(0,len(xx)-k)],c=colors[Lag-k])
    plt.ylim(-10,20)
```
**Exc 12:** Investigate by moving the sliders: Under which settings of the forcing `Force` is the system chaotic (is the predictability horizon finite)?
---
## Error/perturbation dynamics
**Exc 14*:** Suppose $x(t)$ and $z(t)$ are "twins": they evolve according to the same law $f$:
$$
\begin{align}
\frac{dx}{dt} &= f(x) \\
\frac{dz}{dt} &= f(z) \, .
\end{align}
$$
Define the "error": $\varepsilon(t) = x(t) - z(t)$.
Suppose $z(0)$ is close to $x(0)$.
Let $F = \frac{df}{dx}(x(t))$.
* a) Show that the error evolves according to the ordinary differential equation (ODE)
$$\frac{d \varepsilon}{dt} \approx F \varepsilon \, .$$
```
#show_answer("error evolution")
```
* b) Suppose $F$ is constant. Show that the error grows exponentially: $\varepsilon(t) = \varepsilon(0) e^{F t} $.
```
#show_answer("anti-deriv")
```
* c)
* 1) Suppose $F<1$.
What happens to the error?
What does this mean for predictability?
* 2) Now suppose $F>1$.
Given that all observations are uncertain (i.e. $R_t>0$, if only ever so slightly),
can we ever hope to estimate $x(t)$ with 0 uncertainty?
```
#show_answer("predictability cases")
```
* d) Consider the ODE derived above.
How might we change it in order to model (i.e. emulate) a saturation of the error at some level?
Can you solve this equation?
```
#show_answer("saturation term")
```
* e) Now suppose $z(t)$ evolves according to $\frac{dz}{dt} = g(z)$, with $g \neq f$.
What is now the differential equation governing the evolution of the error, $\varepsilon$?
```
#show_answer("liner growth")
```
**Exc 16*:** Recall the Lorenz-63 system. What is its doubling time (i.e. estimate how long does it take for two trajectories to grow twice as far apart as they were to start with) ?
*Hint: Set `N=50, eps=0.01, T=1,` and compute the spread of the particles now as compared to how they started*
```
xx = output_63[0][:,-1] # Ensemble of particles at the end of integration
### compute your answer here ###
#show_answer("doubling time")
```
The answer actually depends on where in "phase space" the particles started.
To get a universal answer one must average these experiments for many different initial conditions.
## In summary:
Prediction (forecasting) with these systems is challenging because they are chaotic: small errors grow exponentially.
In other words, chaos means that there is a limit to how far into the future we can make predictions (skillfully).
It is therefore crucial to minimize the intial error as much as possible. This is a task for DA.
### Next: [Ensemble [Monte-Carlo] approach](T7 - Ensemble [Monte-Carlo] approach.ipynb)
| github_jupyter |
## 102 - Training Regression Algorithms with the L-BFGS Solver
In this example, we run a linear regression on the *Flight Delay* dataset to predict the delay times.
We demonstrate how to use the `TrainRegressor` and the `ComputePerInstanceStatistics` APIs.
First, import the packages.
```
import numpy as np
import pandas as pd
import mmlspark
```
Next, import the CSV dataset.
```
# load raw data from small-sized 30 MB CSV file (trimmed to contain just what we use)
dataFilePath = "On_Time_Performance_2012_9.csv"
import os, urllib
# Download the dataset once and cache it next to the notebook.
if not os.path.isfile(dataFilePath):
    urllib.request.urlretrieve("https://mmlspark.azureedge.net/datasets/" + dataFilePath,
                               dataFilePath)
# Read with pandas, forcing the numeric columns to float64 so that Spark
# infers a consistent schema, then convert to a Spark DataFrame.
flightDelay = spark.createDataFrame(
    pd.read_csv(dataFilePath,
                dtype={"Month": np.float64, "Quarter": np.float64,
                       "DayofMonth": np.float64, "DayOfWeek": np.float64,
                       "OriginAirportID": np.float64, "DestAirportID": np.float64,
                       "CRSDepTime": np.float64, "CRSArrTime": np.float64}))

# Print information on the dataset we loaded
print("Records read: " + str(flightDelay.count()))
print("Schema:")
flightDelay.printSchema()
flightDelay.limit(10).toPandas()
```
Split the dataset into train and test sets.
```
train,test = flightDelay.randomSplit([0.75, 0.25])
```
Train a regressor on dataset with `l-bfgs`.
```
from mmlspark import TrainRegressor, TrainedRegressorModel
from pyspark.ml.regression import LinearRegression
from pyspark.ml.feature import StringIndexer

# Convert columns to categorical
catCols = ["Carrier", "DepTimeBlk", "ArrTimeBlk"]
trainCat = train
testCat = test
for catCol in catCols:
    # Fit the indexer on the training split only, then apply it to both
    # splits so train and test share one label-to-index mapping.
    # NOTE(review): a label that appears only in the test split would make
    # transform() fail -- confirm this cannot happen for these columns.
    simodel = StringIndexer(inputCol=catCol, outputCol=catCol + "Tmp").fit(train)
    trainCat = simodel.transform(trainCat).drop(catCol).withColumnRenamed(catCol + "Tmp", catCol)
    testCat = simodel.transform(testCat).drop(catCol).withColumnRenamed(catCol + "Tmp", catCol)

# Elastic-net regularised linear regression on the ArrDelay label,
# trained via TrainRegressor and saved for the scoring cell below.
lr = LinearRegression().setRegParam(0.1).setElasticNetParam(0.3)
model = TrainRegressor(model=lr, labelCol="ArrDelay").fit(trainCat)
model.write().overwrite().save("flightDelayModel.mml")
```
Score the regressor on the test data.
```
flightDelayModel = TrainedRegressorModel.load("flightDelayModel.mml")
scoredData = flightDelayModel.transform(testCat)
scoredData.limit(10).toPandas()
```
Compute model metrics against the entire scored dataset
```
from mmlspark import ComputeModelStatistics
metrics = ComputeModelStatistics().transform(scoredData)
metrics.toPandas()
```
Finally, compute and show per-instance statistics, demonstrating the usage
of `ComputePerInstanceStatistics`.
```
from mmlspark import ComputePerInstanceStatistics
evalPerInstance = ComputePerInstanceStatistics().transform(scoredData)
evalPerInstance.select("ArrDelay", "Scores", "L1_loss", "L2_loss").limit(10).toPandas()
```
| github_jupyter |
# Object-Oriented Python
During this session, we will be exploring the Oriented-Object paradigm in Python using all what we did with Pandas in previous sessions. We will be working with the same data of aircraft supervising latest Tour de France.
```
import pandas as pd
df = pd.read_json("../data/tour_de_france.json.gz")
```
There are three main principles around OOP:
- **encapsulation**: objects embed properties (attributes, methods);
- **interface**: objects expose and document services, they hide all about their inner behaviour;
- **factorisation**: objects/classes with similar behaviour are grouped together.
A common way of working with Python is to implement **protocols**. Protocols are informal interfaces defined by a set of methods allowing an object to play a particular role in the system. For instance, for an object to behave as an iterable you don't need to subclass an abstract class Iterable or implement explicitly an interface Iterable: it is enough to implement the special method `__iter__`, or even just `__getitem__` (we will go through these concepts hereunder).
Let's have a look at the built-in function `sorted`: it expects an **iterable** structure of **comparable** objects and returns a sorted list of these objects. Let's have a look:
```
sorted([-2, 4, 0])
```
However it fails when object are not comparable:
```
sorted([-1, 1+1j, 1-2j])
```
Then we can write our own ComparableComplex class and implement a comparison based on modules. The **comparable** protocol expects the `<` operator to be defined (special keyword: `__lt__`)
```
class ComparableComplex(complex):
    """A complex number that supports ordering by modulus, so that it can be
    used with sorted()."""

    def __lt__(a, b):
        # The "comparable" protocol only requires `<`: compare absolute values.
        return abs(a) < abs(b)

# Now this works: note the input is not a list but a generator.
sorted(ComparableComplex(z) for z in [-1, 1 + 1j, 1 - 2j])
```
We will be working with different views of pandas DataFrame for trajectories and collection of trajectories. Before we start any further, let's remember two ways to factorise behaviours in Object-Oriented Programming: **inheritance** and **composition**.
The best way to do is not always obvious and it often takes experience to find the good and bad sides of both paradigms.
In our previous examples, our ComparableComplex *offered not much more* than complex numbers. As long as we don't need to compare them, we could have *put them in a list together* with regular complex numbers *without loss of generality*: after all a ComparableComplex **is** a complex. That's a good smell for **inheritance**.
If we think about our trajectories, we will build them around pandas DataFrames. Trajectories will probably have a single attribute: the dataframe. It could be tempting to inherit from `pd.DataFrame`; it will probably work fine in the beginning but problems will occur sooner than expected (most likely with inconsistent interfaces). We **model** trajectories and collections of trajectories with dataframes, but a trajectory **is not** a dataframe. Be reasonable and go for **composition**.
So now we can start.
- The `__init__` special method defines a constructor. `self` is necessary: it represents the current object.
Note that **the constructor does not return anything**.
```
class FlightCollection:
    """A set of flights, modelled by composition around a pandas DataFrame."""

    def __init__(self, data):
        # Keep a reference to the underlying dataframe; no copy is made.
        self.data = data


class Flight:
    """A single trajectory, also a thin wrapper around a pandas DataFrame."""

    def __init__(self, data):
        # Keep a reference to the underlying dataframe; no copy is made.
        self.data = data
FlightCollection(df)
```
## Special methods
There is nothing much we did at this point: just two classes holding a dataframe as an attribute. Even the output representation is the default one based on the class name and the object's address in memory.
- we can **override** the special `__repr__` method (which **returns** a stringโ**do NOT** `print`!) in order to display a more relevant output. You may use the number of lines in the underlying dataframe for instance.
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>__repr__</code> method.
</div>
```
# %load ../solutions/pandas_oo/flight_repr.py
"{0!r}".format(FlightCollection(df))
```
Note that we passed the dataframe in the constructor. We want to keep it that way (we will see later why). However we may want to create a different type of constructor to read directly from the JSON file. There is a special kind of keyword for that.
- `@classmethod` is a decorator to put before a method. It makes it an **class method**, i.e. you call it on the class and not on the object. The first parameter is no longer `self` (the instance) but by convention `cls` (the class).
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>read_json</code> class method.
</div>
```
# %load ../solutions/pandas_oo/flight_json.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
```
Now we want to make this `FlightCollection` iterable.
- The special method to implement is `__iter__`. This method takes no argument and **yields** elements one after the other.
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>__iter__</code> method which yields Flight instances.
</div>
Of course, you should reuse the code of last session about iteration.
```
# %load ../solutions/pandas_oo/flight_iter.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
for flight in collection:
print(flight)
```
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>__repr__</code> method for Flight including callsign, aircraft icao24 code and day of the flight.
</div>
```
# %load ../solutions/pandas_oo/flight_nice_repr.py
for flight in collection:
print(flight)
```
<div class='alert alert-success'>
<b>Note:</b> Since our FlightCollection is iterable, we can pass it to any method accepting iterable structures.
</div>
```
list(collection)
```
<div class='alert alert-warning'>
<b>Warning:</b> However, it won't work here, because Flight instances cannot be compared, unless we specify on which criterion we want to compare.
</div>
```
sorted(collection)
sorted(collection, key=lambda x: x.min("timestamp"))
```
<div class='alert alert-warning'>
<b>Exercice:</b> Implement the proper missing method so that a FlightCollection can be sorted.
</div>
```
# %load ../solutions/pandas_oo/flight_sort.py
sorted(collection)
```
## Data visualisation
See the following snippet of code for plotting trajectories on a map.
```
import matplotlib.pyplot as plt
from cartopy.crs import EuroPP, PlateCarree
# Draw every trajectory of the collection on one map (extent roughly
# covers France, consistent with the Tour de France dataset).
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=EuroPP()))
ax.coastlines("50m")
for flight in collection:
    # Each Flight wraps a dataframe; plot its longitude/latitude track.
    # The data is in plain lon/lat degrees, hence the PlateCarree transform.
    flight.data.plot(
        ax=ax,
        x="longitude",
        y="latitude",
        legend=False,
        transform=PlateCarree(),
        color="steelblue",
    )
# Bounding box: (lon_min, lon_max, lat_min, lat_max).
ax.set_extent((-5, 10, 42, 52))
ax.set_yticks([])
```
<div class='alert alert-warning'>
<b>Exercice:</b> Implement a plot method to make the job even more simple.
</div>
```
# %load ../solutions/pandas_oo/flight_plot.py
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=EuroPP()))
ax.coastlines("50m")
for flight in collection:
flight.plot(ax, color="steelblue")
ax.set_extent((-5, 10, 42, 52))
ax.set_yticks([])
```
## Indexation
Until now, we implemented all what is necessary to iterate on structures.
This means we have all we need to yield elements one after the other.
Note that:
- Python does not assume your structure has a length.
(There are some infinite iterators, like the one yielding natural integers one after the other.)
- Python cannot guess for you how you want to index your flights.
```
len(collection)
collection['ASR172B']
```
There are many ways to proceed with indexing. We may want to select flights with a specific callsign, or a specific icao24 code. Also, if only one Flight is returned, we want a Flight object. If two or more segments are contained in the underlying dataframe, we want to stick to a FlightCollection.
<div class="alert alert-warning">
<b>Exercice:</b> Implement a <code>__len__</code> special method, then a <code>__getitem__</code> special method that will return a Flight or a FlightCollection (depending on the selection) wrapping data corresponding to the given callsign or icao24 code.
</div>
```
# %load ../solutions/pandas_oo/flight_index.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
collection
collection["3924a0"]
collection["ASR172B"]
from collections import defaultdict
count = defaultdict(int)
for flight in collection["ASR172B"]:
count[flight.icao24] += 1
count
```
As we can see here, this method for indexing is not convenient enough. We could select the only flight `collection["ASR172B"]["3924a0"]` but with current implementation, there is no way to separate the 18 other flights.
<div class='alert alert-warning'>
<b>Exercice:</b> Implement a different <code>__getitem__</code> method that checks the type of the index: filter on callsign/icao24 if the key is a <code>str</code>, filter on the day of the flight if the key is a <code>pd.Timestamp</code>.
</div>
```
# %load ../solutions/pandas_oo/flight_index_time.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
collection["ASR172B"][pd.Timestamp("2019-07-18")]
```
<div class='alert alert-warning'>
<b>Exercice:</b> Plot all trajectories flying on July 18th. How can they be sure to not collide with each other?
</div>
```
# %load ../solutions/pandas_oo/flight_plot_july18.py
```
| github_jupyter |
# SEC405: Scalable, Automated Anomaly Detection with Amazon GuardDuty and SageMaker
## Using IP Insights to score security findings
-------
[Return to the workshop repository](https://github.com/aws-samples/aws-security-workshops/edit/master/detection-ml-wksp/)
Amazon SageMaker IP Insights is an unsupervised anomaly detection algorithm for suspicious IP addresses that uses statistical modeling and neural networks to capture associations between online resources (such as account IDs or hostnames) and IPv4 addresses. Under the hood, it learns vector representations for online resources and IP addresses. This essentially means that if the vector representing an IP address and an online resource are close together, then it is likely for that IP address to access that online resource, even if it has never accessed it before.
In this notebook, we use the Amazon SageMaker IP Insights algorithm to train a model using the `<principal ID, IP address`> tuples we generated from the CloudTrail log data, and then use the model to perform inference on the same type of tuples generated from GuardDuty findings to determine how unusual it is to see a particular IP address for a given principal involved with a finding.
After running this notebook, you should be able to:
- obtain, transform, and store data for use in Amazon SageMaker,
- create an AWS SageMaker training job to produce an IP Insights model,
- use the model to perform inference with an Amazon SageMaker endpoint.
If you would like to know more, please check out the [SageMaker IP Insights Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/ip-insights.html).
## Setup
------
*This notebook was created and tested on a ml.m4.xlarge notebook instance. We recommend using the same, but other instance types should still work.*
The following is a cell that contains Python code and can be executed by clicking the button above labelled "Run". When a cell is running, you will see a star in the parentheses to the left (e.g., `In [*]`), and when it has completed you will see a number in the parentheses. Each click of "Run" will execute the next cell in the notebook.
Go ahead and click **Run** now. You should see the text in the `print` statement get printed just beneath the cell.
All of these cells share the same interpreter, so if a cell imports modules, like this one does, those modules will be available to every subsequent cell.
```
import boto3
import botocore
import os
import sagemaker
print("Welcome to IP Insights!")
```
### Configure Amazon S3 Bucket
Before going further, we need to specify the S3 bucket that SageMaker will use for input and output data for the model, which will be the bucket where our training and inference tuples from CloudTrail logs and GuardDuty findings, respectively, are located. Edit the following cell to specify the name of the bucket and then run it; you do not need to change the prefix.
```
# Specify the full name of your "sec405-tuplesbucket" here
bucket = 'sec405-tuplesbucket-########'
prefix = ''
```
Finally, run the next cell to complete the setup.
```
# IAM role that SageMaker assumes for the training and hosting jobs below.
execution_role = sagemaker.get_execution_role()
# Check if the bucket exists
try:
    # head_bucket is a cheap probe: it fails fast without transferring data.
    boto3.Session().client('s3').head_bucket(Bucket=bucket)
except botocore.exceptions.ParamValidationError as e:
    # Raised when `bucket` is empty or not a syntactically valid bucket name.
    print('Hey! You either forgot to specify your S3 bucket'
          ' or you gave your bucket an invalid name!')
except botocore.exceptions.ClientError as e:
    # 403: the bucket exists but we lack permission; 404: it does not exist.
    if e.response['Error']['Code'] == '403':
        print("Hey! You don't have permission to access the bucket, {}.".format(bucket))
    elif e.response['Error']['Code'] == '404':
        print("Hey! Your bucket, {}, doesn't exist!".format(bucket))
    else:
        # Anything else (throttling, networking, ...) is unexpected: re-raise.
        raise
else:
    # Success path: the bucket is reachable, so report where data will live.
    print('Training input/output will be stored in: s3://{}/{}'.format(bucket, prefix))
```
## Training
Execute the two cells below to start training. Training should take several minutes to complete. You can look at various training metrics in the log as the model trains. These logs are also available in CloudWatch.
```
from sagemaker.amazon.amazon_estimator import get_image_uri
# Resolve the region-specific container image for the built-in
# IP Insights algorithm.
image = get_image_uri(boto3.Session().region_name, 'ipinsights')
# Configure SageMaker IP Insights input channels
# The 'train' channel points at the <principal ID, IP> tuples generated
# from CloudTrail logs earlier in the workshop.
train_key = os.path.join(prefix, 'train', 'cloudtrail_tuples.csv')
s3_train_data = 's3://{}/{}'.format(bucket, train_key)
input_data = {
    'train': sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated', content_type='text/csv')
}
# Set up the estimator with training job configuration
ip_insights = sagemaker.estimator.Estimator(
    image,
    execution_role,
    train_instance_count=1,
    train_instance_type='ml.m4.xlarge',
    output_path='s3://{}/{}/output'.format(bucket, prefix),
    sagemaker_session=sagemaker.Session())
# Configure algorithm-specific hyperparameters
# (all values are passed as strings; see the SageMaker IP Insights
# hyperparameter documentation for meanings and valid ranges)
ip_insights.set_hyperparameters(
    num_entity_vectors='20000',
    random_negative_sampling_rate='5',
    vector_dim='128',
    mini_batch_size='1000',
    epochs='5',
    learning_rate='0.01',
)
# Start the training job (should take 3-4 minutes to complete)
ip_insights.fit(input_data)
print('Training job name: {}'.format(ip_insights.latest_training_job.job_name))
```
## Deployment
Execute the cell below to deploy the trained model on an endpoint for inference. It should take 5-7 minutes to spin up the instance and deploy the model (the horizontal dashed line represents progress, and it will print an exclamation point \[!\] when it is complete).
```
# Deploy the trained model to a real-time inference endpoint backed by a
# single ml.m4.xlarge instance; deploy() blocks until the endpoint is live.
predictor = ip_insights.deploy(
    initial_instance_count=1,
    instance_type='ml.m4.xlarge'
)
print('Endpoint name: {}'.format(predictor.endpoint))
```
## Inference
We can pass data in a variety of formats to our inference endpoint. In this example, we will pass CSV-formatted data.
```
from sagemaker.predictor import csv_serializer, json_deserializer
# Send requests to the endpoint as CSV and parse the JSON responses
# back into Python objects.
predictor.content_type = 'text/csv'
predictor.serializer = csv_serializer
predictor.accept = 'application/json'
predictor.deserializer = json_deserializer
```
When queried by a principal and an IPAddress, the model returns a score (called 'dot_product') which indicates how expected that event is. In other words, the higher the dot_product, the more normal the event is. First let's run inference on the training (normal) data for sanity check.
```
import pandas as pd
# Run inference on training (normal) data for sanity check
# Scores ('dot_product') on data the model was trained on should be high,
# i.e. these principal/IP pairs look "expected" to the model.
s3_infer_data = 's3://{}/{}'.format(bucket, train_key)
inference_data = pd.read_csv(s3_infer_data, header=None)
inference_data.head()
predictor.predict(inference_data.values)
```
Now let's run inference on the GuardDuty findings. Notice that the scores are much lower than the normal scores.
```
# Run inference on GuardDuty findings
# These tuples come from findings, so their scores are expected to be
# lower (more anomalous) than the scores on the training data above.
infer_key = os.path.join(prefix, 'infer', 'guardduty_tuples.csv')
s3_infer_data = 's3://{}/{}'.format(bucket, infer_key)
inference_data = pd.read_csv(s3_infer_data, header=None)
inference_data.head()
predictor.predict(inference_data.values)
```
## Clean-up
To clean up resources created during the workshop, please see the [Cleaning up](https://github.com/aws-samples/aws-security-workshops/blob/cff322dab7cc0b9d71c4f1575c7016389b9dbe64/detection-ml-wksp/README.md) section in the workshop README guide.
| github_jupyter |
**Source of the materials**: Biopython cookbook (adapted)
<font color='red'>Status: Draft</font>
Swiss-Prot and ExPASy {#chapter:swiss_prot}
=====================
Parsing Swiss-Prot files
------------------------
Swiss-Prot (<http://www.expasy.org/sprot>) is a hand-curated database of
protein sequences. Biopython can parse the โplain textโ Swiss-Prot file
format, which is still used for the UniProt Knowledgebase which combined
Swiss-Prot, TrEMBL and PIR-PSD. We do not (yet) support the UniProtKB
XML file format.
### Parsing Swiss-Prot records
In Sectionย \[sec:SeqIO\_ExPASy\_and\_SwissProt\], we described how to
extract the sequence of a Swiss-Prot record as a `SeqRecord` object.
Alternatively, you can store the Swiss-Prot record in a
`Bio.SwissProt.Record` object, which in fact stores the complete
information contained in the Swiss-Prot record. In this section, we
describe how to extract `Bio.SwissProt.Record` objects from a Swiss-Prot
file.
To parse a Swiss-Prot record, we first get a handle to a Swiss-Prot
record. There are several ways to do so, depending on where and how the
Swiss-Prot record is stored:
- Open a Swiss-Prot file locally:
`>>> handle = open("myswissprotfile.dat")`
- Open a gzipped Swiss-Prot file:
```
import gzip
handle = gzip.open("myswissprotfile.dat.gz")
```
- Open a Swiss-Prot file over the internet:
```
import urllib.request
handle = urllib.request.urlopen("http://www.somelocation.org/data/someswissprotfile.dat")
```
- Open a Swiss-Prot file over the internet from the ExPASy database
(see section \[subsec:expasy\_swissprot\]):
```
from Bio import ExPASy
handle = ExPASy.get_sprot_raw(myaccessionnumber)
```
The key point is that for the parser, it doesnโt matter how the handle
was created, as long as it points to data in the Swiss-Prot format.
We can use `Bio.SeqIO` as described in
Sectionย \[sec:SeqIO\_ExPASy\_and\_SwissProt\] to get file format
agnostic `SeqRecord` objects. Alternatively, we can use `Bio.SwissProt`
to get `Bio.SwissProt.Record` objects, which are a much closer match to the
underlying file format.
To read one Swiss-Prot record from the handle, we use the function
`read()`:
```
from Bio import SwissProt
record = SwissProt.read(handle)
```
This function should be used if the handle points to exactly one
Swiss-Prot record. It raises a `ValueError` if no Swiss-Prot record was
found, and also if more than one record was found.
We can now print out some information about this record:
```
print(record.description)
for ref in record.references:
print("authors:", ref.authors)
print("title:", ref.title)
print(record.organism_classification)
```
To parse a file that contains more than one Swiss-Prot record, we use
the `parse` function instead. This function allows us to iterate over
the records in the file.
For example, letโs parse the full Swiss-Prot database and collect all
the descriptions. You can download this from the [ExPASy FTP
site](ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz)
as a single gzipped-file `uniprot_sprot.dat.gz` (about 300MB). This is a
compressed file containing a single file, `uniprot_sprot.dat` (over
1.5GB).
As described at the start of this section, you can use the Python
library `gzip` to open and uncompress a `.gz` file, like this:
```
import gzip
handle = gzip.open("data/uniprot_sprot.dat.gz")
```
However, uncompressing a large file takes time, and each time you open
the file for reading in this way, it has to be decompressed on the fly.
So, if you can spare the disk space youโll save time in the long run if
you first decompress the file to disk, to get the `uniprot_sprot.dat`
file inside. Then you can open the file for reading as usual:
```
handle = open("data/uniprot_sprot.dat")
```
As of June 2009, the full Swiss-Prot database downloaded from ExPASy
contained 468851 Swiss-Prot records. One concise way to build up a list
of the record descriptions is with a list comprehension:
```
from Bio import SwissProt
handle = open("data/uniprot_sprot.dat")
descriptions = [record.description for record in SwissProt.parse(handle)]
len(descriptions)
descriptions[:5]
```
Or, using a for loop over the record iterator:
```
from Bio import SwissProt
descriptions = []
handle = open("data/uniprot_sprot.dat")
for record in SwissProt.parse(handle):
descriptions.append(record.description)
len(descriptions)
```
Because this is such a large input file, either way takes about eleven
minutes on my new desktop computer (using the uncompressed
`uniprot_sprot.dat` file as input).
It is equally easy to extract any kind of information youโd like from
Swiss-Prot records. To see the members of a Swiss-Prot record, use
```
dir(record)
```
### Parsing the Swiss-Prot keyword and category list
Swiss-Prot also distributes a file `keywlist.txt`, which lists the
keywords and categories used in Swiss-Prot. The file contains entries in
the following form:
```
ID 2Fe-2S.
AC KW-0001
DE Protein which contains at least one 2Fe-2S iron-sulfur cluster: 2 iron
DE atoms complexed to 2 inorganic sulfides and 4 sulfur atoms of
DE cysteines from the protein.
SY Fe2S2; [2Fe-2S] cluster; [Fe2S2] cluster; Fe2/S2 (inorganic) cluster;
SY Di-mu-sulfido-diiron; 2 iron, 2 sulfur cluster binding.
GO GO:0051537; 2 iron, 2 sulfur cluster binding
HI Ligand: Iron; Iron-sulfur; 2Fe-2S.
HI Ligand: Metal-binding; 2Fe-2S.
CA Ligand.
//
ID 3D-structure.
AC KW-0002
DE Protein, or part of a protein, whose three-dimensional structure has
DE been resolved experimentally (for example by X-ray crystallography or
DE NMR spectroscopy) and whose coordinates are available in the PDB
DE database. Can also be used for theoretical models.
HI Technical term: 3D-structure.
CA Technical term.
//
ID 3Fe-4S.
...
```
The entries in this file can be parsed by the `parse` function in the
`Bio.SwissProt.KeyWList` module. Each entry is then stored as a
`Bio.SwissProt.KeyWList.Record`, which is a Python dictionary.
```
from Bio.SwissProt import KeyWList
handle = open("data/keywlist.txt")
records = KeyWList.parse(handle)
for record in records:
print(record['ID'])
print(record['DE'])
```
This prints
```
2Fe-2S.
Protein which contains at least one 2Fe-2S iron-sulfur cluster: 2 iron atoms
complexed to 2 inorganic sulfides and 4 sulfur atoms of cysteines from the
protein.
...
```
Parsing Prosite records
-----------------------
Prosite is a database containing protein domains, protein families,
functional sites, as well as the patterns and profiles to recognize
them. Prosite was developed in parallel with Swiss-Prot. In Biopython, a
Prosite record is represented by the `Bio.ExPASy.Prosite.Record` class,
whose members correspond to the different fields in a Prosite record.
In general, a Prosite file can contain more than one Prosite records.
For example, the full set of Prosite records, which can be downloaded as
a single file (`prosite.dat`) from the [ExPASy FTP
site](ftp://ftp.expasy.org/databases/prosite/prosite.dat), contains 2073
records (version 20.24 released on 4 December 2007). To parse such a
file, we again make use of an iterator:
```
from Bio.ExPASy import Prosite
handle = open("myprositefile.dat")
records = Prosite.parse(handle)
```
We can now take the records one at a time and print out some
information. For example, using the file containing the complete Prosite
database, weโd find
```
from Bio.ExPASy import Prosite
handle = open("prosite.dat")
records = Prosite.parse(handle)
record = next(records)
record.accession
record.name
record.pdoc
record = next(records)
record.accession
record.name
record.pdoc
record = next(records)
record.accession
record.name
record.pdoc
```
and so on. If youโre interested in how many Prosite records there are,
you could use
```
from Bio.ExPASy import Prosite

# Count the Prosite entries by exhausting the parser's iterator.
handle = open("prosite.dat")
records = Prosite.parse(handle)
n = sum(1 for _ in records)
n
```
To read exactly one Prosite from the handle, you can use the `read`
function:
```
from Bio.ExPASy import Prosite
handle = open("mysingleprositerecord.dat")
record = Prosite.read(handle)
```
This function raises a ValueError if no Prosite record is found, and
also if more than one Prosite record is found.
Parsing Prosite documentation records
-------------------------------------
In the Prosite example above, the `record.pdoc` accession numbers
`'PDOC00001'`, `'PDOC00004'`, `'PDOC00005'` and so on refer to Prosite
documentation. The Prosite documentation records are available from
ExPASy as individual files, and as one file (`prosite.doc`) containing
all Prosite documentation records.
We use the parser in `Bio.ExPASy.Prodoc` to parse Prosite documentation
records. For example, to create a list of all accession numbers of
Prosite documentation record, you can use
```
from Bio.ExPASy import Prodoc
handle = open("prosite.doc")
records = Prodoc.parse(handle)
accessions = [record.accession for record in records]
```
Again a `read()` function is provided to read exactly one Prosite
documentation record from the handle.
Parsing Enzyme records
----------------------
ExPASyโs Enzyme database is a repository of information on enzyme
nomenclature. A typical Enzyme record looks as follows:
```
ID 3.1.1.34
DE Lipoprotein lipase.
AN Clearing factor lipase.
AN Diacylglycerol lipase.
AN Diglyceride lipase.
CA Triacylglycerol + H(2)O = diacylglycerol + a carboxylate.
CC -!- Hydrolyzes triacylglycerols in chylomicrons and very low-density
CC lipoproteins (VLDL).
CC -!- Also hydrolyzes diacylglycerol.
PR PROSITE; PDOC00110;
DR P11151, LIPL_BOVIN ; P11153, LIPL_CAVPO ; P11602, LIPL_CHICK ;
DR P55031, LIPL_FELCA ; P06858, LIPL_HUMAN ; P11152, LIPL_MOUSE ;
DR O46647, LIPL_MUSVI ; P49060, LIPL_PAPAN ; P49923, LIPL_PIG ;
DR Q06000, LIPL_RAT ; Q29524, LIPL_SHEEP ;
//
```
In this example, the first line shows the EC (Enzyme Commission) number
of lipoprotein lipase (second line). Alternative names of lipoprotein
lipase are โclearing factor lipaseโ, โdiacylglycerol lipaseโ, and
โdiglyceride lipaseโ (lines 3 through 5). The line starting with โCAโ
shows the catalytic activity of this enzyme. Comment lines start with
โCCโ. The โPRโ line shows references to the Prosite Documentation
records, and the “DR” lines show references to Swiss-Prot records. Not all
of these entries are necessarily present in an Enzyme record.
In Biopython, an Enzyme record is represented by the
`Bio.ExPASy.Enzyme.Record` class. This record derives from a Python
dictionary and has keys corresponding to the two-letter codes used in
Enzyme files. To read an Enzyme file containing one Enzyme record, use
the `read` function in `Bio.ExPASy.Enzyme`:
```
from Bio.ExPASy import Enzyme
with open("data/lipoprotein.txt") as handle:
record = Enzyme.read(handle)
record["ID"]
record["DE"]
record["AN"]
record["CA"]
record["PR"]
```
```
record["CC"]
record["DR"]
```
The `read` function raises a ValueError if no Enzyme record is found,
and also if more than one Enzyme record is found.
The full set of Enzyme records can be downloaded as a single file
(`enzyme.dat`) from the [ExPASy FTP
site](ftp://ftp.expasy.org/databases/enzyme/enzyme.dat), containing 4877
records (release of 3 March 2009). To parse such a file containing
multiple Enzyme records, use the `parse` function in `Bio.ExPASy.Enzyme`
to obtain an iterator:
```
from Bio.ExPASy import Enzyme
handle = open("enzyme.dat")
records = Enzyme.parse(handle)
```
We can now iterate over the records one at a time. For example, we can
make a list of all EC numbers for which an Enzyme record is available:
```
ecnumbers = [record["ID"] for record in records]
```
Accessing the ExPASy server
---------------------------
Swiss-Prot, Prosite, and Prosite documentation records can be downloaded
from the ExPASy web server at <http://www.expasy.org>. Six kinds of
queries are available from ExPASy:
get\_prodoc\_entry
: To download a Prosite documentation record in HTML format
get\_prosite\_entry
: To download a Prosite record in HTML format
get\_prosite\_raw
: To download a Prosite or Prosite documentation record in raw format
get\_sprot\_raw
: To download a Swiss-Prot record in raw format
sprot\_search\_ful
: To search for a Swiss-Prot record
sprot\_search\_de
: To search for a Swiss-Prot record
To access this web server from a Python script, we use the `Bio.ExPASy`
module.
### Retrieving a Swiss-Prot record {#subsec:expasy_swissprot}
Letโs say we are looking at chalcone synthases for Orchids (see
sectionย \[sec:orchids\] for some justification for looking for
interesting things about orchids). Chalcone synthase is involved in
flavanoid biosynthesis in plants, and flavanoids make lots of cool
things like pigment colors and UV protectants.
If you do a search on Swiss-Prot, you can find three orchid proteins for
Chalcone Synthase, id numbers O23729, O23730, O23731. Now, letโs write a
script which grabs these, and parses out some interesting information.
First, we grab the records, using the `get_sprot_raw()` function of
`Bio.ExPASy`. This function is very nice since you can feed it an id and
get back a handle to a raw text record (no HTML to mess with!). We can
the use `Bio.SwissProt.read` to pull out the Swiss-Prot record, or
`Bio.SeqIO.read` to get a SeqRecord. The following code accomplishes
what I just wrote:
```
from Bio import ExPASy
from Bio import SwissProt

# Chalcone synthase entries for three orchid species.
accessions = ["O23729", "O23730", "O23731"]
# Fetch each raw entry from ExPASy and parse it into a SwissProt record.
records = [
    SwissProt.read(ExPASy.get_sprot_raw(accession))
    for accession in accessions
]
```
If the accession number you provided to `ExPASy.get_sprot_raw` does not
exist, then `SwissProt.read(handle)` will raise a `ValueError`. You can
catch `ValueError` exceptions to detect invalid accession numbers:
```
for accession in accessions:
    handle = ExPASy.get_sprot_raw(accession)
    try:
        record = SwissProt.read(handle)
    except ValueError:
        # SwissProt.read raises ValueError for an unknown accession.
        # The original code caught "ValueException", which does not exist
        # in Python and would itself raise a NameError here.
        print("WARNING: Accession %s not found" % accession)
    else:
        # Only append records that parsed successfully; previously the
        # append ran even after a failed read, duplicating a stale record.
        records.append(record)
```
### Searching Swiss-Prot
Now, you may remark that I knew the recordsโ accession numbers
beforehand. Indeed, `get_sprot_raw()` needs either the entry name or an
accession number. When you donโt have them handy, you can use one of the
`sprot_search_de()` or `sprot_search_ful()` functions.
`sprot_search_de()` searches in the ID, DE, GN, OS and OG lines;
`sprot_search_ful()` searches in (nearly) all the fields. They are
detailed on <http://www.expasy.org/cgi-bin/sprot-search-de> and
<http://www.expasy.org/cgi-bin/sprot-search-ful> respectively. Note that
they donโt search in TrEMBL by default (argument `trembl`). Note also
that they return HTML pages; however, accession numbers are quite easily
extractable:
```
from Bio import ExPASy
import re
handle = ExPASy.sprot_search_de("Orchid Chalcone Synthase")
# or:
# handle = ExPASy.sprot_search_ful("Orchid and {Chalcone Synthase}")
html_results = handle.read()
if "Number of sequences found" in html_results:
ids = re.findall(r'HREF="/uniprot/(\w+)"', html_results)
else:
ids = re.findall(r'href="/cgi-bin/niceprot\.pl\?(\w+)"', html_results)
```
### Retrieving Prosite and Prosite documentation records
Prosite and Prosite documentation records can be retrieved either in
HTML format, or in raw format. To parse Prosite and Prosite
documentation records with Biopython, you should retrieve the records in
raw format. For other purposes, however, you may be interested in these
records in HTML format.
To retrieve a Prosite or Prosite documentation record in raw format, use
`get_prosite_raw()`. For example, to download a Prosite record and print
it out in raw text format, use
```
from Bio import ExPASy
handle = ExPASy.get_prosite_raw('PS00001')
text = handle.read()
print(text)
```
To retrieve a Prosite record and parse it into a `Bio.Prosite.Record`
object, use
```
from Bio import ExPASy
from Bio import Prosite
handle = ExPASy.get_prosite_raw('PS00001')
record = Prosite.read(handle)
```
The same function can be used to retrieve a Prosite documentation record
and parse it into a `Bio.ExPASy.Prodoc.Record` object:
```
from Bio import ExPASy
from Bio.ExPASy import Prodoc
handle = ExPASy.get_prosite_raw('PDOC00001')
record = Prodoc.read(handle)
```
For non-existing accession numbers, `ExPASy.get_prosite_raw` returns a
handle to an empty string. When faced with an empty string,
`Prosite.read` and `Prodoc.read` will raise a ValueError. You can catch
these exceptions to detect invalid accession numbers.
The functions `get_prosite_entry()` and `get_prodoc_entry()` are used to
download Prosite and Prosite documentation records in HTML format. To
create a web page showing one Prosite record, you can use
```
from Bio import ExPASy

# Download one Prosite record in HTML format and save it to disk.
handle = ExPASy.get_prosite_entry('PS00001')
html = handle.read()
# A context manager guarantees the file is closed (and its buffer
# flushed) even if the write raises.
with open("myprositerecord.html", "w") as output:
    output.write(html)
```
and similarly for a Prosite documentation record:
```
from Bio import ExPASy
handle = ExPASy.get_prodoc_entry('PDOC00001')
html = handle.read()
output = open("myprodocrecord.html", "w")
output.write(html)
output.close()
```
For these functions, an invalid accession number returns an error
message in HTML format.
Scanning the Prosite database
-----------------------------
[ScanProsite](http://www.expasy.org/tools/scanprosite/) allows you to
scan protein sequences online against the Prosite database by providing
a UniProt or PDB sequence identifier or the sequence itself. For more
information about ScanProsite, please see the [ScanProsite
documentation](http://www.expasy.org/tools/scanprosite/scanprosite-doc.html)
as well as the [documentation for programmatic access of
ScanProsite](http://www.expasy.org/tools/scanprosite/ScanPrositeREST.html).
You can use Biopython's `Bio.ExPASy.ScanProsite` module to scan the
Prosite database from Python. This module both helps you to access
ScanProsite programmatically, and to parse the results returned by
ScanProsite. To scan for Prosite patterns in the following protein
sequence:
```
MEHKEVVLLLLLFLKSGQGEPLDDYVNTQGASLFSVTKKQLGAGSIEECAAKCEEDEEFT
CRAFQYHSKEQQCVIMAENRKSSIIIRMRDVVLFEKKVYLSECKTGNGKNYRGTMSKTKN
```
you can use the following code:
```
# Full query sequence, split across two adjacent string literals for
# readability (the original cell had an unterminated string literal that
# dropped the second half of the sequence and the closing quote).
sequence = (
    "MEHKEVVLLLLLFLKSGQGEPLDDYVNTQGASLFSVTKKQLGAGSIEECAAKCEEDEEFT"
    "CRAFQYHSKEQQCVIMAENRKSSIIIRMRDVVLFEKKVYLSECKTGNGKNYRGTMSKTKN"
)
from Bio.ExPASy import ScanProsite
# Submit the sequence to the ScanProsite REST service; the returned handle
# yields the search results as raw XML when read.
handle = ScanProsite.scan(seq=sequence)
```
By executing `handle.read()`, you can obtain the search results in raw
XML format. Instead, let's use `Bio.ExPASy.ScanProsite.read` to parse
the raw XML into a Python object:
```
result = ScanProsite.read(handle)
type(result)
```
A `Bio.ExPASy.ScanProsite.Record` object is derived from a list, with
each element in the list storing one ScanProsite hit. This object also
stores the number of hits, as well as the number of search sequences, as
returned by ScanProsite. This ScanProsite search resulted in six hits:
```
result.n_seq
result.n_match
len(result)
result[0]
result[1]
result[2]
result[3]
result[4]
result[5]
```
Other ScanProsite parameters can be passed as keyword arguments; see the
[documentation for programmatic access of
ScanProsite](http://www.expasy.org/tools/scanprosite/ScanPrositeREST.html)
for more information. As an example, passing `lowscore=1` to include
matches with low level scores lets us find one additional hit:
```
handle = ScanProsite.scan(seq=sequence, lowscore=1)
result = ScanProsite.read(handle)
result.n_match
```
| github_jupyter |
# Import packages and functions
```
import sys
# force the notebook to look for files in the upper level directory
sys.path.insert(1, '../')
import pandas as pd
from glob import glob
import pymatgen as mg
from data.compound_featurizer import read_new_struct, \
get_struct, get_elem_info, get_elem_distances, \
calc_mm_dists, calc_mx_dists, calc_xx_dists, calc_elem_max_potential
```
# Read in the initial dataframe
```
# Gather every cif file in the demo folder, read each one into its own
# dataframe, and merge them all into a single table.
demo_cif_paths = glob("./user_defined_structures/featurizer_sub_function_demo/*.cif")
df = pd.concat(
    (read_new_struct(path) for path in demo_cif_paths),
    ignore_index=True,
)
# Attach the known oxidation states to the two demo compounds; the other
# structures keep whatever states they were read in with.
oxidation_states = {
    "BaTiO3": {"Ba": 2, "Ti": 4, "O": -2},
    "Mg2AlFeO5": {"Mg": 2, "Al": 3, "Fe": 3, "O": -2},
}
for formula, states in oxidation_states.items():
    row_idx = df[df.Compound == formula].index[0]
    df.at[row_idx, "structure"].add_oxidation_state_by_element(states)
# here is a print out of the dataframe
df
```
# Demo usage of relevant sub-functions
## 1. get_struct("compound_formula", input_df) -> Pymatgen Structure
Since we've already read in all the structures in dataframe, we can access the individual Pymatgen structure using the compound formula.
_Tip_: when you have questions about a specific function, you can always go to the original .py file or you can press <kbd>Shift</kbd> + <kbd>Tab</kbd> for its docstring
```
test_struct = get_struct("BaTiO3", df)
test_struct
```
If you happen to type in a formula that doesn't have an exact match, the function will return an error message along with several possible suggestions
```
get_struct("BaTiO", df)
```
_BaTiO3_ will be used consistently as the demo test structure from now on.
## 2. get_elem_distances(Pymatgen_Structure, Pymatgen_Element_1, Pymatgen_Element_2) -> Array of distances (Å)
Now that we have the structure, we can use **get_elem_distances()** to calculate the distance between any two elements in the structure
But before doing that, we first need to know which site(s) each element occupies through the **get_elem_info()** function
```
elem_indices, _, modified_struct = get_elem_info(test_struct)
print(elem_indices, "\n")
print(modified_struct)
```
If you compare this to the printout from the original, you will find that the modified structure has double the number of sites
```
print(test_struct)
```
This is because if we keep the original function, _Ba_ and _Ti_ will only occupy one site
```
elem_indices_orig, *_ = get_elem_info(test_struct, makesupercell=False)
elem_indices_orig
```
The reason for returning a supercell of the original structure is related to the inner workings of **get_elem_distances()** function. It basically works by getting the site indices of the two elements (they can be the same) and using the built-in method of **pymatgen.Structure.get_distance(i, j)** to calculate the distance between site i and site j. There is one scenario where only using the original structure can cause a problem:
1. If we have a structure where an element only occupies one site and we want to know the distance between the same elements, e.g. _Ba_-_Ba_ or _Ti_-_Ti_ in _BaTiO3_, we would have **pymatgen.Structure.get_distance(i, j)** where i = j and we would only get 0 for that distance.
By making a supercell (in this case a'=2a, b'=b, c'=c), we would be able to get a non-zero distance between the original site and the newly translated site along the a-axis. That being said, if all elements in the original structure occupy more than one site, the structure will not be modified.
Let's first try to calculate the _Ba_-_Ba_ distance using the supercell structure
```
get_elem_distances(test_struct,
elem_1=mg.Element("Ba"),
elem_indices=elem_indices, only_unique=True)
```
**Note**: when the `only_unique` parameter is set to be `True`, the function will only return the unique values of distance since in a structure the same distance can occur multiple times due to symmetry.
Let's see what happens when we use the original reduced structure
```
get_elem_distances(test_struct,
elem_1=mg.Element("Ba"),
elem_indices=elem_indices_orig, only_unique=True)
```
As expected, we get 0 Å. We can also calculate the distance between different elements. Let's see the distance between _Ti_ and _O_
```
get_elem_distances(test_struct,
elem_1=mg.Element("O"), elem_2=mg.Element("Ti"),
elem_indices=elem_indices_orig, only_unique=True)
```
This function can also handle structures where multiple elements can occupy the same site (La$_{2.8}$Mg$_{1.2}$Mn$_4$O$_{12}$ is a made-up structure generated for the purpose of this demo)
```
special_struct = get_struct("La2.8Mg1.2Mn4O12", df)
print(special_struct)
elem_indices, *_ = get_elem_info(special_struct)
distances = get_elem_distances(special_struct,
elem_1=mg.Element("La"), elem_2=mg.Element("Mn"),
elem_indices=elem_indices, only_unique=True)
distances
```
It may seem that there are some distances that are equal to each other, but since the values displayed do not have all the decimal places shown, there are still slight differences among them.
```
distances[0] - distances[1]
```
## 3. Wrapper functions around get_elem_distances() to calculate distances between different types of elements
### 3.1 calc_mm_dists() to calculate distances between metal-metal elements
```
calc_mm_dists(test_struct, return_unique=True)
```
### 3.2 calc_mx_dists() to calculate distances between metal-non_metal elements
```
calc_mx_dists(test_struct, return_unique=True)
```
### 3.3 calc_xx_dists() to calculate distances between non_metal-non_metal elements
```
calc_xx_dists(test_struct, return_unique=True)
```
This functionality is realized again through the **get_elem_info()** function where all the elements in the structure are classified as either a metal or a non_metal.
```
_, elem_groups, _ = get_elem_info(test_struct)
elem_groups
```
Once we know which elements are metal and which ones are non_metal, we can then use the elem_indices to find where they are (i.e. the site indices) and compute the distances using the generic element distance finder **get_elem_distances()**.
## 4. calc_elem_max_potential() to calculate Madelung Site Potentials
The **calc_elem_max_potential()** utilizes the EwaldSummation() module from Pymatgen to calculate site energy for all the sites in a structure and convert the site energy to site potential using the relation as follows. ($U_{E_\text{tot}}$: the total potential energy of the structure, $U_{E_i}$: the site energy at site i, $N$: the total number of sites, $q_i$: the charge at site i, $\Phi(r_i)$: the site potential at site i)
$$
\begin{align*}
U_{E_\text{tot}}&=\sum_{i=1}^{N}U_{E_i}=\frac{1}{2}\sum_{i=1}^{N}q_i\Phi(r_i)\\
U_{E_i}&=\frac{1}{2}q_i\Phi(r_i)\\
\Phi(r_i)&=\frac{2U_{E_i}}{q_i}
\end{align*}
$$
The default output unit for the Madelung site potential is in $V$
```
calc_elem_max_potential(test_struct, full_list=True)
```
But the unit can be converted from $V$ to $e/$Å for easier comparison with the results from VESTA
```
calc_elem_max_potential(test_struct, full_list=True, check_vesta=True)
```
If we don't specify the `full_list` parameter, it will be set to `False` and the function only returns the maximum site potential for each element.
```
calc_elem_max_potential(test_struct)
```
Just like before, this function can also work with structures where multiple elements occupy the same site. We can try a compound with non-integer stoichiometry this time. (again, Mg$_2$AlFeO$_5$ is a made-up structure)
```
non_stoich_struct = get_struct("Mg2AlFeO5", df)
print(non_stoich_struct)
calc_elem_max_potential(non_stoich_struct, check_vesta=True)
```
# Now it's your turn
If you want to test the functions with structures that are not in the loaded dataframe, you can also upload your own .cif file to the `user_defined` folder located at this path
_./user_defined_structures/_
```
USER_DEFINED_FOLDER_PATH = "./user_defined_structures/"
example_new_struct = mg.Structure.from_file(USER_DEFINED_FOLDER_PATH + "CuNiO2_mp-1178372_primitive.cif")
example_new_struct
```
## Define a wrapper function around get_elem_distances()
```
def get_elem_distances_wrapper(structure: mg.Structure, **kwargs):
    """Convenience front-end for get_elem_distances().

    Derives the element site indices (and the possibly supercell-expanded
    structure) via get_elem_info() so callers do not have to compute them
    manually, then delegates to get_elem_distances() with unique distances
    only. Extra keyword arguments (e.g. elem_1, elem_2) are forwarded as-is.
    """
    indices, _, prepared_struct = get_elem_info(structure)
    return get_elem_distances(
        prepared_struct,
        elem_indices=indices,
        only_unique=True,
        **kwargs,
    )
```
Check the _Cu_-_Ni_ distance
```
get_elem_distances_wrapper(example_new_struct, elem_1=mg.Element("Cu"), elem_2=mg.Element("Ni"))
```
Check the _Ni_-_O_ distance
```
get_elem_distances_wrapper(example_new_struct, elem_1=mg.Element("O"), elem_2=mg.Element("Ni"))
```
Check the _Cu_-_Cu_ distance
```
get_elem_distances_wrapper(example_new_struct, elem_1=mg.Element("Cu"))
```
## Get distances of all three types of element pairs
```
calc_mm_dists(example_new_struct)
calc_mx_dists(example_new_struct)
calc_xx_dists(example_new_struct)
```
## A note for site potential calculation
To use the EwaldSummation technique, the input structure has to have oxidation states (that's where the charge value comes from) associated with all the sites. A structure without oxidation states will raise an error in the function.
```
calc_elem_max_potential(example_new_struct)
```
To overcome this problem, we can add oxidation states to the structure using the add_oxidation_state_by_guess() method from Pymatgen
```
example_new_struct.add_oxidation_state_by_guess()
example_new_struct
```
Now we should be able to obtain proper results from the function.
```
calc_elem_max_potential(example_new_struct, check_vesta=True)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.