text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# In this notebook a Q learner with dyna will be trained and evaluated. The Q learner recommends when to buy or sell shares of one particular stock, and in which quantity (in fact it determines the desired fraction of shares in the total portfolio value).
```
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent import Agent
from functools import partial
NUM_THREADS = 1
LOOKBACK = 252*2 + 28
STARTING_DAYS_AHEAD = 20
POSSIBLE_FRACTIONS = [0.0, 1.0]
DYNA = 20
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()
if LOOKBACK == -1:
total_data_in_df = total_data_train_df
data_in_df = data_train_df
else:
data_in_df = data_train_df.iloc[-LOOKBACK:]
total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
env, num_states, num_actions = sim.initialize_env(total_data_in_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
agents = [Agent(num_states=num_states,
num_actions=num_actions,
random_actions_rate=0.98,
random_actions_decrease=0.999,
dyna_iterations=DYNA,
name='Agent_{}'.format(i)) for i in index]
def show_results(results_list, data_in_df, graph=False):
    """Print performance metrics for each simulation result and optionally
    plot the normalized portfolio value against the benchmark symbol.

    results_list: list of DataFrames of position values (one column per holding);
        each is summed across columns to get the total portfolio value per date.
    data_in_df: price data for the benchmark; must contain a 'Close' column.
    graph: when True, plot portfolio vs. benchmark, both rebased to 1.0
        at the first common date.
    """
    for values in results_list:
        total_value = values.sum(axis=1)
        print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
        print('-'*100)
        initial_date = total_value.index[0]
        compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
        compare_results.name = SYMBOL
        compare_results_df = pd.DataFrame(compare_results)
        compare_results_df['portfolio'] = total_value
        # Rebase both series to 1.0 at the first date so they are comparable.
        std_comp_df = compare_results_df / compare_results_df.iloc[0]
        if graph:
            # DataFrame.plot() creates its own figure/axes; the previous
            # plt.figure() call here produced an extra orphan blank figure.
            std_comp_df.plot()
```
## Let's show the symbols data, to see how good the recommender has to be.
```
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 4
for i in range(n_epochs):
tic = time()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL, agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
other_env=env)
show_results([results_list], data_in_df, graph=True)
```
## Let's run the trained agent, with the test set
### First a non-learning test: this scenario would be worse than what is possible (in fact, the q-learner can learn from past samples in the test set without compromising the causality).
```
TEST_DAYS_AHEAD = 20
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
```
### And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
```
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
```
## What are the metrics for "holding the position"?
```
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_test_df['Close'].iloc[TEST_DAYS_AHEAD:]))))
```
## Conclusion:
```
import pickle
with open('../../data/simple_q_learner_fast_learner_full_training.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent)
```
| github_jupyter |
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
img = cv2.imread("../imori.jpg")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
def bgr_to_gray(img):
    """Convert a BGR image (OpenCV channel order) to grayscale.

    Uses the ITU-R BT.709 luma weights and returns a uint8 array.
    """
    blue, green, red = (img[:, :, channel].copy() for channel in range(3))
    gray = 0.2126 * red + 0.7152 * green + 0.0722 * blue
    return gray.astype("uint8")
def otsu_binarization(img_gray):
    """Binarize a grayscale image with Otsu's method.

    Exhaustively searches the threshold v in [0, 255] that maximizes the
    between-class variance c0*c1*(m0-m1)^2, then maps pixels below the
    threshold to 0 and the rest to 255.
    """
    max_variance, threshold = -1, -1
    for v in range(0, 256):
        c0 = np.count_nonzero(img_gray < v)              # size of class 0
        c1 = img_gray.shape[0] * img_gray.shape[1] - c0  # size of class 1
        v0 = img_gray[np.where(img_gray < v)]
        v1 = img_gray[np.where(img_gray >= v)]
        m0 = np.mean(v0) if len(v0) > 0 else 0  # class means (0 when empty)
        m1 = np.mean(v1) if len(v1) > 0 else 0
        variance = c0 * c1 * ((m0-m1)**2)
        if variance > max_variance:
            max_variance = variance
            threshold = v
    print("image shape =", img_gray.shape)
    print("optimal threshold =", threshold)
    # Vectorized thresholding: np.vectorize ran a Python-level loop over
    # every pixel and was far slower for full-size images.
    img_binary = np.where(img_gray < threshold, 0, 255)
    return img_binary
# Erosion
def morphology_erode(img, iteration=1):
    """Morphological erosion with a 3x3 cross structuring element.

    A pixel keeps the value 255 only when itself and its four cross
    neighbours are all 255; otherwise it is set to 0. Borders use
    edge-replicated padding. Repeated `iteration` times.
    Expects a binary image with values in {0, 255}; returns a float array.
    """
    H, W = img.shape
    input_img = np.pad(img, (1, 1), "edge")
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int.
    K = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=int)
    for t in range(iteration):
        output_img = np.ones((H+2, W+2)) * 255
        for i in range(1, H+1):
            for j in range(1, W+1):
                # The cross sums to 255*5 only when all five pixels are 255.
                if np.sum(K * input_img[i-1:i+2, j-1:j+2]) < 255*5:
                    output_img[i, j] = 0
        # NOTE(review): for iteration > 1 the pad frame stays at 255 instead
        # of being re-replicated from the edge — confirm this is intended.
        input_img = output_img.copy()
    output_img = input_img[1:1+H, 1:1+W]
    return output_img
# Dilation
def morphology_dilate(img, iteration=1):
    """Morphological dilation with a 3x3 cross structuring element.

    A pixel becomes 255 when itself or any of its four cross neighbours
    is 255 (i.e. the weighted cross sum reaches 255). Borders use
    edge-replicated padding. Repeated `iteration` times.
    Expects a binary image with values in {0, 255}; returns a float array.
    """
    H, W = img.shape
    input_img = np.pad(img, (1, 1), "edge")
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use builtin int.
    K = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=int)
    for t in range(iteration):
        output_img = np.zeros((H+2, W+2))
        for i in range(1, H+1):
            for j in range(1, W+1):
                if np.sum(K * input_img[i-1:i+2, j-1:j+2]) >= 255:
                    output_img[i, j] = 255
        input_img = output_img.copy()
    output_img = input_img[1:1+H, 1:1+W]
    return output_img
def morphology_gradient(img_gray):
    """Morphological gradient of a grayscale image.

    Binarizes with Otsu's method, then returns |erosion - dilation| of the
    binary image as uint8 (outlines of the foreground regions).
    """
    binary = otsu_binarization(img_gray)
    eroded = morphology_erode(binary)
    dilated = morphology_dilate(binary)
    gradient = np.abs(eroded - dilated)
    return gradient.astype("uint8")
img_gray = bgr_to_gray(img)
img_grad = morphology_gradient(img_gray)
plt.imshow(img_grad, cmap="gray", vmin=0, vmax=255)
plt.show()
```
| github_jupyter |
# Predictions with Faster RCNN
We need to install coco tools.
```
#!pip install pycocotools-windows
import pycocotools
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
# Define current path
current_path = os.getcwd()
def parse_one_annot(path_to_data_file, filename):
    """Return the bounding boxes annotated for `filename`.

    Reads the CSV at `path_to_data_file` (columns include filename, xmin,
    ymin, xmax, ymax) and returns an (n, 4) array of [xmin, ymin, xmax, ymax]
    rows for the matching image.
    """
    annotations = pd.read_csv(path_to_data_file)
    matching = annotations[annotations["filename"] == filename]
    return matching[["xmin", "ymin", "xmax", "ymax"]].values
class DressDataset(torch.utils.data.Dataset):
    """Detection dataset of dress images with CSV bounding-box annotations.

    Images live under `root`/images; boxes are looked up per filename via
    parse_one_annot. Targets follow the torchvision detection format.
    """

    def __init__(self, root, data_file, transforms=None):
        # root: directory containing an "images" subfolder.
        # data_file: CSV with columns filename, xmin, ymin, xmax, ymax.
        # transforms: optional callable applied to (img, target) pairs.
        self.root = root
        self.transforms = transforms
        # Sorted so indices are stable across runs.
        self.imgs = sorted(os.listdir(os.path.join(root, "images")))
        self.path_to_data_file = data_file

    def __getitem__(self, idx):
        """Return (image, target) for sample `idx` in torchvision detection format."""
        # load images and bounding boxes
        img_path = os.path.join(self.root, "images", self.imgs[idx])
        img = Image.open(img_path).convert("RGB")
        box_list = parse_one_annot(self.path_to_data_file, self.imgs[idx])
        boxes = torch.as_tensor(box_list, dtype=torch.float32)
        num_objs = len(box_list)
        # there is only one class (dress), so every object gets label 1.
        labels = torch.ones((num_objs,), dtype=torch.int64)
        image_id = torch.tensor([idx])
        # Box area from [xmin, ymin, xmax, ymax]: (ymax - ymin) * (xmax - xmin).
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        """Number of images found under root/images."""
        return len(self.imgs)
def get_model(num_classes):
    """Build a Faster R-CNN detector for `num_classes` classes.

    Loads a ResNet-50 FPN backbone pre-trained on COCO and swaps in a
    fresh box-predictor head sized for this dataset's classes.
    """
    detector = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    # The new head must match the classifier's input feature width.
    in_features = detector.roi_heads.box_predictor.cls_score.in_features
    detector.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    return detector
def get_transform(train):
    """Compose the image transforms for training or evaluation.

    Always converts the PIL image to a tensor; during training also adds
    a random horizontal flip (p=0.5) for data augmentation.
    """
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)
dataset = DressDataset(root= current_path + "/dress_dataset",data_file= current_path+ "/dress_dataset/labels/dress_labels.csv",transforms = get_transform(train=False))
data_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4,collate_fn=utils.collate_fn)
loaded_model = get_model(num_classes = 2)
loaded_model.load_state_dict(torch.load(current_path + "/model_saved",map_location=torch.device('cpu')))
def predict_box(idx):
    """Run the loaded detector on dataset sample `idx` and visualize results.

    Draws ground-truth boxes in green; predictions with score > 0.9 in red
    and scores in (0.8, 0.9] in purple, each labeled with its score.
    Returns the annotated PIL image.
    """
    img, _ = dataset[idx]
    label_boxes = np.array(dataset[idx][1]["boxes"])
    # Evaluation mode: no dropout/batch-norm updates during inference.
    loaded_model.eval()
    with torch.no_grad():
        prediction = loaded_model([img])
    # Tensor (C, H, W) in [0, 1] -> PIL image (H, W, C) in [0, 255].
    image = Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())
    draw = ImageDraw.Draw(image)
    # Ground-truth boxes in green.
    for gt in label_boxes:
        draw.rectangle([(gt[0], gt[1]), (gt[2], gt[3])], outline="green", width=3)
    # Predicted boxes, color-coded by confidence.
    for k in range(len(prediction[0]["boxes"])):
        box = prediction[0]["boxes"][k].cpu().numpy()
        score = np.round(prediction[0]["scores"][k].cpu().numpy(), decimals=4)
        if score > 0.9:
            draw.rectangle([(box[0], box[1]), (box[2], box[3])], outline="red", width=3)
            draw.text((box[0], box[1]), text=str(score))
        if score > 0.8 and score <= 0.9:
            draw.rectangle([(box[0], box[1]), (box[2], box[3])], outline="purple", width=3)
            draw.text((box[0], box[1]), text=str(score))
    return image
idxs = [569,583]
for i in range(0,len(idxs)):
plt.figure(i+1,figsize = (5, 5))
plt.subplot(1, 1, 1)
plt.axis('off')
plt.imshow(predict_box(idxs[i]))
plt.show()
```
| github_jupyter |
*Sebastian Raschka*
last modified: 04/03/2014
<hr>
I am really looking forward to your comments and suggestions to improve and extend this tutorial! Just send me a quick note
via Twitter: [@rasbt](https://twitter.com/rasbt)
or Email: [bluewoodtree@gmail.com](mailto:bluewoodtree@gmail.com)
<hr>
### Problem Category
- Statistical Pattern Recognition
- Supervised Learning
- Parametric Learning
- Bayes Decision Theory
- Multivariate data (2-dimensional)
- 2-class problem
- equal variances
- equal prior probabilities
- Gaussian model (2 parameters)
- no conditional Risk (1-0 loss functions)
<hr>
<p><a name="sections"></a>
<br></p>
# Sections
<p>• <a href="#given">Given information</a><br>
• <a href="#deriving_db">Deriving the decision boundary</a><br>
• <a href="#classify_rand">Classifying some random example data</a><br>
• <a href="#chern_err">Calculating the Chernoff theoretical bounds for P(error)</a><br>
• <a href="#emp_err">Calculating the empirical error rate</a><br>
<hr>
<p><a name="given"></a>
<br></p>
## Given information:
[<a href="#sections">back to top</a>] <br>
<br>
#### Model: continuous multivariate normal (Gaussian) model for the class-conditional densities
$p(\vec{x} | \omega_j) \sim N(\vec{\mu}_j, \Sigma)$
$p(\vec{x} | \omega_j) \sim \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} \exp{ \bigg[-\frac{1}{2} (\vec{x}-\vec{\mu})^t \Sigma^{-1}(\vec{x}-\vec{\mu}) \bigg] }$
#### Prior probabilities:
$P(\omega_1) = P(\omega_2) = 0.5$
The samples are of 2-dimensional feature vectors:
$\vec{x} = \bigg[
\begin{array}{c}
x_1 \\
x_2 \\
\end{array} \bigg]$
#### Means of the sample distributions for 2-dimensional features:
$\vec{\mu}_{\,1} = \bigg[
\begin{array}{c}
0 \\
0 \\
\end{array} \bigg]$,
$\; \vec{\mu}_{\,2} = \bigg[
\begin{array}{c}
1 \\
1 \\
\end{array} \bigg]$
#### Covariance matrices for the statistically independend and identically distributed ('i.i.d') features:
$\Sigma_i = \bigg[
\begin{array}{cc}
\sigma_{11}^2 & \sigma_{12}^2\\
\sigma_{21}^2 & \sigma_{22}^2 \\
\end{array} \bigg], \;
\Sigma_1 = \Sigma_2 = I = \bigg[
\begin{array}{cc}
1 & 0\\
0 & 1 \\
\end{array} \bigg], \;$
#### Class-conditional probabilities:
$p(\vec{x}\;|\;\omega_1) \sim N \bigg( \vec{\mu_1} = \; \bigg[
\begin{array}{c}
0 \\
0 \\
\end{array} \bigg], \Sigma = I \bigg)$
$p(\vec{x}\;|\;\omega_2) \sim N \bigg( \vec{\mu_2} = \; \bigg[
\begin{array}{c}
1 \\
1 \\
\end{array} \bigg], \Sigma = I \bigg)$
<p><a name="deriving_db"></a>
<br></p>
## Deriving the decision boundary
[<a href="#sections">back to top</a>] <br>
### Bayes' Rule:
$P(\omega_j|x) = \frac{p(x|\omega_j) * P(\omega_j)}{p(x)}$
### Discriminant Functions:
The goal is to maximize the discriminant function, which we define as the posterior probability here to perform a **minimum-error classification** (Bayes classifier).
$g_1(\vec{x}) = P(\omega_1 | \; \vec{x}), \quad g_2(\vec{x}) = P(\omega_2 | \; \vec{x})$
$\Rightarrow g_1(\vec{x}) = P(\vec{x}|\;\omega_1) \;\cdot\; P(\omega_1) \quad | \; ln \\
\quad g_2(\vec{x}) = P(\vec{x}|\;\omega_2) \;\cdot\; P(\omega_2) \quad | \; ln$
<br>
We can drop the prior probabilities (since we have equal priors in this case):
$\Rightarrow g_1(\vec{x}) = ln(P(\vec{x}|\;\omega_1))\\
\quad g_2(\vec{x}) = ln(P(\vec{x}|\;\omega_2))$
$\Rightarrow g_1(\vec{x}) = \frac{1}{2\sigma^2} \bigg[\; \vec{x}^{\,t} - 2 \vec{\mu_1}^{\,t} \vec{x} + \vec{\mu_1}^{\,t} \bigg] \mu_1 \\
= - \frac{1}{2} \bigg[ \vec{x}^{\,t} \vec{x} -2 \; [0 \;\; 0] \;\; \vec{x} + [0 \;\; 0] \;\; \bigg[
\begin{array}{c}
0 \\
0 \\
\end{array} \bigg] \bigg] \\
= -\frac{1}{2} \vec{x}^{\,t} \vec{x}$
$\Rightarrow g_2(\vec{x}) = \frac{1}{2\sigma^2} \bigg[\; \vec{x}^{\,t} - 2 \vec{\mu_2}^{\,t} \vec{x} + \vec{\mu_2}^{\,t} \bigg] \mu_2 \\
= - \frac{1}{2} \bigg[ \vec{x}^{\,t} \vec{x} -2 \; 2\; [1 \;\; 1] \;\; \vec{x} + [1 \;\; 1] \;\; \bigg[
\begin{array}{c}
1 \\
1 \\
\end{array} \bigg] \bigg] \\
= -\frac{1}{2} \; \bigg[ \; \vec{x}^{\,t} \vec{x} - 2\; [1 \;\; 1] \;\; \vec{x} + 2\; \bigg] \;$
### Decision Boundary
$g_1(\vec{x}) = g_2(\vec{x})$
$\Rightarrow -\frac{1}{2} \vec{x}^{\,t} \vec{x} = -\frac{1}{2} \; \bigg[ \; \vec{x}^{\,t} \vec{x} - 2\; [1 \;\; 1] \;\; \vec{x} + 2\; \bigg] \;$
$\Rightarrow -2[1\;\; 1] \vec{x} + 2 = 0$
$\Rightarrow [-2\;\; -2] \;\;\vec{x} + 2 = 0$
$\Rightarrow -2x_1 - 2x_2 + 2 = 0$
$\Rightarrow -x_1 - x_2 + 1 = 0$
<p><a name="classify_rand"></a>
<br></p>
## Classifying some random example data
[<a href="#sections">back to top</a>] <br>
```
%pylab inline
import numpy as np
from matplotlib import pyplot as plt
def decision_boundary(x_1):
    """Return the x_2 coordinate of the decision boundary for a given x_1.

    The boundary derived above is the line x_1 + x_2 = 1.
    """
    return 1 - x_1
# Generate 100 random patterns for class1
mu_vec1 = np.array([0,0])
cov_mat1 = np.array([[1,0],[0,1]])
x1_samples = np.random.multivariate_normal(mu_vec1, cov_mat1, 100)
mu_vec1 = mu_vec1.reshape(1,2).T # to 1-col vector
# Generate 100 random patterns for class2
mu_vec2 = np.array([1,1])
cov_mat2 = np.array([[1,0],[0,1]])
x2_samples = np.random.multivariate_normal(mu_vec2, cov_mat2, 100)
mu_vec2 = mu_vec2.reshape(1,2).T # to 1-col vector
# Scatter plot
f, ax = plt.subplots(figsize=(7, 7))
ax.scatter(x1_samples[:,0], x1_samples[:,1], marker='o', color='green', s=40, alpha=0.5)
ax.scatter(x2_samples[:,0], x2_samples[:,1], marker='^', color='blue', s=40, alpha=0.5)
plt.legend(['Class1 (w1)', 'Class2 (w2)'], loc='upper right')
plt.title('Densities of 2 classes with 100 bivariate random patterns each')
plt.ylabel('x2')
plt.xlabel('x1')
ftext = 'p(x|w1) ~ N(mu1=(0,0)^t, cov1=I)\np(x|w2) ~ N(mu2=(1,1)^t, cov2=I)'
plt.figtext(.15,.8, ftext, fontsize=11, ha='left')
plt.ylim([-3,4])
plt.xlim([-3,4])
# Plot decision boundary
x_1 = np.arange(-5, 5, 0.1)
bound = decision_boundary(x_1)
plt.annotate('R1', xy=(-2, 2), xytext=(-2, 2), size=20)
plt.annotate('R2', xy=(2.5, 2.5), xytext=(2.5, 2.5), size=20)
plt.plot(x_1, bound, color='r', alpha=0.8, linestyle=':', linewidth=3)
x_vec = np.linspace(*ax.get_xlim())
x_1 = np.arange(0, 100, 0.05)
plt.show()
```
<p><a name="chern_err"></a>
<br></p>
## Calculating the Chernoff theoretical bounds for P(error)
[<a href="#sections">back to top</a>] <br>
$P(error) \le p^{\beta}(\omega_1) \; p^{1-\beta}(\omega_2) \; e^{-(\beta(1-\beta))}$
$\Rightarrow 0.5^\beta \cdot 0.5^{(1-\beta)} \; e^{-(\beta(1-\beta))}$
$\Rightarrow 0.5 \cdot e^{-\beta(1-\beta)}$
$min[P(\omega_1), \; P(\omega_2)] \le 0.5 \; e^{-(\beta(1-\beta))} \quad for \; P(\omega_1), \; P(\omega_2) \ge \; 0 \; and \; 0 \; \le \; \beta \; \le 1$
### Plotting the Chernoff Bound for $0 \le \beta \le 1$
```
def chernoff_bound(beta):
    """Chernoff bound on P(error): 0.5 * exp(-beta * (1 - beta)).

    The 0.5 factor comes from the equal priors P(w1) = P(w2) = 0.5;
    accepts scalars or NumPy arrays.
    """
    exponent = beta * (1 - beta)
    return 0.5 * np.exp(-exponent)
betas = np.arange(0, 1, 0.01)
c_bound = chernoff_bound(betas)
plt.plot(betas, c_bound)
plt.title('Chernoff Bound')
plt.ylabel('P(error)')
plt.xlabel('parameter beta')
plt.show()
```
#### Finding the global minimum:
```
from scipy.optimize import minimize
x0 = [0.39] # initial guess (here: guessed based on the plot)
res = minimize(chernoff_bound, x0, method='Nelder-Mead')
print(res)
```
<p><a name="emp_err"></a>
<br></p>
## Calculating the empirical error rate
[<a href="#sections">back to top</a>] <br>
```
def decision_rule(x_vec):
    """Evaluate the decision rule -x_1 - x_2 + 1 for a 2-d sample.

    Positive values fall in region R1 (class w1), negative in R2 (class w2),
    zero exactly on the boundary.
    """
    first, second = x_vec[0], x_vec[1]
    return -first - second + 1
w1_as_w2, w2_as_w1 = 0, 0
for x in x1_samples:
if decision_rule(x) < 0:
w1_as_w2 += 1
for x in x2_samples:
if decision_rule(x) > 0:
w2_as_w1 += 1
emp_err = (w1_as_w2 + w2_as_w1) / float(len(x1_samples) + len(x2_samples))
print('Empirical Error: {}%'.format(emp_err * 100))
# test complete; Gopal
```
| github_jupyter |
##### Copyright 2021 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Parametrized Quantum Circuits for Reinforcement Learning
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/quantum_reinforcement_learning"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/quantum_reinforcement_learning.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/quantum_reinforcement_learning.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/quantum_reinforcement_learning.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Quantum computers have been shown to provide computational advantages in certain problem areas. The field of quantum reinforcement learning (QRL) aims to harness this boost by designing RL agents that rely on quantum models of computation.
In this tutorial, you will implement two reinforcement learning algorithms based on parametrized/variational quantum circuits (PQCs or VQCs), namely a policy-gradient and a deep Q-learning implementation. These algorithms were introduced by [[1] Jerbi et al.](https://arxiv.org/abs/2103.05577) and [[2] Skolik et al.](https://arxiv.org/abs/2103.15084), respectively.
You will implement a PQC with data re-uploading in TFQ, and use it as:
1. an RL policy trained with a policy-gradient method,
2. a Q-function approximator trained with deep Q-learning,
each solving [CartPole-v1](http://gym.openai.com/envs/CartPole-v1/), a benchmarking task from OpenAI Gym. Note that, as showcased in [[1]](https://arxiv.org/abs/2103.05577) and [[2]](https://arxiv.org/abs/2103.15084), these agents can also be used to solve other task-environment from OpenAI Gym, such as [FrozenLake-v0](http://gym.openai.com/envs/FrozenLake-v0/), [MountainCar-v0](http://gym.openai.com/envs/MountainCar-v0/) or [Acrobot-v1](http://gym.openai.com/envs/Acrobot-v1/).
Features of this implementation:
- you will learn how to use a `tfq.layers.ControlledPQC` to implement a PQC with data re-uploading, appearing in many applications of QML. This implementation also naturally allows using trainable scaling parameters at the input of the PQC, to increase its expressivity,
- you will learn how to implement observables with trainable weights at the output of a PQC, to allow a flexible range of output values,
- you will learn how a `tf.keras.Model` can be trained with non-trivial ML loss functions, i.e., that are not compatible with `model.compile` and `model.fit`, using a `tf.GradientTape`.
## Setup
Install TensorFlow:
```
!pip install tensorflow==2.4.1
```
Install TensorFlow Quantum:
```
!pip install tensorflow-quantum
```
Install Gym:
```
!pip install gym==0.18.0
```
Now import TensorFlow and the module dependencies:
```
# Update package resources to account for version changes.
import importlib, pkg_resources
importlib.reload(pkg_resources)
import tensorflow as tf
import tensorflow_quantum as tfq
import gym, cirq, sympy
import numpy as np
from functools import reduce
from collections import deque, defaultdict
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
tf.get_logger().setLevel('ERROR')
```
## 1. Build a PQC with data re-uploading
At the core of both RL algorithms you are implementing is a PQC that takes as input the agent's state $s$ in the environment (i.e., a numpy array) and outputs a vector of expectation values. These expectation values are then post-processed, either to produce an agent's policy $\pi(a|s)$ or approximate Q-values $Q(s,a)$. In this way, the PQCs are playing an analog role to that of deep neural networks in modern deep RL algorithms.
A popular way to encode an input vector in a PQC is through the use of single-qubit rotations, where rotation angles are controlled by the components of this input vector. In order to get a [highly-expressive model](https://arxiv.org/abs/2008.08605), these single-qubit encodings are not performed only once in the PQC, but in several "[re-uploadings](https://quantum-journal.org/papers/q-2020-02-06-226/)", interlayed with variational gates. The layout of such a PQC is depicted below:
<img src="./images/pqc_re-uploading.png" width="700">
As discussed in [[1]](https://arxiv.org/abs/2103.05577) and [[2]](https://arxiv.org/abs/2103.15084), a way to further enhance the expressivity and trainability of data re-uploading PQCs is to use trainable input-scaling parameters $\boldsymbol{\lambda}$ for each encoding gate of the PQC, and trainable observable weights $\boldsymbol{w}$ at its output.
### 1.1 Cirq circuit for ControlledPQC
The first step is to implement in Cirq the quantum circuit to be used as the PQC. For this, start by defining basic unitaries to be applied in the circuits, namely an arbitrary single-qubit rotation and an entangling layer of CZ gates:
```
def one_qubit_rotation(qubit, symbols):
    """
    Return Cirq gates applying a general single-qubit Bloch-sphere rotation:
    rotations about X, Y and Z by the three angles in `symbols`.
    """
    rx_gate = cirq.rx(symbols[0])
    ry_gate = cirq.ry(symbols[1])
    rz_gate = cirq.rz(symbols[2])
    return [rx_gate(qubit), ry_gate(qubit), rz_gate(qubit)]
def entangling_layer(qubits):
    """
    Return a layer of CZ gates entangling neighbouring `qubits` in a
    circular topology (the closing CZ is skipped for exactly 2 qubits,
    where it would duplicate the single nearest-neighbour gate).
    """
    ops = [cirq.CZ(a, b) for a, b in zip(qubits, qubits[1:])]
    if len(qubits) != 2:
        ops += [cirq.CZ(qubits[0], qubits[-1])]
    return ops
```
Now, use these functions to generate the Cirq circuit:
```
def generate_circuit(qubits, n_layers):
    """Prepares a data re-uploading circuit on `qubits` with `n_layers` layers.

    Alternates variational layers (arbitrary single-qubit rotations + CZ
    entanglers) with encoding layers (RX rotations parameterized by the
    input symbols), ending with one extra variational layer.

    Returns:
        (circuit, theta_symbols, input_symbols): the Cirq circuit plus flat
        lists of the variational and encoding sympy symbols, in the order
        they were created (layer-major).
    """
    # Number of qubits
    n_qubits = len(qubits)
    # Sympy symbols for variational angles: 3 per qubit per layer,
    # including the final extra variational layer (hence n_layers + 1).
    params = sympy.symbols(f'theta(0:{3*(n_layers+1)*n_qubits})')
    params = np.asarray(params).reshape((n_layers + 1, n_qubits, 3))
    # Sympy symbols for encoding angles: one input symbol per qubit per layer.
    inputs = sympy.symbols(f'x(0:{n_layers})'+f'_(0:{n_qubits})')
    inputs = np.asarray(inputs).reshape((n_layers, n_qubits))
    # Define circuit
    circuit = cirq.Circuit()
    for l in range(n_layers):
        # Variational layer
        circuit += cirq.Circuit(one_qubit_rotation(q, params[l, i]) for i, q in enumerate(qubits))
        circuit += entangling_layer(qubits)
        # Encoding layer
        circuit += cirq.Circuit(cirq.rx(inputs[l, i])(q) for i, q in enumerate(qubits))
    # Last variational layer (no encoding layer after it)
    circuit += cirq.Circuit(one_qubit_rotation(q, params[n_layers, i]) for i, q in enumerate(qubits))
    return circuit, list(params.flat), list(inputs.flat)
```
Check that this produces a circuit that is alternating between variational and encoding layers.
```
n_qubits, n_layers = 3, 1
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit, _, _ = generate_circuit(qubits, n_layers)
SVGCircuit(circuit)
```
### 1.2 ReUploadingPQC layer using ControlledPQC
To construct the re-uploading PQC from the figure above, you can create a custom Keras layer. This layer will manage the trainable parameters (variational angles $\boldsymbol{\theta}$ and input-scaling parameters $\boldsymbol{\lambda}$) and resolve the input values (input state $s$) into the appropriate symbols in the circuit.
```
class ReUploadingPQC(tf.keras.layers.Layer):
    """
    Performs the transformation (s_1, ..., s_d) -> (theta_1, ..., theta_N, lmbd[1][1]s_1, ..., lmbd[1][M]s_1,
    ......., lmbd[d][1]s_d, ..., lmbd[d][M]s_d) for d=input_dim, N=theta_dim and M=n_layers.
    An activation function from tf.keras.activations, specified by `activation` ('linear' by default) is
    then applied to all lmbd[i][j]s_i.
    All angles are finally permuted to follow the alphabetical order of their symbol names, as processed
    by the ControlledPQC.
    """

    def __init__(self, qubits, n_layers, observables, activation="linear", name="re-uploading_PQC"):
        super(ReUploadingPQC, self).__init__(name=name)
        self.n_layers = n_layers
        self.n_qubits = len(qubits)

        circuit, theta_symbols, input_symbols = generate_circuit(qubits, n_layers)

        # Trainable variational angles, initialized uniformly in [0, pi).
        theta_init = tf.random_uniform_initializer(minval=0.0, maxval=np.pi)
        self.theta = tf.Variable(
            initial_value=theta_init(shape=(1, len(theta_symbols)), dtype="float32"),
            trainable=True, name="thetas"
        )

        # Trainable input-scaling parameters lambda, initialized to 1
        # (identity scaling), one per encoding symbol.
        lmbd_init = tf.ones(shape=(self.n_qubits * self.n_layers,))
        self.lmbd = tf.Variable(
            initial_value=lmbd_init, dtype="float32", trainable=True, name="lambdas"
        )

        # Define explicit symbol order: ControlledPQC resolves its symbols
        # alphabetically, so precompute the permutation into that order.
        symbols = [str(symb) for symb in theta_symbols + input_symbols]
        self.indices = tf.constant([symbols.index(a) for a in sorted(symbols)])

        self.activation = activation
        # The controlled circuit is fed as an (empty) input circuit per batch item.
        self.empty_circuit = tfq.convert_to_tensor([cirq.Circuit()])
        self.computation_layer = tfq.layers.ControlledPQC(circuit, observables)

    def call(self, inputs):
        # inputs[0] = encoding data for the state.
        batch_dim = tf.gather(tf.shape(inputs[0]), 0)
        # Tile the circuit and thetas across the batch dimension.
        tiled_up_circuits = tf.repeat(self.empty_circuit, repeats=batch_dim)
        tiled_up_thetas = tf.tile(self.theta, multiples=[batch_dim, 1])
        # Repeat the state once per re-uploading layer, then scale by lambdas.
        tiled_up_inputs = tf.tile(inputs[0], multiples=[1, self.n_layers])
        scaled_inputs = tf.einsum("i,ji->ji", self.lmbd, tiled_up_inputs)
        squashed_inputs = tf.keras.layers.Activation(self.activation)(scaled_inputs)

        # Concatenate angles and permute to the alphabetical symbol order
        # expected by the ControlledPQC layer.
        joined_vars = tf.concat([tiled_up_thetas, squashed_inputs], axis=1)
        joined_vars = tf.gather(joined_vars, self.indices, axis=1)

        return self.computation_layer([tiled_up_circuits, joined_vars])
```
## 2. Policy-gradient RL with PQC policies
In this section, you will implement the policy-gradient algorithm presented in <a href="https://arxiv.org/abs/2103.05577" class="external">[1]</a>. For this, you will start by constructing, out of the PQC that was just defined, the `softmax-VQC` policy (where VQC stands for variational quantum circuit):
$$ \pi_\theta(a|s) = \frac{e^{\beta \langle O_a \rangle_{s,\theta}}}{\sum_{a'} e^{\beta \langle O_{a'} \rangle_{s,\theta}}} $$
where $\langle O_a \rangle_{s,\theta}$ are expectation values of observables $O_a$ (one per action) measured at the output of the PQC, and $\beta$ is a tunable inverse-temperature parameter.
You can adopt the same observables used in <a href="https://arxiv.org/abs/2103.05577" class="external">[1]</a> for CartPole, namely a global $Z_0Z_1Z_2Z_3$ Pauli product acting on all qubits, weighted by an action-specific weight for each action. To implement the weighting of the Pauli product, you can use an extra `tf.keras.layers.Layer` that stores the action-specific weights and applies them multiplicatively on the expectation value $\langle Z_0Z_1Z_2Z_3 \rangle_{s,\theta}$.
```
class Alternating(tf.keras.layers.Layer):
    """Multiplies its input by a trainable (1, output_dim) weight row,
    initialized to alternating +1/-1 — one action-specific weight per
    output column applied to the PQC expectation value."""

    def __init__(self, output_dim):
        super(Alternating, self).__init__()
        # Weights start at (+1, -1, +1, ...) so the two actions initially
        # receive opposite-signed copies of the observable.
        self.w = tf.Variable(
            initial_value=tf.constant([[(-1.)**i for i in range(output_dim)]]), dtype="float32",
            trainable=True, name="obs-weights")

    def call(self, inputs):
        # inputs: (batch, 1) expectation values -> (batch, output_dim).
        return tf.matmul(inputs, self.w)
```
Prepare the definition of your PQC:
```
n_qubits = 4 # Dimension of the state vectors in CartPole
n_layers = 5 # Number of layers in the PQC
n_actions = 2 # Number of actions in CartPole
qubits = cirq.GridQubit.rect(1, n_qubits)
```
and its observables:
```
ops = [cirq.Z(q) for q in qubits]
observables = [reduce((lambda x, y: x * y), ops)] # Z_0*Z_1*Z_2*Z_3
```
With this, define a `tf.keras.Model` that applies, sequentially, the `ReUploadingPQC` layer previously defined, followed by a post-processing layer that computes the weighted observables using `Alternating`, which are then fed into a `tf.keras.layers.Softmax` layer that outputs the `softmax-VQC` policy of the agent.
```
def generate_model_policy(qubits, n_layers, n_actions, beta, observables):
    """Generates a Keras model for a data re-uploading PQC policy.

    The PQC expectation value is given one trainable weight per action
    (Alternating), scaled by the inverse temperature `beta`, and fed
    through a softmax to produce the softmax-VQC policy pi(a|s).
    """
    input_tensor = tf.keras.Input(shape=(len(qubits), ), dtype=tf.dtypes.float32, name='input')
    re_uploading_pqc = ReUploadingPQC(qubits, n_layers, observables)([input_tensor])
    # Post-processing: per-action weighting -> beta scaling -> softmax.
    process = tf.keras.Sequential([
        Alternating(n_actions),
        tf.keras.layers.Lambda(lambda x: x * beta),
        tf.keras.layers.Softmax()
    ], name="observables-policy")
    policy = process(re_uploading_pqc)
    model = tf.keras.Model(inputs=[input_tensor], outputs=policy)
    return model
model = generate_model_policy(qubits, n_layers, n_actions, 1.0, observables)
tf.keras.utils.plot_model(model, show_shapes=True, dpi=70)
```
You can now train the PQC policy on CartPole-v1, using, e.g., the basic `REINFORCE` algorithm (see Alg. 1 in <a href="https://arxiv.org/abs/2103.05577" class="external">[1]</a>). Pay attention to the following points:
1. Because scaling parameters, variational angles and observables weights are trained with different learning rates, it is convenient to define 3 separate optimizers with their own learning rates, each updating one of these groups of parameters.
2. The loss function in policy-gradient RL is
$$ \mathcal{L}(\theta) = -\frac{1}{|\mathcal{B}|}\sum_{s_0,a_0,r_1,s_1,a_1, \ldots \in \mathcal{B}} \left(\sum_{t=0}^{H-1} \log(\pi_\theta(a_t|s_t)) \sum_{t'=1}^{H-t} \gamma^{t'} r_{t+t'} \right)$$
for a batch $\mathcal{B}$ of episodes $(s_0,a_0,r_1,s_1,a_1, \ldots)$ of interactions in the environment following the policy $\pi_\theta$. This is different from a supervised learning loss with fixed target values that the model should fit, which make it impossible to use a simple function call like `model.fit` to train the policy. Instead, using a `tf.GradientTape` allows to keep track of the computations involving the PQC (i.e., policy sampling) and store their contributions to the loss during the interaction. After running a batch of episodes, you can then apply backpropagation on these computations to get the gradients of the loss with respect to the PQC parameters and use the optimizers to update the policy-model.
Start by defining a function that gathers episodes of interaction with the environment:
```
def gather_episodes(state_bounds, n_actions, model, n_episodes, env_name):
    """Interact with environment in batched fashion.

    Runs `n_episodes` gym environments in lock-step until all of them are
    done, evaluating the policy model once per step for all unfinished
    environments at once. Returns one dict per episode with the lists
    'states', 'actions' and 'rewards'.
    """
    trajectories = [defaultdict(list) for _ in range(n_episodes)]
    envs = [gym.make(env_name) for _ in range(n_episodes)]
    done = [False for _ in range(n_episodes)]
    states = [e.reset() for e in envs]

    while not all(done):
        unfinished_ids = [i for i in range(n_episodes) if not done[i]]
        # States are normalized by the per-dimension bounds before being
        # fed to the PQC. Finished envs (states[i] is None) are skipped.
        normalized_states = [s/state_bounds for i, s in enumerate(states) if not done[i]]

        for i, state in zip(unfinished_ids, normalized_states):
            trajectories[i]['states'].append(state)

        # Compute policy for all unfinished envs in parallel
        states = tf.convert_to_tensor(normalized_states)
        action_probs = model([states])

        # Store action and transition all environments to the next state
        # (entries of finished envs stay None and are filtered above).
        states = [None for i in range(n_episodes)]
        for i, policy in zip(unfinished_ids, action_probs.numpy()):
            action = np.random.choice(n_actions, p=policy)
            states[i], reward, done[i], _ = envs[i].step(action)
            trajectories[i]['actions'].append(action)
            trajectories[i]['rewards'].append(reward)

    return trajectories
```
and a function that computes discounted returns $\sum_{t'=1}^{H-t} \gamma^{t'} r_{t+t'}$ out of the rewards $r_t$ collected in an episode:
```
def compute_returns(rewards_history, gamma):
    """Compute discounted returns with discount factor `gamma`.

    Walks the episode rewards backwards while accumulating the discounted
    sum, then standardizes the returns (zero mean, unit variance) for
    faster and more stable learning.
    """
    discounted = []
    running = 0
    for reward in reversed(rewards_history):
        running = reward + gamma * running
        discounted.append(running)
    discounted.reverse()

    # Standardize; the epsilon guards against division by zero.
    arr = np.asarray(discounted)
    normalized = (arr - arr.mean()) / (arr.std() + 1e-8)
    return normalized.tolist()
```
Define the hyperparameters:
```
# Approximate bounds used to normalize CartPole observations
# (states are divided element-wise by these in gather_episodes).
state_bounds = np.array([2.4, 2.5, 0.21, 2.5])
gamma = 1          # no discounting within an episode
batch_size = 10    # episodes per REINFORCE update
n_episodes = 1000  # total training episodes
```
Prepare the optimizers:
```
# Separate optimizers so each parameter group gets its own learning rate.
optimizer_in = tf.keras.optimizers.Adam(learning_rate=0.1, amsgrad=True)
optimizer_var = tf.keras.optimizers.Adam(learning_rate=0.01, amsgrad=True)
optimizer_out = tf.keras.optimizers.Adam(learning_rate=0.1, amsgrad=True)

# Assign the model parameters to each optimizer
# Indices into model.trainable_variables for the three parameter groups
# (input scalings, variational angles, observable weights).
# NOTE(review): the index-to-group mapping depends on the variable
# creation order inside ReUploadingPQC — confirm against its definition.
w_in, w_var, w_out = 1, 0, 2
```
Implement a function that updates the policy using states, actions and returns:
```
@tf.function
def reinforce_update(states, actions, returns, model):
    """One REINFORCE gradient step on a batch of episodes.

    `actions` must hold (batch_index, action) index pairs so tf.gather_nd
    can select the probability of each action actually taken. Relies on
    the globals `batch_size`, `optimizer_in/var/out` and `w_in/var/out`.
    """
    states = tf.convert_to_tensor(states)
    actions = tf.convert_to_tensor(actions)
    returns = tf.convert_to_tensor(returns)

    with tf.GradientTape() as tape:
        tape.watch(model.trainable_variables)
        logits = model(states)
        # Probability of each taken action under the current policy.
        p_actions = tf.gather_nd(logits, actions)
        log_probs = tf.math.log(p_actions)
        # Policy-gradient loss: -sum(log pi(a|s) * return) / batch_size.
        loss = tf.math.reduce_sum(-log_probs * returns) / batch_size
    grads = tape.gradient(loss, model.trainable_variables)
    # Each parameter group is updated by its own optimizer (different
    # learning rates for scalings, angles and observable weights).
    for optimizer, w in zip([optimizer_in, optimizer_var, optimizer_out], [w_in, w_var, w_out]):
        optimizer.apply_gradients([(grads[w], model.trainable_variables[w])])
```
Now implement the main training loop of the agent.
Note: This agent may need to simulate several million quantum circuits and can take as much as ~20 minutes to finish training.
```
env_name = "CartPole-v1"

# Start training the agent
episode_reward_history = []
for batch in range(n_episodes // batch_size):
    # Gather episodes
    episodes = gather_episodes(state_bounds, n_actions, model, batch_size, env_name)

    # Group states, actions and returns in numpy arrays
    states = np.concatenate([ep['states'] for ep in episodes])
    actions = np.concatenate([ep['actions'] for ep in episodes])
    rewards = [ep['rewards'] for ep in episodes]
    returns = np.concatenate([compute_returns(ep_rwds, gamma) for ep_rwds in rewards])
    returns = np.array(returns, dtype=np.float32)

    # (row_index, action) pairs consumed by tf.gather_nd in reinforce_update.
    id_action_pairs = np.array([[i, a] for i, a in enumerate(actions)])

    # Update model parameters.
    reinforce_update(states, id_action_pairs, returns, model)

    # Store collected rewards
    for ep_rwds in rewards:
        episode_reward_history.append(np.sum(ep_rwds))

    avg_rewards = np.mean(episode_reward_history[-10:])
    print('Finished episode', (batch + 1) * batch_size,
          'Average rewards: ', avg_rewards)
    # CartPole-v1 caps episodes at 500 reward; stop once solved.
    if avg_rewards >= 500.0:
        break
```
Plot the learning history of the agent:
```
# Plot the per-episode rewards collected during training.
plt.figure(figsize=(10,5))
plt.plot(episode_reward_history)
# Fixed axis-label typo ('Epsiode' -> 'Episode').
plt.xlabel('Episode')
plt.ylabel('Collected rewards')
plt.show()
```
Congratulations, you have trained a quantum policy gradient model on Cartpole! The plot above shows the rewards collected by the agent per episode throughout its interaction with the environment. You should see that after a few hundred episodes, the performance of the agent gets close to optimal, i.e., 500 rewards per episode.
You can now visualize the performance of your agent using `env.render()` in a sample episode (uncomment/run the following cell only if your notebook has access to a display):
```
# from PIL import Image
# env = gym.make('CartPole-v1')
# state = env.reset()
# frames = []
# for t in range(500):
# im = Image.fromarray(env.render(mode='rgb_array'))
# frames.append(im)
# policy = model([tf.convert_to_tensor([state/state_bounds])])
# action = np.random.choice(n_actions, p=policy.numpy()[0])
# state, _, done, _ = env.step(action)
# if done:
# break
# env.close()
# frames[1].save('./images/gym_CartPole.gif',
# save_all=True, append_images=frames[2:], optimize=False, duration=40, loop=0)
```
<img src="./images/gym_CartPole.gif" width="700">
## 3. Deep Q-learning with PQC Q-function approximators
In this section, you will move to the implementation of the deep Q-learning algorithm presented in <a href="https://arxiv.org/abs/2103.15084" class="external">[2]</a>. As opposed to a policy-gradient approach, the deep Q-learning method uses a PQC to approximate the Q-function of the agent. That is, the PQC defines a function approximator:
$$ Q_\theta(s,a) = \langle O_a \rangle_{s,\theta} $$
where $\langle O_a \rangle_{s,\theta}$ are expectation values of observables $O_a$ (one per action) measured at the output of the PQC.
These Q-values are updated using a loss function derived from Q-learning:
$$ \mathcal{L}(\theta) = \frac{1}{|\mathcal{B}|}\sum_{s,a,r,s' \in \mathcal{B}} \left(Q_\theta(s,a) - [r +\max_{a'} Q_{\theta'}(s',a')]\right)^2$$
for a batch $\mathcal{B}$ of $1$-step interactions $(s,a,r,s')$ with the environment, sampled from the replay memory, and parameters $\theta'$ specifying the target PQC (i.e., a copy of the main PQC, whose parameters are sporadically copied from the main PQC throughout learning).
You can adopt the same observables used in <a href="https://arxiv.org/abs/2103.15084" class="external">[2]</a> for CartPole, namely a $Z_0Z_1$ Pauli product for action $0$ and a $Z_2Z_3$ Pauli product for action $1$. Both observables are re-scaled so their expectation values are in $[0,1]$ and weighted by an action-specific weight. To implement the re-scaling and weighting of the Pauli products, you can define again an extra `tf.keras.layers.Layer` that stores the action-specific weights and applies them multiplicatively on the expectation values $\left(1+\langle Z_0Z_1 \rangle_{s,\theta}\right)/2$ and $\left(1+\langle Z_2Z_3 \rangle_{s,\theta}\right)/2$.
```
class Rescaling(tf.keras.layers.Layer):
    """Rescale PQC expectations from [-1, 1] to [0, 1] and weight them.

    Holds one trainable weight per observable/action (initialized to 1)
    and applies it multiplicatively to the rescaled expectation values,
    yielding the agent's Q-values.
    """

    def __init__(self, input_dim):
        super(Rescaling, self).__init__()
        self.input_dim = input_dim
        # Shape (1, input_dim): one trainable weight per observable.
        self.w = tf.Variable(
            initial_value=tf.ones(shape=(1,input_dim)), dtype="float32",
            trainable=True, name="obs-weights")

    def call(self, inputs):
        # (inputs + 1) / 2 maps values from [-1, 1] into [0, 1]; tf.repeat
        # tiles the weight row across the batch dimension.
        return tf.math.multiply((inputs+1)/2, tf.repeat(self.w,repeats=tf.shape(inputs)[0],axis=0))
```
Prepare the definition of your PQC and its observables:
```
n_qubits = 4 # Dimension of the state vectors in CartPole
n_layers = 5 # Number of layers in the PQC
n_actions = 2 # Number of actions in CartPole
qubits = cirq.GridQubit.rect(1, n_qubits)
ops = [cirq.Z(q) for q in qubits]
observables = [ops[0]*ops[1], ops[2]*ops[3]] # Z_0*Z_1 for action 0 and Z_2*Z_3 for action 1
```
Define a `tf.keras.Model` that, similarly to the PQC-policy model, constructs a Q-function approximator that is used to generate the main and target models of our Q-learning agent.
```
def generate_model_Qlearning(qubits, n_layers, n_actions, observables, target):
    """Generates a Keras model for a data re-uploading PQC Q-function approximator.

    When `target` is truthy the post-processing layer is named
    "TargetQ-values" so the main and target networks can be told apart;
    the architecture is otherwise identical. Note `n_actions` is unused
    here — the number of outputs follows len(observables).
    """
    input_tensor = tf.keras.Input(shape=(len(qubits), ), dtype=tf.dtypes.float32, name='input')
    re_uploading_pqc = ReUploadingPQC(qubits, n_layers, observables, activation='tanh')([input_tensor])
    # Rescaling maps expectations to [0, 1] and applies trainable weights.
    process = tf.keras.Sequential([Rescaling(len(observables))], name=target*"Target"+"Q-values")
    Q_values = process(re_uploading_pqc)
    model = tf.keras.Model(inputs=[input_tensor], outputs=Q_values)
    return model
model = generate_model_Qlearning(qubits, n_layers, n_actions, observables, False)
model_target = generate_model_Qlearning(qubits, n_layers, n_actions, observables, True)
model_target.set_weights(model.get_weights())
tf.keras.utils.plot_model(model, show_shapes=True, dpi=70)
tf.keras.utils.plot_model(model_target, show_shapes=True, dpi=70)
```
You can now implement the deep Q-learning algorithm and test it on the CartPole-v1 environment. For the policy of the agent, you can use an $\varepsilon$-greedy policy:
$$ \pi(a|s) =
\begin{cases}
\delta_{a,\text{argmax}_{a'} Q_\theta(s,a')}\quad \text{w.p.}\quad 1 - \varepsilon\\
\frac{1}{\text{num\_actions}}\quad \quad \quad \quad \text{w.p.}\quad \varepsilon
\end{cases} $$
where $\varepsilon$ is multiplicatively decayed at each episode of interaction.
Start by defining a function that performs an interaction step in the environment:
```
def interact_env(state, model, epsilon, n_actions, env):
    """Take one epsilon-greedy step in `env` and package the transition.

    Returns a dict with keys 'state', 'action', 'next_state', 'reward'
    and 'done' (stored as float for later tensor arithmetic).
    """
    # Preprocess state
    state_array = np.array(state)
    state = tf.convert_to_tensor([state_array])

    # Sample action
    coin = np.random.random()
    if coin > epsilon:
        # Greedy: action with the highest predicted Q-value.
        q_vals = model([state])
        action = int(tf.argmax(q_vals[0]).numpy())
    else:
        # Explore: uniform random action.
        action = np.random.choice(n_actions)

    # Apply sampled action in the environment, receive reward and next state
    next_state, reward, done, _ = env.step(action)
    interaction = {'state': state_array, 'action': action, 'next_state': next_state.copy(),
                   'reward': reward, 'done':float(done)}

    return interaction
```
and a function that updates the Q-function using a batch of interactions:
```
@tf.function
def Q_learning_update(states, actions, rewards, next_states, done, model, gamma, n_actions):
    """One Q-learning step on a batch of transitions (Huber loss).

    Targets come from the global `model_target` network; gradients are
    applied by the global optimizers `optimizer_in/var/out` to the
    parameter groups indexed by `w_in/var/out`.
    """
    states = tf.convert_to_tensor(states)
    actions = tf.convert_to_tensor(actions)
    rewards = tf.convert_to_tensor(rewards)
    next_states = tf.convert_to_tensor(next_states)
    done = tf.convert_to_tensor(done)

    # Compute their target q_values and the masks on sampled actions
    future_rewards = model_target([next_states])
    # r + gamma * max_a' Q_target(s', a'); terminal states (done=1) do not
    # bootstrap from the next state.
    target_q_values = rewards + (gamma * tf.reduce_max(future_rewards, axis=1)
                                 * (1.0 - done))
    masks = tf.one_hot(actions, n_actions)

    # Train the model on the states and target Q-values
    with tf.GradientTape() as tape:
        tape.watch(model.trainable_variables)
        q_values = model([states])
        # Select Q(s, a) of the action actually taken via the one-hot mask.
        q_values_masked = tf.reduce_sum(tf.multiply(q_values, masks), axis=1)
        loss = tf.keras.losses.Huber()(target_q_values, q_values_masked)

    # Backpropagation
    grads = tape.gradient(loss, model.trainable_variables)
    for optimizer, w in zip([optimizer_in, optimizer_var, optimizer_out], [w_in, w_var, w_out]):
        optimizer.apply_gradients([(grads[w], model.trainable_variables[w])])
```
Define the hyperparameters:
```
gamma = 0.99
n_episodes = 2000
# Define replay memory
max_memory_length = 10000 # Maximum replay length
replay_memory = deque(maxlen=max_memory_length)
epsilon = 1.0 # Epsilon greedy parameter
epsilon_min = 0.01 # Minimum epsilon greedy parameter
decay_epsilon = 0.99 # Decay rate of epsilon greedy parameter
batch_size = 16
steps_per_update = 10 # Train the model every x steps
steps_per_target_update = 30 # Update the target model every x steps
```
Prepare the optimizers:
```
optimizer_in = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
optimizer_var = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
optimizer_out = tf.keras.optimizers.Adam(learning_rate=0.1, amsgrad=True)
# Assign the model parameters to each optimizer
w_in, w_var, w_out = 1, 0, 2
```
Now implement the main training loop of the agent.
Note: This agent may need to simulate several million quantum circuits and can take as much as ~40 minutes to finish training.
```
env = gym.make("CartPole-v1")

episode_reward_history = []
step_count = 0
for episode in range(n_episodes):
    episode_reward = 0
    state = env.reset()

    while True:
        # Interact with env
        interaction = interact_env(state, model, epsilon, n_actions, env)

        # Store interaction in the replay memory
        replay_memory.append(interaction)

        state = interaction['next_state']
        episode_reward += interaction['reward']
        step_count += 1

        # Update model
        if step_count % steps_per_update == 0:
            # Sample a batch of interactions and update Q_function
            # NOTE(review): np.random.choice samples *with* replacement and
            # converts the deque to an object array on every call;
            # random.sample(replay_memory, batch_size) would avoid duplicates.
            training_batch = np.random.choice(replay_memory, size=batch_size)
            Q_learning_update(np.asarray([x['state'] for x in training_batch]),
                              np.asarray([x['action'] for x in training_batch]),
                              np.asarray([x['reward'] for x in training_batch], dtype=np.float32),
                              np.asarray([x['next_state'] for x in training_batch]),
                              np.asarray([x['done'] for x in training_batch], dtype=np.float32),
                              model, gamma, n_actions)

        # Update target model
        if step_count % steps_per_target_update == 0:
            model_target.set_weights(model.get_weights())

        # Check if the episode is finished
        if interaction['done']:
            break

    # Decay epsilon
    epsilon = max(epsilon * decay_epsilon, epsilon_min)
    episode_reward_history.append(episode_reward)
    if (episode+1)%10 == 0:
        avg_rewards = np.mean(episode_reward_history[-10:])
        print("Episode {}/{}, average last 10 rewards {}".format(
            episode+1, n_episodes, avg_rewards))
        # CartPole-v1 caps episodes at 500 reward; stop once solved.
        if avg_rewards >= 500.0:
            break
```
Plot the learning history of the agent:
```
# Plot the per-episode rewards collected during training.
plt.figure(figsize=(10,5))
plt.plot(episode_reward_history)
# Fixed axis-label typo ('Epsiode' -> 'Episode').
plt.xlabel('Episode')
plt.ylabel('Collected rewards')
plt.show()
```
Similarly to the plot above, you should see that after ~1000 episodes, the performance of the agent gets close to optimal, i.e., 500 rewards per episode. Learning takes longer for Q-learning agents since the Q-function is a "richer" function to be learned than the policy.
## 4. Exercise
Now that you have trained two different types of models, try experimenting with different environments (and different numbers of qubits and layers). You could also try combining the PQC models of the last two sections into an [actor-critic agent](https://lilianweng.github.io/lil-log/2018/04/08/policy-gradient-algorithms.html#actor-critic).
| github_jupyter |
# 04: Matrix - An Exercise in Parallelism
An early use for Spark has been Machine Learning. Spark's `MLlib` of algorithms contains classes for vectors and matrices, which are important for many ML algorithms. This exercise uses a simpler representation of matrices to explore another topic; explicit parallelism.
The sample data is generated internally; there is no input that is read. The output is written to the file system as before.
See the corresponding Spark job [Matrix4.scala](https://github.com/deanwampler/spark-scala-tutorial/blob/master/src/main/scala/sparktutorial/Matrix4.scala).
Let's start with a class to represent a Matrix.
```
/**
 * A special-purpose matrix case class. Each cell is given the value
 * i*N + j for indices (i,j), counting from 0.
 * Note: Must be serializable, which is automatic for case classes.
 */
case class Matrix(m: Int, n: Int) {
  assert(m > 0 && n > 0, "m and n must be > 0")

  // Build one row of n consecutive Long values starting at `start`.
  private def makeRow(start: Long): Array[Long] =
    Array.iterate(start, n)(i => i+1)

  // m rows; each row starts n past the first element of the previous row.
  private val repr: Array[Array[Long]] =
    Array.iterate(makeRow(0), m)(rowi => makeRow(rowi(0) + n))

  /** Return row i, <em>indexed from 0</em>. */
  def apply(i: Int): Array[Long] = repr(i)

  /** Return the (i,j) element, <em>indexed from 0</em>. */
  def apply(i: Int, j: Int): Long = repr(i)(j)

  // Format string sized to the widest cell so columns line up when printed.
  private val cellFormat = {
    val maxEntryLength = (m*n - 1).toString.length
    s"%${maxEntryLength}d"
  }

  private def rowString(rowI: Array[Long]) =
    rowI map (cell => cellFormat.format(cell)) mkString ", "

  override def toString = repr map rowString mkString "\n"
}
```
Some variables:
```
val nRows = 5
val nCols = 10
val out = "output/matrix4"
```
Let's create a matrix.
```
val matrix = Matrix(nRows, nCols)
```
With a Scala data structure like this, we can use `SparkContext.parallelize` to convert it into an `RDD`. In this case, we'll actually create an `RDD` with a count of indices for the number of rows, `1 to nRows`. Then we'll map over that `RDD` and use it to compute the average of each row's columns. Finally, we'll "collect" the results back to an `Array` in the driver.
```
// Parallelize the row indices, compute each row's sum and average on the
// workers, then collect the (sum, average) pairs back to the driver.
val sums_avgs = sc.parallelize(1 to nRows).map { i =>
  // Matrix indices count from 0.
  val sum = matrix(i-1) reduce (_ + _) // Recall that "_ + _" is the same as "(i1, i2) => i1 + i2".
  // NOTE(review): sum/nCols is Long integer division, so the average is truncated.
  (sum, sum/nCols) // We'll return RDD[(sum, average)]
}.collect // ... then convert to an array
```
## Recap
`SparkContext.parallelize` is a convenient way to convert a data structure into an RDD.
## Exercises
### Exercise 1: Try different values of nRows and nCols
### Exercise 2: Try other statistics, like standard deviation
The code for the standard deviation that you would add is the following:
```scala
val row = matrix(i-1)
...
val sumsquares = row.map(x => x*x).reduce(_+_)
val stddev = math.sqrt(1.0*sumsquares) // 1.0* => so we get a Double for the sqrt!
```
Given the synthesized data in the matrix, are the average and standard deviation actually very meaningful here, if this were representative of real data?
| github_jupyter |
# Text Analysis - Dictionary of the Spanish language
- **Created by: Andrés Segura-Tinoco**
- **Created on: Aug 20, 2020**
- **Updated on: Aug 02, 2021**
- **Data: Dictionary of the Spanish language**
### Text Analysis
1. Approximate number of words in the DSL
2. Number of words with acute accent in Spanish language
3. Frequency of words per size
4. Top 15 bigger words
5. Frequency of letters in DSL words
6. Vowel and consonant ratio
7. Frequency of words per letter of the alphabet
8. Most frequent n-grams
```
# Load Python libraries
import re
import codecs
import pandas as pd
from collections import Counter
# Import plot libraries
import matplotlib.pyplot as plt
```
### Util functions
```
# Util function - Read a plain text file
def read_file_lines(file_path):
    """Read a UTF-8 text file and return its lines (newlines preserved)."""
    with codecs.open(file_path, encoding='utf-8') as handle:
        return [line for line in handle]
# Util function - Apply data quality to words
def apply_dq_word(word):
    """Clean a raw dictionary entry into a single bare word.

    Steps: drop newline characters, keep only the first comma-separated
    token, trim surrounding whitespace, and cut off trailing digits
    (homograph markers such as 'banco2').

    :param word: raw line read from a dictionary file.
    :return: the cleaned word (possibly empty).
    """
    new_word = word.replace('\n', '')

    # Get first token
    if ',' in new_word:
        new_word = new_word.split(',')[0]

    # Remove extra whitespaces
    new_word = new_word.strip()

    # Truncate at the first digit. The previous char-by-char loop was
    # O(n^2) and could leave trailing whitespace behind (e.g. 'word 1'
    # became 'word '); stripping again fixes that.
    match = re.search(r"\d", new_word)
    if match:
        new_word = new_word[:match.start()].strip()

    return new_word
# Util function - Plot column chart
def plot_col_chart(df, figsize, x_var, y_var, title, color='#1f77b4', legend=None, x_label=None):
    """Plot a column (vertical bar) chart from a DataFrame.

    :param df: DataFrame containing columns `x_var` and `y_var`.
    :param figsize: (width, height) of the figure in inches.
    :param x_var: column plotted on the x axis.
    :param y_var: column plotted as bar height.
    :param title: chart title.
    :param color: bar color.
    :param legend: optional legend labels; the legend is removed if absent.
    :param x_label: optional replacement tick labels for the x axis.
    """
    fig, ax = plt.subplots()
    df.plot.bar(ax=ax, x=x_var, y=y_var, color=color, figsize=figsize)
    if legend:
        ax.legend(legend)
    else:
        ax.get_legend().remove()
    if x_label:
        # Bug fix: the original used np.arange, but numpy is never imported
        # in this notebook, so passing x_label raised a NameError.
        x = list(range(len(x_label)))
        plt.xticks(x, x_label, rotation=0)
    else:
        plt.xticks(rotation=0)
    plt.title(title, fontsize=16)
    plt.xlabel(x_var.capitalize())
    plt.ylabel(y_var.capitalize())
    plt.show()
# Util function - Plot bar chart
def plot_bar_chart(df, figsize, x_var, y_var, title, color='#1f77b4', legend=None):
    """Plot a horizontal bar chart from a DataFrame.

    :param df: DataFrame containing columns `x_var` and `y_var`.
    :param figsize: (width, height) of the figure in inches.
    :param x_var: column plotted on the category (y) axis.
    :param y_var: column plotted as bar length.
    :param title: chart title.
    :param color: bar color (default matches matplotlib's default blue).
    :param legend: optional legend labels; the legend is removed if absent.
    """
    fig, ax = plt.subplots()
    # Bug fix: forward `color` to the plot call — it was accepted but
    # silently ignored before.
    df.plot.barh(ax=ax, x=x_var, y=y_var, color=color, figsize=figsize)
    if legend:
        ax.legend(legend)
    else:
        ax.get_legend().remove()
    plt.title(title, fontsize=16)
    plt.xlabel(y_var.capitalize())
    plt.ylabel(x_var.capitalize())
    plt.show()
```
## 1. Approximate number of words in the DSL
```
# Range of files by first letter of word
# ASCII letters a..z (codepoints 97-122) plus the Spanish letter 'ñ';
# one source file per letter.
letter_list = list(map(chr, range(97, 123)))
letter_list.append('ñ')
len(letter_list)

# Read words by letter [a-z]
word_dict = Counter()
file_path = '../data/dics/'

# Read data only first time
for letter in letter_list:
    filename = file_path + letter + '.txt'
    word_list = read_file_lines(filename)
    for word in word_list:
        # Clean each raw line into a bare word before counting.
        word = apply_dq_word(word)
        word_dict[word] += 1

# Show results
n_words = len(word_dict)
print('Total of different words: %d' % n_words)
```
## 2. Number of words with acute accent in Spanish language
```
# Counting words with acute accent
aa_freq = Counter()
# NOTE(review): words are lowercased before searching, so the uppercase
# alternatives in this character class never match.
regexp = re.compile('[áéíóúÁÉÍÓÚ]')

for word in word_dict.keys():
    match = regexp.search(word.lower())
    if match:
        # Only the first accented vowel of each word is counted.
        l = match.group(0)
        aa_freq[l] += 1

# Show results
count = sum(aa_freq.values())
perc_words = 100.0 * count / n_words
print('Total words with acute accent: %d (%0.2f %s)' % (count, perc_words, '%'))

# Cooking dataframe
df = pd.DataFrame.from_records(aa_freq.most_common(), columns = ['vowel', 'frequency']).sort_values(by=['vowel'])
df['perc'] = round(100.0 * df['frequency'] / count, 2)
df

# Plotting data
figsize = (12, 6)
x_var = 'vowel'
y_var = 'perc'
title = 'Frequency of accented vowels'
plot_col_chart(df, figsize, x_var, y_var, title)
```
## 3. Frequency of words per size
```
# Processing
# Histogram of word lengths across the whole dictionary.
word_size = Counter()
for word in word_dict.keys():
    size = len(word)
    word_size[size] += 1

# Cooking dataframe
df = pd.DataFrame.from_records(word_size.most_common(), columns = ['size', 'frequency']).sort_values(by=['size'])
df['perc'] = 100.0 * df['frequency'] / n_words
df

# Plotting data
figsize = (12, 6)
x_var = 'size'
y_var = 'frequency'
title = 'Frequency of words per size'
plot_col_chart(df, figsize, x_var, y_var, title)
```
## 4. Top 15 bigger words
```
# Processing
top_size = Counter()
# Keep only words of at least this many characters.
threshold = 21

for word in word_dict.keys():
    size = len(word)
    if size >= threshold:
        top_size[word] = size

# Top 15 bigger words
top_size.most_common()
```
## 5. Frequency of letters in DSL words
```
# Processing
# Count every character occurrence across all (lowercased) words.
letter_freq = Counter()
for word in word_dict.keys():
    word = word.lower()
    for l in word:
        letter_freq[l] += 1

n_total = sum(letter_freq.values())
n_total

# Cooking dataframe
df = pd.DataFrame.from_records(letter_freq.most_common(), columns = ['letter', 'frequency']).sort_values(by=['letter'])
df['perc'] = 100.0 * df['frequency'] / n_total
df

# Plotting data
figsize = (12, 6)
x_var = 'letter'
y_var = 'frequency'
title = 'Letter frequency in DSL words'
plot_col_chart(df, figsize, x_var, y_var, title)

# Plotting sorted data
figsize = (12, 6)
x_var = 'letter'
y_var = 'perc'
title = 'Letter frequency in DSL words (Sorted)'
color = '#2ca02c'
plot_col_chart(df.sort_values(by='perc', ascending=False), figsize, x_var, y_var, title, color)
```
## 6. Vowel and consonant ratio
```
# Vowels, including accented forms ('è', 'î', 'ü' also appear in the data).
vowel_list = 'aeiouáéíóúèîü'
vowel_total = 0
consonant_total = 0

for ix, row in df.iterrows():
    letter = str(row['letter'])
    freq = int(row['frequency'])
    if letter in vowel_list:
        vowel_total += freq
    elif letter.isalpha():
        # Non-alphabetic characters are excluded from both totals.
        consonant_total += freq

letter_total = vowel_total + consonant_total

# Initialize list of lists
data = [['vowels', vowel_total, (100.0 * vowel_total / letter_total)],
        ['consonant', consonant_total, (100.0 * consonant_total / letter_total)]]

# Create the pandas DataFrame
df = pd.DataFrame(data, columns = ['type', 'frequency', 'perc'])
df

# Plotting data
figsize = (6, 6)
x_var = 'type'
y_var = 'perc'
title = 'Vowel and consonant ratio'
plot_col_chart(df, figsize, x_var, y_var, title)
```
## 7. Frequency of words per letter of the alphabet
```
# Map accented vowels to their plain forms so e.g. words starting with 'á'
# are counted under 'a'.
norm_dict = {'á':'a', 'é':'e', 'í':'i', 'ó':'o', 'ú':'u'}

# Processing
first_letter_freq = Counter()
for word in word_dict.keys():
    first_letter = word[0].lower()
    if first_letter.isalpha():
        if first_letter in norm_dict.keys():
            first_letter = norm_dict[first_letter]
        first_letter_freq[first_letter] += 1

# Cooking dataframe
df = pd.DataFrame.from_records(first_letter_freq.most_common(), columns = ['letter', 'frequency']).sort_values(by=['letter'])
df['perc'] = 100.0 * df['frequency'] / n_words
df

# Plotting data
figsize = (12, 6)
x_var = 'letter'
y_var = 'frequency'
title = 'Frequency of words per letter of the alphabet'
plot_col_chart(df, figsize, x_var, y_var, title)

# Plotting sorted data
figsize = (12, 6)
x_var = 'letter'
y_var = 'perc'
title = 'Frequency of words per letter of the alphabet (Sorted)'
color = '#2ca02c'
plot_col_chart(df.sort_values(by='perc', ascending=False), figsize, x_var, y_var, title, color)
```
## 8. Most frequent n-grams
```
# Processing
top_ngrams = 25
bi_grams = Counter()
tri_grams = Counter()

for word in word_dict.keys():
    word = word.lower()
    n = len(word)

    # Sliding window of size 2 over each word.
    size = 2
    for i in range(size, n+1):
        n_grams = word[i-size:i]
        bi_grams[n_grams] += 1

    # Sliding window of size 3 over each word.
    size = 3
    for i in range(size, n+1):
        n_grams = word[i-size:i]
        tri_grams[n_grams] += 1

# Cooking dataframe
df_bi = pd.DataFrame.from_records(bi_grams.most_common(top_ngrams), columns=['bi-grams', 'frequency'])
df_tri = pd.DataFrame.from_records(tri_grams.most_common(top_ngrams), columns=['tri-grams', 'frequency'])

# Plotting sorted data
figsize = (8, 10)
x_var = 'bi-grams'
y_var = 'frequency'
title = str(top_ngrams) + ' bi-grams most frequent in Spanish'
plot_bar_chart(df_bi.sort_values(by=['frequency']), figsize, x_var, y_var, title)

# Plotting sorted data
figsize = (8, 10)
x_var = 'tri-grams'
y_var = 'frequency'
title = str(top_ngrams) + ' tri-grams most frequent in Spanish'
plot_bar_chart(df_tri.sort_values(by=['frequency']), figsize, x_var, y_var, title)
```
---
<a href="https://ansegura7.github.io/DSL_Analysis/">« Home</a>
| github_jupyter |
```
import pandas as pds
import sklearn as skl
import seaborn as sns
import numpy as num

# Load the Kepler cumulative dataset; treat empty strings as missing and
# drop any row with missing values.
planetas = pds.read_csv("cumulative.csv")
planetas.replace('',num.nan,inplace = True)
planetas.dropna(inplace = True)
```
# Para demonstrarmos o funcionamento das diferentes funções de Kernel num SVM, vamos pegar um database não-trivial e tentar classifica-lo com o nosso modelo.
* Para isso, escolhemos o database da NASA: "Kepler Exoplanet Search Results"
* Nesse database, temos informações sobre diversos planetas detectados pelo telescópio Kepler, cujo foco é de encontrar exoplanetas (planetas que orbitam estrelas que não o Sol) pelo universo.
# Na coluna *kResult*, temos a situação do planeta ID:
* É um exoplaneta, portanto CONFIRMED.
* Não é um exoplaneta, portanto FALSE POSITIVE.
* Talvez seja um exoplaneta, portanto CANDIDATE.
# Vamos criar um modelo que classifique se um planeta é/pode ser um exoplaneta ou não.
```
planetas.head(20)
```
# Percebemos claramente, nos gráficos em pares abaixo, que as amostras do nosso database não são nada triviais, e nem linearmente separáveis. Portanto, caso queiramos utilizar o SVM para classificar os dados, teremos que testar diferentes funções de Kernel e moldá-las para uma melhor precisão.
```
# Pairwise scatter plots of a feature subset, colored by disposition label.
sns.pairplot(planetas.head(1000), vars = planetas.columns[8:14],hue = 'kResult')

from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import svm

# Target: disposition column; features: everything except the label,
# identifier columns and the false-positive flag columns.
yPlanetas = planetas['kResult'].copy()
xPlanetas = planetas.drop(['kResult','kName','kID','Not Transit-Like Flag','Stellar Eclipse Flag','Centroid Offset Flag','Ephemeris Match Indicates Contamination Flag'],axis = 1)

# NOTE(review): test_size=0.80 trains on only 20% of the data — confirm intended.
xTreino, xTeste, yTreino, yTeste = train_test_split(xPlanetas, yPlanetas, test_size=0.80, random_state=3)
```
# Aqui criaremos 4 modelos com funções de kernel diferentes (parâmetro *kernel*) e treinaremos cada um deles com nossos dados de treino para no fim escolher o de precisão mais satisfatória.
```
modeloLinear = svm.SVC(kernel = 'linear')
modeloPoly = svm.SVC(kernel = 'poly')
modeloRBF = svm.SVC(kernel = 'rbf')
modeloSigmoid = svm.SVC(kernel = 'sigmoid')
```
<img src = "./SVM-Kernel-Function-Types.png">
```
modeloLinear.fit(xTreino,yTreino)
modeloPoly.fit(xTreino,yTreino)
modeloRBF.fit(xTreino,yTreino)
modeloSigmoid.fit(xTreino,yTreino)
```
# Aqui iremos mostrar o "score" de cada um dos nossos modelos, isto é, o quão preciso o modelo foi em relação à realidade, e os coeficientes da função de decisão.
* Perceba que os modelos Linear, Polinomial e RBF tiveram eficiência muito próxima, o que indica a complexidade do database. Portanto, para melhorar a precisão, teremos que manualmente testar os parâmetros (ou *coeficientes* ) de cada um dos modelos.
* Perceba também que a pontuação média de 60% indica que nosso modelo tem uma certa eficiência considerável, já o dobro da eficiência esperada para um modelo aleátorio (pontuação em torno de 30%). Isso demonstra que o truque de kernel é efetivo na manipulação de dados extremamente complexos como o utilizado.
```
print(" Score = ",modeloLinear.score(xTeste,yTeste), "\n")
print(" Coeficientes da função de decisão: \n\n",modeloLinear.decision_function(xTeste))
print(" Score = ",modeloPoly.score(xTeste,yTeste), "\n")
print(" Coeficientes da função de decisão: \n\n",modeloPoly.decision_function(xTeste))
print(" Score = ",modeloRBF.score(xTeste,yTeste), "\n")
print(" Coeficientes da função de decisão: \n\n",modeloRBF.decision_function(xTeste))
print(" Score = ",modeloSigmoid.score(xTeste,yTeste), "\n")
print(" Coeficientes da função de decisão: \n\n",modeloSigmoid.decision_function(xTeste))
```
| github_jupyter |
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
#from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
from sklearn.decomposition import PCA
from scipy.stats.mstats import zscore # This is to standardized the parameters
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Conv2D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.layers import Flatten
from keras.optimizers import Adam
%matplotlib inline
np.random.seed(1)
```
## 1. Load Data
```
import math as M
from matplotlib import mlab
from matplotlib.colors import Normalize
from obspy.imaging.cm import obspy_sequential
import matplotlib.pyplot as plt
from skimage.transform import resize
import scipy
def getSpectogram(data, samp_rate, per_lap=0.9, wlen=None, log=False,
                  outfile=None, fmt=None, axes=None, dbscale=False,
                  mult=8.0, zorder=None, title=None,
                  show=True, sphinx=False, clip=(0.0, 1.0)):
    """Compute a spectrogram of ``data`` (adapted from obspy's plotting code).

    Args:
        data: 1-D array-like signal.
        samp_rate: sampling rate in Hz (coerced to float).
        per_lap: fraction of overlap between consecutive FFT windows.
        wlen: FFT window length in seconds; defaults to samp_rate / 100.
        dbscale: if True return 10*log10(power), otherwise sqrt(power).
        mult: zero-padding factor (pad_to = nearest_pow_2(mult) * nfft).
        clip: (vmin, vmax) pair in [0, 1]; validated for interface
            compatibility with the obspy original, but not used here.
        log, outfile, fmt, axes, zorder, title, show, sphinx: unused; kept
            so existing call sites remain valid.

    Returns:
        Tuple ``(freq, time, specgram)`` with the zero-frequency bin removed.

    Raises:
        ValueError: if clip is not a valid (vmin, vmax) pair.
    """
    # enforce float for samp_rate
    samp_rate = float(samp_rate)
    # set wlen from samp_rate if not specified otherwise
    if not wlen:
        wlen = samp_rate / 100.
    npts = len(data)
    # nfft needs to be an integer, otherwise a deprecation will be raised
    nfft = int(_nearest_pow_2(wlen * samp_rate))
    if nfft > npts:
        nfft = int(_nearest_pow_2(npts / 8.0))
    if mult is not None:
        mult = int(_nearest_pow_2(mult))
        mult = mult * nfft
    nlap = int(nfft * float(per_lap))
    # Remove the DC offset before the FFT.
    data = data - data.mean()
    specgram, freq, time = mlab.specgram(data, Fs=samp_rate, NFFT=nfft,
                                         pad_to=mult, noverlap=nlap)
    # db scale and remove zero/offset for amplitude
    if dbscale:
        specgram = 10 * np.log10(specgram[1:, :])
    else:
        specgram = np.sqrt(specgram[1:, :])
    freq = freq[1:]
    # Validate clip to preserve the original error behaviour. The colour
    # normalisation that consumed it in the obspy original was dead code
    # here (the Normalize object was never returned) and has been removed.
    # FIX: clip's default is now a tuple, not a shared mutable list.
    vmin, vmax = clip
    if vmin < 0 or vmax > 1 or vmin >= vmax:
        raise ValueError("Invalid parameters for clip option.")
    return freq, time, specgram
def _nearest_pow_2(x):
"""
Find power of two nearest to x
>>> _nearest_pow_2(3)
2.0
>>> _nearest_pow_2(15)
16.0
:type x: float
:param x: Number
:rtype: Int
:return: Nearest power of 2 to x
"""
a = M.pow(2, M.ceil(np.log2(x)))
b = M.pow(2, M.floor(np.log2(x)))
if abs(a - x) < abs(b - x):
return a
else:
return b
events = np.load("NewDatasets/Data_D11.npy")
label = np.load("NewDatasets/Label_D11.npy")
times = np.load("NewDatasets/Time_D11.npy")
events=events.reshape([events.shape[0],events.shape[1]])
times=times[:,:label.shape[0]]
#print(times)
#events = np.load("Datasets/DataDetection_M_2.8_R_0.5_S_4_Sec_256.npy")
#label = np.load("Datasets/LabelDetection_M_2.8_R_0.5_S_4_Sec_256.npy")
#times=np.load("Datasets/TimeDetection_M_2.8_R_0.5_S_4_Sec_256.npy")
print(events.shape)
print(label.shape)
print(times.shape)
#times = np.load("Datasets/TimeDetection_M_2.8_R_0.5_S_4_Sec_256.npy") # features, # samples
times = (times - times[0,:]) * 3600 * 24 # set time to 0 and in seconds
fs = (times[:,0] < 60).nonzero()[0].shape[0] / 60 # sampling frequency
print(fs)
fs=100
from scipy.signal import spectrogram
eventNumber = 0
freq , time, Sxx = getSpectogram(events[:,eventNumber], fs, dbscale = True)
#Sxx = scipy.misc.imresize(Sxx, [64, 64])
Sxx = scipy.misc.imresize(Sxx, [64, 128])
spectrogram_shape = Sxx.shape
print(spectrogram_shape)
print(events.shape)
print(label.shape)
plt.imshow(Sxx)
print(label[eventNumber])
plt.figure()
plt.plot(events[:,0])
plt.figure()
plt.plot(events[:,600])
plt.figure()
plt.plot(events[:,-1])
print(label[0])
print(label[600])
print(label[-1])
label[0]
print(events.shape)
print(label.shape)
print(times.shape)
#print(label.shape[0])
#0:label.shape[0]
print(times.shape)
print(fs)
print(times)
data = np.zeros((events.shape[1], spectrogram_shape[0], spectrogram_shape[1]))
for i in range(events.shape[1]):
_, _, Sxx = getSpectogram(events[:,i], fs)
Sxx = scipy.misc.imresize(Sxx, [64, 128])
data[i, :, :] = (Sxx - np.mean(Sxx)) / np.std(Sxx)
#data[i, :, :] = zscore(np.log10(Sxx))
data = data[:,:,:,np.newaxis]
def split_reshape_dataset(X, Y, ratio):
    """Randomly split the paired arrays (X, Y) into train/test subsets.

    Args:
        X: array of shape (m, h, w, c) holding the samples.
        Y: array of shape (m, k) holding the matching labels.
        ratio: fraction of the m samples assigned to the training split.

    Returns:
        X_train, X_test, Y_train, Y_test with rows shuffled jointly so that
        sample/label pairing is preserved.
    """
    num_samples = X.shape[0]
    # np.random.permutation(m) draws the same sequence as shuffling arange(m).
    order = np.random.permutation(num_samples)
    cut = int(ratio * num_samples)
    train_idx, test_idx = order[:cut], order[cut:]
    return (X[train_idx, :, :, :], X[test_idx, :, :, :],
            Y[train_idx, :], Y[test_idx, :])
#data = data[300:700,:]
#data = (data - np.mean(data, axis = 0, keepdims= True)) / np.std(data, axis = 0, keepdims = True)
#data=zscore(data)
# Hold out 20% of the events for evaluation.
RatioTraining=0.8; # 0.8 before
X_train, X_test, Y_train, Y_test = split_reshape_dataset(data, label, RatioTraining)
# convert_to_one_hot comes from cnn_utils (imported above); the transpose
# yields (samples, 2) one-hot labels.
Y_train =convert_to_one_hot(Y_train,2).T
Y_test = convert_to_one_hot(Y_test,2).T
print(X_train.shape)
print(Y_train.shape)
print(data.shape)
print(label.shape)
# NOTE(review): `i` appears unused below -- presumably left over from manual
# inspection of a single example; confirm before removing.
i = 104
def ComputeModel(input_shape):
    """
    Function creating the model's graph in Keras.

    Architecture: Conv1D feature extractor -> two GRU layers (with batch
    norm and dropout) -> per-timestep sigmoid (TimeDistributed) -> Flatten
    -> single sigmoid unit for binary event detection.

    Argument:
    input_shape -- shape of the model's input data (using Keras conventions)
    Returns:
    model -- Keras model instance
    """
    X_input = Input(shape = input_shape)
    # keras.layers.Conv1D(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None,
    # Step 1: CONV layer -- 196 filters, width-16 kernel, stride 4.
    X = Conv1D(filters=196,kernel_size=16,strides=4)(X_input)
    X = BatchNormalization()(X)
    X = Activation(activation='relu')(X)
    # NOTE(review): Keras Dropout `rate` is the fraction DROPPED; rate=0.8
    # discards 80% of activations. The "use 0.8" in the template this was
    # adapted from likely meant keep_prob (i.e. rate=0.2) -- confirm intent.
    X = Dropout(rate=0.8)(X)
    # Step 2: First GRU Layer (128 units, full sequence returned)
    X = GRU(units=128, return_sequences=True)(X)
    X = Dropout(rate=.8)(X)
    X = BatchNormalization()(X)
    # Step 3: Second GRU Layer
    X = GRU(units=128, return_sequences=True)(X)
    X = Dropout(rate=0.8)(X)
    X = BatchNormalization()(X)
    X = Dropout(rate=0.8)(X)
    # Disabled experiment: two extra GRU blocks (kept for reference).
    '''
    X = GRU(units=128, return_sequences=True)(X) #None # GRU (use 128 units and return the sequences)
    X = Dropout(rate=0.8)(X) #None # dropout (use 0.8)
    X = BatchNormalization()(X) #None # Batch normalization
    X = Dropout(rate=0.8)(X) #None
    X = GRU(units=128, return_sequences=True)(X) #None # GRU (use 128 units and return the sequences)
    X = Dropout(rate=0.8)(X) #None # dropout (use 0.8)
    X = BatchNormalization()(X) #None # Batch normalization
    X = Dropout(rate=0.8)(X) #None
    '''
    # Step 4: Time-distributed dense layer: one sigmoid score per timestep,
    # then flatten and reduce to a single binary detection output.
    X = TimeDistributed(Dense(1, activation = "sigmoid"))(X)
    X = Flatten()(X)
    X = Dense(1, activation = "sigmoid")(X)
    model = Model(inputs = X_input, outputs = X)
    return model
# NOTE(review): `Tx` is unused below -- presumably a leftover sampling-period
# constant; confirm before removing.
Tx=2.5E-2
print(spectrogram_shape)
print(X_train.shape)
print(spectrogram_shape)
# Build and compile the detector. Input is (freq_bins, time_bins); the
# singleton channel axis is squeezed away before fitting.
model = ComputeModel(input_shape = (spectrogram_shape[0],spectrogram_shape[1]))
model.summary()
# NOTE(review): `lr` and `decay` are legacy Keras argument names (newer
# Keras uses `learning_rate`); confirm against the installed version.
opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
#X_train_reshape=np.squeeze(X_train)
#X_train_reshape.shape
Y_train.shape
# Use only the first one-hot column as the binary target (0/1).
Y_train2=Y_train[:,0]
#Y_train2=Y_train2[:,np.newaxis,np.newaxis]
print(np.squeeze(X_train).shape)
print(Y_train2.shape)
model.fit(np.squeeze(X_train), Y_train2, batch_size = 50, epochs=200)
loss, acc = model.evaluate(np.squeeze(X_test), Y_test[:,0])
print("Dev set accuracy = ", acc)
```
| github_jupyter |
# Federated Learning with Clara Train SDK
Medical data is sensitive and needs to be protected. And even after anonymization processes,
it is often infeasible to collect and share patient data from several institutions in a centralised data lake.
This poses challenges for training machine learning algorithms, such as deep convolutional networks,
which require extensive and balanced data sets for training and validation.
Federated learning (FL) is a learning paradigm that sidesteps this difficulty:
instead of pooling the data, the machine learning process is executed locally at each participating institution and only intermediate model training updates are shared among them.
It thereby allows to train algorithms collaboratively without exchanging the underlying datasets and neatly addresses the problem of data governance and privacy that arise when pooling medical data.
There are different FL communication architectures, such as the Client-server approach via hub and spokes, a decentralized architecture via peer-to-peer or hybrid variants.
The FL tool in the Clara Train SDK is a client-server architecture,
in which a federated server manages the aggregation and distribution as shown below.
<br><br>
## Prerequisites
- None. This note book explains FL.
## Resources
You could watch the free GTC 2021 talks covering Clara Train SDK
- [Clara Train 4.0 - 101 Getting Started [SE2688]](https://gtc21.event.nvidia.com/media/Clara%20Train%204.0%20-%20101%20Getting%20Started%20%5BSE2688%5D/1_0qgfrql2)
- [Clara Train 4.0 - 201 Federated Learning [SE3208]](https://gtc21.event.nvidia.com/media/Clara%20Train%204.0%20-%20201%20Federated%20Learning%20%5BSE3208%5D/1_m48t6b3y)
- [What’s New in Clara Train 4.0 [D3114]](https://gtc21.event.nvidia.com/media/What%E2%80%99s%20New%20in%20Clara%20Train%204.0%20%5BD3114%5D/1_umvjidt2)
- [Take Medical AI from Concept to Production using Clara Imaging [S32482]](https://gtc21.event.nvidia.com/media/Take%20Medical%20AI%20from%20Concept%20to%20Production%20using%20Clara%20Imaging%20%20%5BS32482%5D/1_6bvnvyg7)
- [Federated Learning for Medical AI [S32530]](https://gtc21.event.nvidia.com/media/Federated%20Learning%20for%20Medical%20AI%20%5BS32530%5D/1_z26u15uk)
# Why use Clara FL
<br><img src="screenShots/WhyUseClaraFL.png" alt="Drawing" style="height: 400px;width: 600px"/><br>
### Resources
- Watch talk covering Clara Train SDK basics [S22563](https://developer.nvidia.com/gtc/2020/video/S22563)
Clara train Getting started: cover basics, BYOC, AIAA, AutoML
- GTC 2020 talk [Federated Learning for Medical Imaging: Collaborative AI without Sharing Patient Data](https://developer.nvidia.com/gtc/2020/video/s21536-vid)
- [Federated learning blog](https://blogs.nvidia.com/blog/2019/10/13/what-is-federated-learning/)
- [Federated learning blog at RSNA](https://blogs.nvidia.com/blog/2019/12/01/clara-federated-learning/)
### Resources
We encourage you to watch the free GTC 2021 talks covering Clara Train SDK
- [Clara Train 4.0 - 201 Federated Learning [SE3208]](https://gtc21.event.nvidia.com/media/Clara%20Train%204.0%20-%20201%20Federated%20Learning%20%5BSE3208%5D/1_m48t6b3y)
- [Federated Learning for Medical AI [S32530]](https://gtc21.event.nvidia.com/media/Federated%20Learning%20for%20Medical%20AI%20%5BS32530%5D/1_z26u15uk)
# Overview
Federated Learning in Clara Train SDK uses a client-server architecture.
The image below gives you an overview.
For details about the components, please see our [documentation](https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v4.0/nvmidl/additional_features/federated_learning.html?highlight=federated).
The key things to note are:
* A server is responsible for **managing training, keeping best model and aggregating gradients**.
* Clients are responsible for **training local model** and sending updates (gradients) to server.
* **No data from the dataset is shared** between clients or with server.
* To ensure **privacy**, all communication with server is secured.
* Additional privacy-preserving mechanisms can be enabled.
Figure below shows these concepts and how they are communicated to the server.
<br><img src="screenShots/FLDetails.png" alt="Drawing" style="height: 400px;width: 600px"/><br>
## Server
The following diagram shows the server workflow:
<br><img src="screenShots/fl_server_workflow.png" alt="Drawing" style="width: 1000px"/><br>
A federated server is responsible for:
1. Initialising a global model at federated round 0
1. Sharing the global model with all clients
1. Synchronising model updates from multiple clients
1. Updating the global model when sufficient model updates received
## Client
The following diagram shows the client workflow:
<br><img src="screenShots/fl_client_workflow.png" alt="Drawing" style="height: 350px"/><br>
A federated client will:
1. Download the global model
1. Train the model with local training data
1. Upload `delta_w` (the difference between the updated model and the global model) to the server
# FL Challenges
In order to run a federated learning experiment, you need to think about:
1. Software development:
1. Security:
1. Secure connection
2. Authentication
3. Certification ssl
2. Deadlocks: Clients join / die / re-join
4. Unstable client server connection
5. Scaling: How to run large FL experiment with 20 or 100 clients
6. With different sites having different data size, how to enable local training with multiple GPUs, also how to give weights to different clients.
7. Audit trails: clients need to know who did what, when
2. Logistics: <span style="color:red">(Most challenging)</span>
1. FL experiment is typically conducted through multiple experiments to tune hyper parameters.
How to synchronize these runs
2. Keep track of experiments across sites.
3. FL most important feature is to improve the off diagonal metric.
Clients would share results (validation metric) and NOT the data.
<span style="color:red"> This is the hardest to do </span> since you need to distribute the models from each client to the rest.
3. Research:
1. How to aggregate model weights from each site
2. Privacy for your model weight sharing
Clara software engineers have taken care of the 1st and 2nd bullets for you so researchers can focus on the 3rd bullet.
Moreover, in Clara Train V3.1, FL comes with a new provisioning tool that simplifies the process.
Lets start by defining some terminologies used throughout FL discussion:
- __Study__: An FL project with preset goals (e.g. train the EXAM model) and identified participants.
- __Org__: The organization that participates in the study.
- __Site__: The computing system that runs FL application as part of the study.
There are two kinds of sites: Server and Clients.
Each client belongs to an organization.
- __Provisioning Tool__: The tool used for provisioning all participants of the study.
- __FL Server__: An application responsible for client coordination based on FL federation rules and model aggregation.
- __FL Client__: An application running on a client site that performs model training with its local datasets and collaborates with the FL Server for federated study.
- __Admin Client__: An application running on a user’s machine that allows the user to perform FL system operations with a command line interface.
- __Lead IT__: The person responsible for provisioning the participants and coordinating IT personnel from all sites for the study.
The Lead IT is also responsible for the management of the Server.
- __Site IT__: The person responsible for the management of the Site of his/her organization.
- __Lead Researcher__: The scientist who works with Site Scientists to ensure the success of the study.
- __Site Researcher__: The scientist who works with the Lead Scientist to make sure the Site is properly prepared for the study.
NOTE: in certain projects, a person could play several of the above-mentioned roles.
<br><img src="screenShots/FLWorkFlow.png" alt="Drawing" style="height: 400px"/><br>
Diagram above shows high level steps of an FL study:
1. Lead IT configures everything in a config.yaml file, runs provisioning which will generate zip packages for each client.
2. These packages contains everything a FL clients needs from starting the docker, ssl certification, etc to start and complete the FL experiment.
3. Each client starts the docker and the FL client use the provided Startup Kit.
4. Similarly the FL server starts the docker and FL server using the provided Startup Kit.
5. Finally the Admin can either use docker or pip install the admin tool, which will connect to the server and start the FL experiment
## With this in mind we have created 3 sub-notebooks:
1. [Provisioning](Provisioning.ipynb) which walks you though the configurations you set and how to run the tool
2. [Client](Client.ipynb) walks you through a FL client
3. [Admin](Admin.ipynb) walks you through how the FL admin data scientist would conduct the FL experiment once the server and clients are up and running
| github_jupyter |
# 2.04 Figure 4
---
Author: Riley X. Brady
Date: 11/19/20
This plots the distribution of biogeochemical tracers at their statistical origin relative to their 1000 m crossing. See notebook `1.03` for the calculation of tracers at their memory time origin and `1.04` for finding their ambient mixed layer temperatures.
```
%load_ext lab_black
%load_ext autoreload
%autoreload 2
import numpy as np
import xarray as xr
import proplot as plot
import gsw
import PyCO2SYS as pyco2
print(f"numpy: {np.__version__}")
print(f"xarray: {xr.__version__}")
print(f"proplot: {plot.__version__}")
print(f"gsw: {gsw.__version__}")
print(f"pyCO2: {pyco2.__version__}")
REGIONS = ["drake", "crozet", "kerguelan", "campbell"]
```
First, we start with dissolved inorganic carbon. Our model outputs DIC with units of mmol m$^{-3}$. Here, we use *in situ* density to convert to $\mu$mol kg$^{-1}$ to account for density effects.
```
# Collect the DIC distribution at particle origin for each region, converted
# from model units (mmol m^-3) to umol kg^-1 using in situ density.
dicdata = []
for region_name in REGIONS:
    ds = xr.open_dataset(f"../data/postproc/{region_name}.1000m.tracer.origin.nc")
    # Pull tracers into numpy; depth is stored negative-down, so flip sign.
    carbon = ds.DIC.values
    density = gsw.density.rho(ds.S.values, ds.T.values, ds.z.values * -1)
    # Drop NaN entries (assumed aligned between tracers -- TODO confirm).
    carbon = carbon[~np.isnan(carbon)]
    density = density[~np.isnan(density)]
    # mmol m^-3 -> umol kg^-1.
    dicdata.append(list(carbon * (1000 * (1 / density))))
```
Next, we calculate potential density reference to the surface ($\sigma_{0}$). We use this as a simple marker for water mass types.
```
# Collect the potential density anomaly (sigma_0) at particle origin for
# each region.
sigmadata = []
for region_name in REGIONS:
    ds = xr.open_dataset(f"../data/postproc/{region_name}.1000m.tracer.origin.nc")
    temp = ds.T.values
    temp = temp[~np.isnan(temp)]
    salt = ds.S.values
    salt = salt[~np.isnan(salt)]
    # Potential density anomaly referenced to the surface.
    sigmadata.append(list(gsw.sigma0(salt, temp)))
```
Lastly, we calculate the potential pCO$_{2}$. This is the pCO$_{2}$ the particle would have if it were warmed or cooled to the ambient temperature it eventually experiences once it upwells into the mixed layer after its last 1000 m crossing. This assumes that there are no modifications to the carbon content of the particle due to air-sea gas exchange, biological processes, mixing, etc. It's a way to relate the pCO$_{2}$ at any point in its trajectory to the outgasing or uptake potential it would have upon reaching the surface ocean.
This works, since:
F$_{\mathrm{CO}_{2}}$ = k $\cdot$ S $\cdot$ (pCO$_{2}^{O}$ - pCO$_{2}^{A}$)
In other words, F$_{\mathrm{CO}_{2}}$ $\propto$ pCO$_{2}^{O}$, and this is particularly true in our simulation since we have a fixed atmospheric pCO$_{2}^{A}$ of 360 $\mu$atm.
```
# Compute the "potential pCO2" gradient with the atmosphere for each region:
# the pCO2 each particle would have if warmed/cooled to the ambient
# mixed-layer temperature it meets after upwelling (no gas exchange,
# biology or mixing assumed).
pco2sigmadata = []
for region_name in REGIONS:
    # Load in tracers at origin and append on the ambient temperature calculated for each
    # particle.
    data = xr.open_dataset(f"../data/postproc/{region_name}.1000m.tracer.origin.nc")
    ambient = xr.open_dataarray(
        f"../data/postproc/{region_name}.ambient.temperature.nc"
    )
    data["T_ambient"] = ambient
    # Calculate in situ density and temperature (pt_from_t with reference
    # pressure at the particle depth converts model potential temperature
    # back to in situ temperature).
    rho = gsw.density.rho(data.S, data.T, data.z * -1)
    t_insitu = gsw.pt_from_t(data.S, data.T, 0, data.z * -1)
    # Use in situ density to convert all units from mmol m-3 to umol kg-1
    conversion = 1000 * (1 / rho)
    # PyCO2SYS is a famous package from MATLAB. We're using the python plug-in here
    # to diagnostically calculate in situ pCO2 along the particle trajectory.
    # The bare 1, 2 are CO2SYS parameter-type codes: par1 is total
    # alkalinity (1) and par2 is DIC (2).
    pCO2 = pyco2.CO2SYS_nd(
        data.ALK * conversion,
        data.DIC * conversion,
        1,
        2,
        salinity=data.S,
        temperature=t_insitu,
        pressure=data.z * -1,
        total_silicate=data.SiO3 * conversion,
        total_phosphate=data.PO4 * conversion,
    )["pCO2"]
    # pCO2 if brought to the ambient temperature when the given particle upwells to
    # 200 m. We're just using a known lab-derived relationship that pCO2 varies by
    # ~4% for a degree change in temperature.
    pCO2sigma = pCO2 * (1 + 0.0423 * (data.T_ambient - t_insitu))
    # Subtract out fixed atmospheric CO2 (360 uatm in this simulation) to get
    # a gradient with the atmosphere.
    pCO2sigma -= 360
    pCO2sigma = pCO2sigma.dropna("nParticles").values
    pco2sigmadata.append(list(pCO2sigma))
```
## Visualize
I could probably modularize this and make it a lot nicer, but who cares? It's a plot for the paper and I like it. **NOTE**: I did a little bit of adjustment of text in Illustrator after the fact.
```
# Suppresses some numpy warnings in the plot.
import warnings

warnings.filterwarnings("ignore")

plot.rc["abc.border"] = False
plot.rc.fontsize = 8
f, axs = plot.subplots(
    ncols=3,
    figsize=(6.9, 6.9 * 3.5 / 7.48),
    share=0,
)

# Some global attributes for the plot
VIOLINCOLOR = "gray5"
EXTREMEMARKER = "."
EXTREMESIZE = 1.0
BOXWIDTH = 0.1
WHISKERLW = 0.5
CAPLW = 0.5


def _draw_box_violin(ax, dataset):
    """Draw the box-plot + violin combination shared by all three panels.

    Extracted to remove the three identical copies of this styling block
    (DIC, sigma0 and potential-pCO2 panels); kwargs are unchanged.
    """
    ax.boxplot(
        dataset,
        whis=(5, 95),
        marker=EXTREMEMARKER,
        markersize=EXTREMESIZE,
        widths=BOXWIDTH,
        fillcolor="black",
        mediancolor="white",
        boxcolor="black",
        medianlw=1,
        fillalpha=1,
        caplw=CAPLW,
        whiskerlw=WHISKERLW,
        whiskercolor="black",
        capcolor="black",
    )
    ax.violinplot(
        dataset,
        fillcolor=VIOLINCOLOR,
        fillalpha=1,
        lw=0,
        edgecolor=VIOLINCOLOR,
        widths=0.65,
        points=100,
    )


###########
# DIC PANEL
###########
_draw_box_violin(axs[1], dicdata)
axs[1].format(
    yreverse=True,
    xticklabels=REGIONS,
    xtickminor=False,
    xrotation=45,
    ylabel="dissolved inorganic carbon [$\mu$mol kg$^{-1}$]",
)

#############
# SIGMA PANEL
#############
_draw_box_violin(axs[0], sigmadata)
# Shade water-mass bands (SAMW/AAIW/CDW/AABW) behind the distributions.
axs[0].area([0.5, 5], 26.5, 27.2, color="#edf8fb", alpha=0.25, zorder=0)
axs[0].area([0.5, 5], 27.2, 27.5, color="#b3cde3", alpha=0.25, zorder=0)
axs[0].area([0.5, 5], 27.5, 27.8, color="#8c96c6", alpha=0.25, zorder=0)
axs[0].area([0.5, 5], 27.8, 27.9, color="#88419d", alpha=0.25, zorder=0)
axs[0].text(4.05, 26.55, "SAMW", color="k")
axs[0].text(4.2, 27.25, "AAIW", color="k")
axs[0].text(4.25, 27.55, "CDW", color="k")
axs[0].text(4.1, 27.85, "AABW", rotation=0, color="k")
axs[0].format(
    yreverse=True,
    xlim=(0.5, 5.00),
    ylim=(26.5, 27.9),
    xticklabels=REGIONS,
    xtickminor=False,
    xrotation=45,
    ylabel="potential density, $\sigma_{0}$ [kg m$^{-3}$]",
)

######################
# POTENTIAL PCO2 PANEL
######################
_draw_box_violin(axs[2], pco2sigmadata)
axs[2].format(
    ylim=(-150, 530),
    yreverse=False,
    ylabel="potential pCO$_{2}$ gradient with atmosphere [$\mu$atm]",
    xticklabels=REGIONS,
    xtickminor=False,
    xrotation=45,
)
# Shade ocean-uptake (blue) vs outgassing (red) regimes behind the violins.
axs[2].area([0.5, 4.5], -300, 0, color="blue2", alpha=0.3, zorder=0)
axs[2].area([0.5, 4.5], 0, 530, color="red2", alpha=0.3, zorder=0)
axs[2].text(4.7, -200, "into ocean", rotation=90, color="k")
axs[2].text(4.7, 80, "into atmosphere", rotation=90, color="k")

# Panel labels "(a)", "(b)", "(c)" in the upper left of each axis.
axs.format(abc=True, abcstyle="(a)", abcloc="ul")
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Optimizers in TensorFlow Probability
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/probability/examples/Optimizers_in_TensorFlow_Probability"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Abstract
In this colab we demonstrate how to use the various optimizers implemented in TensorFlow Probability.
## Dependencies & Prerequisites
```
#@title Import { display-mode: "form" }
%matplotlib inline
import contextlib
import functools
import os
import time
import numpy as np
import pandas as pd
import scipy as sp
from six.moves import urllib
from sklearn import preprocessing
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
```
## BFGS and L-BFGS Optimizers
Quasi Newton methods are a class of popular first order optimization algorithm. These methods use a positive definite approximation to the exact Hessian to find the search direction.
The Broyden-Fletcher-Goldfarb-Shanno
algorithm ([BFGS](https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93Goldfarb%E2%80%93Shanno_algorithm)) is a specific implementation of this general idea. It is applicable and is the method of choice for medium sized problems
where the gradient is continuous everywhere (e.g. linear regression with an $L_2$ penalty).
[L-BFGS](https://en.wikipedia.org/wiki/Limited-memory_BFGS) is a limited-memory version of BFGS that is useful for solving larger problems whose Hessian matrices cannot be computed at a reasonable cost or are not sparse. Instead of storing fully dense $n \times n$ approximations of Hessian matrices, they only save a few vectors of length $n$ that represent these approximations implicitly.
```
#@title Helper functions
CACHE_DIR = os.path.join(os.sep, 'tmp', 'datasets')


def make_val_and_grad_fn(value_fn):
    """Wrap `value_fn` so it returns (value, gradient), as TFP optimizers expect."""
    @functools.wraps(value_fn)
    def wrapper(x):
        return tfp.math.value_and_gradient(value_fn, x)
    return wrapper
@contextlib.contextmanager
def timed_execution():
    """Context manager that prints the wall-clock time of its body."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print('Evaluation took: %f seconds' % elapsed)
def np_value(tensor):
    """Get numpy value out of possibly nested tuple of tensors."""
    if not isinstance(tensor, tuple):
        return tensor.numpy()
    # Rebuild the same tuple type (handles namedtuples) with converted leaves.
    return type(tensor)(*map(np_value, tensor))
def run(optimizer):
    """Call `optimizer` twice -- once to warm up, once timed -- and return numpy results."""
    optimizer()  # Warmup.
    with timed_execution():
        outcome = optimizer()
    return np_value(outcome)
```
### L-BFGS on a simple quadratic function
```
# Fix numpy seed for reproducibility
np.random.seed(12345)

# The objective must be supplied as a function that takes a single
# (Tensor) argument and returns a tuple. The first component of the
# tuple is the value of the objective at the supplied point and the
# second value is the gradient at the supplied point. The value must
# be a scalar and the gradient must have the same shape as the
# supplied argument.

# The `make_val_and_grad_fn` decorator helps transforming a function
# returning the objective value into one that returns both the gradient
# and the value. It also works for both eager and graph mode.

dim = 10
minimum = np.ones([dim])
scales = np.exp(np.random.randn(dim))


@make_val_and_grad_fn
def quadratic(x):
    """Axis-scaled quadratic bowl whose minimum is the all-ones vector."""
    return tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1)


# The minimization routine also requires you to supply an initial
# starting point for the search. For this example we choose a random
# starting point.
start = np.random.randn(dim)

# Finally an optional argument called tolerance let's you choose the
# stopping point of the search. The tolerance specifies the maximum
# (supremum) norm of the gradient vector at which the algorithm terminates.
# If you don't have a specific need for higher or lower accuracy, leaving
# this parameter unspecified (and hence using the default value of 1e-8)
# should be good enough.
tolerance = 1e-10


@tf.function
def quadratic_with_lbfgs():
    # Wrapped in tf.function so the whole optimization runs as one graph.
    return tfp.optimizer.lbfgs_minimize(
        quadratic,
        initial_position=tf.constant(start),
        tolerance=tolerance)


results = run(quadratic_with_lbfgs)

# The optimization results contain multiple pieces of information. The most
# important fields are: 'converged' and 'position'.
# Converged is a boolean scalar tensor. As the name implies, it indicates
# whether the norm of the gradient at the final point was within tolerance.
# Position is the location of the minimum found. It is important to check
# that converged is True before using the value of the position.
print('L-BFGS Results')
print('Converged:', results.converged)
print('Location of the minimum:', results.position)
print('Number of iterations:', results.num_iterations)
```
### Same problem with BFGS
```
# Same quadratic objective as above, this time minimized with full BFGS
# (dense Hessian approximation) instead of the limited-memory variant.
@tf.function
def quadratic_with_bfgs():
    return tfp.optimizer.bfgs_minimize(
        quadratic,
        initial_position=tf.constant(start),
        tolerance=tolerance)


results = run(quadratic_with_bfgs)

print('BFGS Results')
print('Converged:', results.converged)
print('Location of the minimum:', results.position)
print('Number of iterations:', results.num_iterations)
```
## Linear Regression with L1 penalty: Prostate Cancer data
Example from the Book: *The Elements of Statistical Learning, Data Mining, Inference, and Prediction* by Trevor Hastie, Robert Tibshirani and Jerome Friedman.
Note this is an optimization problem with L1 penalty.
### Obtain dataset
```
def cache_or_download_file(cache_dir, url_base, filename):
    """Read a cached file or download it."""
    target = os.path.join(cache_dir, filename)
    if tf.io.gfile.exists(target):
        # Cache hit: nothing to download.
        return target
    if not tf.io.gfile.exists(cache_dir):
        tf.io.gfile.makedirs(cache_dir)
    source = url_base + filename
    print("Downloading {url} to {filepath}.".format(url=source, filepath=target))
    urllib.request.urlretrieve(source, target)
    return target
def get_prostate_dataset(cache_dir=CACHE_DIR):
    """Download the prostate dataset and read as Pandas dataframe.

    Args:
        cache_dir: directory used to cache the downloaded file.

    Returns:
        DataFrame of the whitespace-delimited file, indexed by its first column.
    """
    url_base = 'http://web.stanford.edu/~hastie/ElemStatLearn/datasets/'
    # FIX: `delim_whitespace=True` is deprecated in pandas >= 2.1; the regex
    # separator below is equivalent and works on all pandas versions.
    return pd.read_csv(
        cache_or_download_file(cache_dir, url_base, 'prostate.data'),
        sep=r'\s+', index_col=0)


prostate_df = get_prostate_dataset()
```
### Problem definition
```
# Fix the seed so the random starting point is reproducible.
np.random.seed(12345)

feature_names = ['lcavol', 'lweight', 'age', 'lbph', 'svi', 'lcp',
                 'gleason', 'pgg45']

# Normalize features to zero mean / unit variance.
# NOTE(review): `scalar` is a StandardScaler instance -- the name was likely
# meant to be `scaler`.
scalar = preprocessing.StandardScaler()
prostate_df[feature_names] = pd.DataFrame(
    scalar.fit_transform(
        prostate_df[feature_names].astype('float64')))

# select training set (rows flagged 'T' in the dataset's train column)
prostate_df_train = prostate_df[prostate_df.train == 'T']

# Select features and labels
features = prostate_df_train[feature_names]
labels = prostate_df_train[['lpsa']]

# Create tensors
feat = tf.constant(features.values, dtype=tf.float64)
lab = tf.constant(labels.values, dtype=tf.float64)

dtype = feat.dtype

# L1 penalty weight; 0 disables the penalty for this run.
regularization = 0 # regularization parameter
dim = 8 # number of features

# We pick a random starting point for the search
start = np.random.randn(dim + 1)
def regression_loss(params):
    """Compute loss for linear regression model with L1 penalty

    Args:
      params: A real tensor of shape [dim + 1]. The zeroth component
        is the intercept term and the rest of the components are the
        beta coefficients.

    Returns:
      The mean square error loss including L1 penalty.
    """
    flat = tf.squeeze(params)
    intercept = flat[0]
    beta = flat[1:]
    # Linear prediction: feat @ beta + intercept.
    pred = tf.matmul(feat, tf.expand_dims(beta, axis=-1)) + intercept
    mse = tf.reduce_sum(
        tf.cast(
            tf.losses.mean_squared_error(y_true=lab, y_pred=pred), tf.float64))
    # Lasso term on the coefficients only, not the intercept.
    penalty = regularization * tf.reduce_sum(tf.abs(beta))
    return mse + penalty
```
### Solving with L-BFGS
Fit using L-BFGS. Even though the L1 penalty introduces derivative discontinuities, in practice, L-BFGS works quite well still.
```
# Compile the whole L-BFGS search into a single TF graph call.
@tf.function
def l1_regression_with_lbfgs():
    """Run L-BFGS on the L1-penalized regression loss from `start`."""
    return tfp.optimizer.lbfgs_minimize(
        make_val_and_grad_fn(regression_loss),
        initial_position=tf.constant(start),
        tolerance=1e-8)

# Execute via the notebook's `run` helper (defined earlier).
results = run(l1_regression_with_lbfgs)
minimum = results.position  # [dim + 1]: intercept followed by the betas
fitted_intercept = minimum[0]
fitted_beta = minimum[1:]

print('L-BFGS Results')
print('Converged:', results.converged)
print('Intercept: Fitted ({})'.format(fitted_intercept))
print('Beta: Fitted {}'.format(fitted_beta))
```
### Solving with Nelder Mead
The [Nelder Mead method](https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method) is one of the most popular derivative free minimization methods. This optimizer doesn't use gradient information and makes no assumptions on the differentiability of the target function; it is therefore appropriate for non-smooth objective functions, for example optimization problems with L1 penalty.
For an optimization problem in $n$-dimensions it maintains a set of
$n+1$ candidate solutions that span a non-degenerate simplex. It successively modifies the simplex based on a set of moves (reflection, expansion, shrinkage and contraction) using the function values at each of the vertices.
```
# Nelder mead expects an initial_vertex of shape [n + 1, 1].
initial_vertex = tf.expand_dims(tf.constant(start, dtype=dtype), axis=-1)

@tf.function
def l1_regression_with_nelder_mead():
    """Derivative-free minimization of the L1-penalized regression loss."""
    return tfp.optimizer.nelder_mead_minimize(
        regression_loss,
        initial_vertex=initial_vertex,
        func_tolerance=1e-10,
        position_tolerance=1e-10)

results = run(l1_regression_with_nelder_mead)
minimum = results.position.reshape([-1])  # flatten to a 1-D parameter vector
fitted_intercept = minimum[0]
fitted_beta = minimum[1:]

print('Nelder Mead Results')
print('Converged:', results.converged)
print('Intercept: Fitted ({})'.format(fitted_intercept))
print('Beta: Fitted {}'.format(fitted_beta))
```
## Logistic Regression with L2 penalty
For this example, we create a synthetic data set for classification and use the L-BFGS optimizer to fit the parameters.
```
# Build a synthetic logistic-regression dataset with known true parameters.
np.random.seed(12345)

dim = 5  # The number of features
n_obs = 10000  # The number of observations

betas = np.random.randn(dim)  # The true beta
intercept = np.random.randn()  # The true intercept

features = np.random.randn(n_obs, dim)  # The feature matrix
# Class-1 probabilities through the logistic (expit) link, then sample labels.
probs = sp.special.expit(
    np.matmul(features, np.expand_dims(betas, -1)) + intercept)
labels = sp.stats.bernoulli.rvs(probs)  # The true labels

regularization = 0.8
feat = tf.constant(features)
lab = tf.constant(labels, dtype=feat.dtype)
@make_val_and_grad_fn
def negative_log_likelihood(params):
    """Penalized negative log likelihood of the logistic model.

    Args:
      params: A real tensor of shape [dim + 1]. The zeroth component is the
        intercept term and the rest of the components are the beta
        coefficients.

    Returns:
      Scalar tensor: the negative log likelihood plus the L2 penalty term.
    """
    intercept = params[0]
    beta = params[1:]
    logits = tf.matmul(feat, tf.expand_dims(beta, -1)) + intercept
    nll = tf.reduce_sum(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=lab, logits=logits))
    penalty = regularization * tf.reduce_sum(beta ** 2)
    return nll + penalty
start = np.random.randn(dim + 1)  # random initial guess for the search

@tf.function
def l2_regression_with_lbfgs():
    """Run L-BFGS on the L2-penalized logistic likelihood from `start`."""
    return tfp.optimizer.lbfgs_minimize(
        negative_log_likelihood,
        initial_position=tf.constant(start),
        tolerance=1e-8)

results = run(l2_regression_with_lbfgs)
minimum = results.position
fitted_intercept = minimum[0]
fitted_beta = minimum[1:]

# Compare the recovered parameters with the known true ones.
print('Converged:', results.converged)
print('Intercept: Fitted ({}), Actual ({})'.format(fitted_intercept, intercept))
print('Beta:\n\tFitted {},\n\tActual {}'.format(fitted_beta, betas))
```
## Batching support
Both BFGS and L-BFGS support batched computation, for example to optimize a single function from many different starting points; or multiple parametric functions from a single point.
### Single function, multiple starting points
Himmelblau's function is a standard optimization test case. The function is given by:
$$f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2$$
The function has four minima located at:
- (3, 2),
- (-2.805118, 3.131312),
- (-3.779310, -3.283186),
- (3.584428, -1.848126).
All these minima may be reached from appropriate starting points.
```
# The function to minimize must take as input a tensor of shape [..., n]. In
# this example, n=2 is the size of the domain of the input and [...] are
# batching dimensions. The return value must be of shape [...], i.e. a batch
# of scalars with the objective value of the function evaluated at each input
# point.
@make_val_and_grad_fn
def himmelblau(coord):
    """Himmelblau's function evaluated batch-wise on [..., 2] coordinates."""
    x, y = coord[..., 0], coord[..., 1]
    return (x * x + y - 11) ** 2 + (x + y * y - 7) ** 2

# Four starting points, one near each basin of attraction.
starts = tf.constant([[1, 1],
                      [-2, 2],
                      [-1, -1],
                      [1, -2]], dtype='float64')

# The stopping_condition allows to further specify when should the search stop.
# The default, tfp.optimizer.converged_all, will proceed until all points have
# either converged or failed. There is also a tfp.optimizer.converged_any to
# stop as soon as the first point converges, or all have failed.
@tf.function
def batch_multiple_starts():
    """Minimize himmelblau from all four starting points in one batched call."""
    return tfp.optimizer.lbfgs_minimize(
        himmelblau, initial_position=starts,
        stopping_condition=tfp.optimizer.converged_all,
        tolerance=1e-8)

results = run(batch_multiple_starts)
print('Converged:', results.converged)
print('Minima:', results.position)
```
### Multiple functions
For demonstration purposes, in this example we simultaneously optimize a large number of high dimensional randomly generated quadratic bowls.
```
# Build a batch of 500 random 100-dimensional quadratic bowls, each with its
# own minimum location and per-coordinate curvature (scales).
np.random.seed(12345)

dim = 100
batches = 500
minimum = np.random.randn(batches, dim)
scales = np.exp(np.random.randn(batches, dim))

@make_val_and_grad_fn
def quadratic(x):
    """Batched quadratic bowl; returns one scalar per batch element."""
    return tf.reduce_sum(input_tensor=scales * (x - minimum)**2, axis=-1)

# Make all starting points (1, 1, ..., 1). Note not all starting points need
# to be the same.
start = tf.ones((batches, dim), dtype='float64')

@tf.function
def batch_multiple_functions():
    """Minimize all 500 quadratics simultaneously with batched L-BFGS."""
    return tfp.optimizer.lbfgs_minimize(
        quadratic, initial_position=start,
        stopping_condition=tfp.optimizer.converged_all,
        max_iterations=100,
        tolerance=1e-8)

results = run(batch_multiple_functions)
print('All converged:', np.all(results.converged))
print('Largest error:', np.max(results.position - minimum))
```
| github_jupyter |
YAML support is provided by PyYAML at http://pyyaml.org/. This notebook depends on it.
```
import yaml
```
The following cell provides an initial example of a *note* in our system.
A *note* is nothing more than a YAML document. The idea of notetaking is to keep it simple, so a note should make no assumptions about formatting whatsoever.
In our current thinking, we have the following sections:
- title: an optional title (text)
- tags: one or more keywords (text, sequence of text, no nesting)
- mentions: one or more mentions (text, sequence of text, no nesting)
- outline: one or more items (text, sequence of text, nesting is permitted)
- dates (numeric text, sequence, must follow established historical ways of representing dates)
- text (text from the source as multiline string)
- bibtex, ris, or inline (text for the bibliographic item; will be syntax checked)
- bibkey (text, a hopefully unique identifier for referring to this source in other Zettels)
- cite: Used to cite a bibkey from the same or other notes. In addition, the citation may be represented as a list, where the first item is the bibkey and subsequent items are pages or ranges of page numbers. See below for a good example of how this will work.
- note (any additional details that you wish to hide from indexing)
In most situations, freeform text is permitted. If you need to do crazy things, you must put quotes around the text so YAML can process it. However, words separated by whitespace and punctuation seems to work fine in most situations.
These all are intended to be string data, so there are no restrictions on what can be in any field; however, we will likely limit tags, mentions, dates in some way as we go forward. Fields such as bibtex, ris, or inline are also subject to validity checking.
Print the document to the console (nothing special here).
```
# A sample note ("zettel"): a plain YAML document exercising every section the
# note format currently supports (title, tags, mentions, dates, cite, outline,
# text, bibkey, bibtex, note).
myFirstZettel="""
title: First BIB Note for Castells
tags:
- Castells
- Network Society
- Charles Babbage is Awesome
- Charles Didn't do Everything
mentions:
- gkt
- dbdennis
dates: 2016
cite:
- Castells Rise 2016
- ii-iv
- 23-36
outline:
- Introduction
- - Computers
- People
- Conclusions
- - Great Ideas of Computing
text: |
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam eleifend est sed diam maximus rutrum. Quisque sit amet imperdiet odio, id tristique libero. Aliquam viverra convallis mauris vel tristique. Cras ac dolor non risus porttitor molestie vel at nisi. Donec vitae finibus quam. Phasellus vehicula urna sed nibh condimentum, ultrices interdum velit eleifend. Nam suscipit dolor eu rutrum fringilla. Sed pulvinar purus purus, sit amet venenatis enim convallis a. Duis fringilla nisl sit amet erat lobortis dictum. Nunc fringilla arcu nec ex blandit, a gravida purus commodo. Vivamus lacinia tellus dui, vel maximus lacus ornare id.
Vivamus euismod justo sit amet luctus bibendum. Integer non mi ullamcorper enim fringilla vulputate sit amet in urna. Nullam eu sodales ipsum. Curabitur id convallis ex. Duis a condimentum lorem. Nulla et urna massa. Duis in nibh eu elit lobortis vehicula. Mauris congue mauris mollis metus lacinia, ut suscipit mi egestas. Donec luctus ante ante, eget viverra est mollis vitae.
Vivamus in purus in erat dictum scelerisque. Aliquam dictum quis ligula ac euismod. Mauris elementum metus vel scelerisque feugiat. Vivamus bibendum massa eu pellentesque sodales. Nulla nec lacus dolor. Donec scelerisque, nibh sed placerat gravida, nunc turpis tristique nibh, ac feugiat enim massa ut eros. Nulla finibus, augue egestas hendrerit accumsan, tellus augue tempor eros, in sagittis dolor turpis nec mi. Nunc fringilla mi non malesuada aliquet.
bibkey:
Castells Rise 1996
bibtex: |
@book{castells_rise_1996,
address = {Cambridge, Mass.},
series = {Castells, {Manuel}, 1942- {Information} age . v},
title = {The rise of the network society},
isbn = {978-1-55786-616-5},
language = {eng},
publisher = {Blackwell Publishers},
author = {Castells, Manuel},
year = {1996},
keywords = {Information networks., Information society., Information technology Economic aspects., Information technology Social aspects., Technology and civilization.}
}
note:
George likes this new format.
"""
# Show the raw document exactly as written (nothing special here).
print(myFirstZettel)
```
This shows how to load just the YAML portion of the document, resulting in a Python dictionary data structure. Observe that the Python dictionary has { key : value, ... }. So we can extract the YAML fields from the Python dictionary data structure.
Notice that when you write a YAML list of mentions, there is a nested Python list ['gkt', 'dbdennis'].
```
# Parse the note into a Python dict. Use safe_load: calling yaml.load without
# an explicit Loader is deprecated (PyYAML >= 5.1) and can construct arbitrary
# Python objects from untrusted documents; safe_load parses plain documents
# like this one identically.
doc = yaml.safe_load(myFirstZettel)
```
Closing the loop, the following shows how to *iterate* the keys of the data structure.
```
# Walk the parsed note and show each top-level field with its parsed value.
for key, value in doc.items():
    print(key, "=", value)
```
And this shows how to get any particular item of interest. In this case, we're extracting the *bibtex* key so we can do something with the embedded BibTeX (e.g. print it).
```
# Pull individual fields out of the parsed note: the citation key and the
# embedded BibTeX record.
print(doc['bibkey'])
print(doc['bibtex'])
```
Adapted from http://stackoverflow.com/questions/12472338/flattening-a-list-recursively. There really must be a nicer way to do stuff like this. I will rewrite this using a walker so we can have custom processing of the list items.
```
def flatten(item):
    """Recursively flatten `item` into a flat list of strings.

    A non-list input becomes a one-element list of its string form; nested
    lists are flattened depth-first. Every leaf is converted with ``str`` so
    the result can always be fed to ``",".join`` — the original only
    stringified a top-level scalar, so a list containing non-string leaves
    (e.g. ints) broke the join downstream.
    """
    if not isinstance(item, list):
        return [str(item)]
    if not item:
        return []
    # flatten() handles both list and scalar heads, stringifying leaves.
    return flatten(item[0]) + flatten(item[1:])

flatten("George was here")
flatten(['A', ['B', 'C'], ['D', ['E']]])
```
Now we are onto some `sqlite3` explorations.
Ordinarily, I would use some sort of mapping framework to handle database operations. However, it's not clear the FTS support is part of any ORM (yet). I will continue to research but since there is likely only one table, it might not be worth the trouble.
Next we will actually add the Zettel to the database and do a test query. Almost there.
```
import sqlite3

# This is for showing data structures only.
import pprint
printer = pprint.PrettyPrinter(indent=2)
class SQLiteFTS(object):
    """Thin wrapper around an SQLite FTS4 virtual table for note records.

    Fields are fixed at construction; `bind` maps a parsed note (dict) onto
    the field set and `insert_into_table` writes one row.
    """

    def __init__(self, db_name, table_name, field_names):
        self.db_name = db_name
        self.conn = sqlite3.connect(db_name)
        self.cursor = self.conn.cursor()
        self.table_name = table_name
        self.fts_field_names = field_names
        # '?' placeholders for parameterized INSERTs, one per field.
        self.fts_field_refs = ['?'] * len(self.fts_field_names)
        self.fts_field_init = [''] * len(self.fts_field_names)
        self.fts_fields = dict(zip(self.fts_field_names, self.fts_field_refs))
        self.fts_default_record = dict(zip(self.fts_field_names, self.fts_field_init))

    def bind(self, doc):
        """Copy the known fields of `doc` onto a fresh default record."""
        self.record = self.fts_default_record.copy()
        for k in doc.keys():
            if k in self.record:
                self.record[k] = doc[k]
            else:
                print("Unknown fts field %s" % k)
        # NOTE: the original also ran `self.record.update(doc)` here, which
        # re-added the unknown fields and made the record wider than the
        # table's column list, breaking the INSERT; dropped.

    def drop_table(self):
        self.conn.execute("DROP TABLE IF EXISTS %s" % self.table_name)

    def create_table(self):
        sql_fields = ",".join(self.fts_default_record.keys())
        # Use the configured table name (the original hard-coded 'zettels',
        # ignoring self.table_name).
        create_stmt = "CREATE VIRTUAL TABLE %s USING fts4(%s)" % (
            self.table_name, sql_fields)
        print(create_stmt)
        self.conn.execute(create_stmt)

    def insert_into_table(self):
        sql_params = ",".join(self.fts_fields.values())
        # Flatten each field value (lists become comma-joined strings).
        sql_insert_values = [",".join(flatten(value))
                             for value in list(self.record.values())]
        insert_stmt = "INSERT INTO %s VALUES (%s)" % (self.table_name, sql_params)
        print(insert_stmt)
        print(self.record.keys())
        printer.pprint(sql_insert_values)
        self.conn.execute(insert_stmt, sql_insert_values)

    def done(self):
        """Commit and close the connection; the instance is unusable after."""
        self.conn.commit()
        self.conn.close()
# The field list mirrors the note schema; FTS indexes all of them.
sql = SQLiteFTS('zettels.db', 'zettels', ['title', 'tags', 'mentions', 'outline', 'cite', 'dates', 'summary', 'text', 'bibkey', 'bibtex', 'ris', 'inline', 'note' ])

# Debug scaffolding kept for reference:
#doc_keys = list(doc.keys())
#doc_keys.sort()
#rec_keys = list(sql.record.keys())
#rec_keys.sort()
#print("doc keys %s" % doc_keys)
#print("record keys %s" % rec_keys)

# Recreate the table and insert the note parsed above.
sql.drop_table()
sql.create_table()
printer.pprint(doc)
sql.bind(doc)
sql.insert_into_table()
sql.done()

#sql_insert_values = [ str(field) for field in sql.record.values()]
#print(sql_insert_values)
#print(record)

# Read a BibTeX record from disk to build a second note programmatically.
# NOTE(review): assumes xyz.txt exists in the working directory — confirm.
with open("xyz.txt") as datafile:
    text = datafile.read()
print(text)
bibkey = 'blahblahblah'
bibtex = text
bibkey = 'blahblahblah'
bibtex = text
import yaml
from collections import OrderedDict
class quoted(str): pass
def quoted_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
yaml.add_representer(quoted, quoted_presenter)
class literal(str): pass
def literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
yaml.add_representer(literal, literal_presenter)
def ordered_dict_presenter(dumper, data):
return dumper.represent_dict(data.items())
yaml.add_representer(OrderedDict, ordered_dict_presenter)
d = OrderedDict(bibkey=bibkey, bibtex=literal(bibtex))
print(yaml.dump(d))
```
| github_jupyter |
# 1. LGDE.com 일별 지표생성 실습 1일차 (정답)
#### 주피터 노트북 단축키 (Windows 환경)
| 단축키 | 설명 | 기타 |
| --- | --- | --- |
| Alt+Enter | 현재 셀 실행 + 다음 셀 추가 | 초기 개발 시에 주로 사용 |
| Shift+Enter | 현재 셀 실행 + 다음 셀 이동 | 전체 테스트 시에 주로 사용 |
| Ctrl+Enter | 현재 셀 실행 + 이동 안함 | 하나씩 점검 혹은 디버깅 시에 사용 |
| Ctrl+/ | 주석 적용 및 해제 | Shift 키로 여러 줄을 선택하고 주석 및 해제 사용 |
| Ctrl+s | 전체 저장 | - |
### 주피터 노트북 유의사항
* 모든 셀은 Code, Markdown, Raw 3가지 유형이 존재하며, **파이썬 코드 실행**은 반드시 ***Code*** 모드에서 수행되어야 합니다
* 현재 셀의 실행이 무한 루프에 빠지거나 너무 오래 걸리는 경우 상단 Menu 에서 ***Kernel - Interrupt Kernel*** 메뉴를 통해 현재 셀의 작업만 중지할 수 있습니다
* 메모리 혹은 다양한 이슈로 인해 제대로 동작하지 않는 경우에는 상단 Menu 에서 ***Kernel - Restart Kernel..*** 메뉴를 통해 재시작할 수 있습니다
## 5. 수집된 데이터 탐색
### 5-1. 스파크 세션 생성
```
from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from IPython.display import display, display_pretty, clear_output, JSON

# Build (or reuse) a SparkSession pinned to the Seoul timezone.
spark = (
    SparkSession
    .builder
    .config("spark.sql.session.timeZone", "Asia/Seoul")
    .getOrCreate()
)

# Configure notebook-friendly, table-style dataframe output.
spark.conf.set("spark.sql.repl.eagerEval.enabled", True)  # display enabled
spark.conf.set("spark.sql.repl.eagerEval.truncate", 100)  # display output columns size

# Common data locations.
home_jovyan = "/home/jovyan"
work_data = f"{home_jovyan}/work/data"
work_dir=!pwd
work_dir = work_dir[0]

# Local-environment tuning.
spark.conf.set("spark.sql.shuffle.partitions", 5)  # the number of partitions to use when shuffling data for joins or aggregations.
spark.conf.set("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true")
spark

# Load the day's raw user, purchase and access data (2020-10-25).
user25 = spark.read.parquet("user/20201025")
user25.printSchema()
user25.show(truncate=False)
display(user25)
purchase25 = spark.read.parquet("purchase/20201025")
purchase25.printSchema()
display(purchase25)
# Access logs are JSON; infer the schema on read.
access25 = spark.read.option("inferSchema", "true").json("access/20201025")
access25.printSchema()
display(access25)
```
### 5-2. 수집된 고객, 매출 및 접속 임시 테이블 생성
```
# Register the raw dataframes as temp views so they can be queried with SQL.
user25.createOrReplaceTempView("user25")
purchase25.createOrReplaceTempView("purchase25")
access25.createOrReplaceTempView("access25")
spark.sql("show tables '*25'")
```
### 5-3. SparkSQL을 이용하여 테이블 별 데이터프레임 생성하기
```
# Users who signed up on 2020-10-25.
u_signup_condition = "u_signup >= '20201025' and u_signup < '20201026'"
user = spark.sql("select u_id, u_name, u_gender from user25").where(u_signup_condition)
user.createOrReplaceTempView("user")
# Purchases made on 2020-10-25; p_time arrives as a unix epoch and is
# converted to a timestamp string before filtering.
p_time_condition = "p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'"
purchase = spark.sql("select from_unixtime(p_time) as p_time, p_uid, p_id, p_name, p_amount from purchase25").where(p_time_condition)
purchase.createOrReplaceTempView("purchase")
access = spark.sql("select a_id, a_tag, a_timestamp, a_uid from access25")
access.createOrReplaceTempView("access")
spark.sql("show tables")
```
### 5-4. 생성된 테이블을 SQL 문을 이용하여 탐색하기
```
# Exploratory queries: male users, purchases over 2,000,000, hits per page id.
whereCondition = "u_gender = '남'"
spark.sql("select * from user").where(whereCondition)
selectClause = "select * from purchase where p_amount > 2000000"
spark.sql(selectClause)
groupByClause="select a_id, count(1) from access group by a_id"
spark.sql(groupByClause)
```
## 6. 기본 지표 생성
### 6-1. DAU (Daily Active User) 지표를 생성하세요
```
# DAU: number of distinct users seen in today's access log.
display(access)
distinctAccessUser = "select count(distinct a_uid) as DAU from access"
dau = spark.sql(distinctAccessUser)
display(dau)
```
### 6-2. DPU (Daily Paying User) 지표를 생성하세요
```
# PU: number of distinct paying users in today's purchases.
display(purchase)
distinctPayingUser = "select count(distinct p_uid) as PU from purchase"
pu = spark.sql(distinctPayingUser)
display(pu)
```
### 6-3. DR (Daily Revenue) 지표를 생성하세요
```
# DR: total purchase amount for the day.
display(purchase)
sumOfDailyRevenue = "select sum(p_amount) as DR from purchase"
dr = spark.sql(sumOfDailyRevenue)
display(dr)
```
### 6-4. ARPU (Average Revenue Per User) 지표를 생성하세요
```
# Collect the scalar metrics to the driver, then ARPU = DR / DAU.
v_dau = dau.collect()[0]["DAU"]
v_pu = pu.collect()[0]["PU"]
v_dr = dr.collect()[0]["DR"]
print("ARPU : {}".format(v_dr / v_dau))
```
### 6-5. ARPPU (Average Revenue Per Paying User) 지표를 생성하세요
```
# ARPPU = DR / paying users.
print("ARPPU : {}".format(v_dr / v_pu))
```
## 7. 고급 지표 생성
### 7-1. 디멘젼 테이블을 설계 합니다
### 7-2. 오픈 첫 날 접속한 모든 고객 및 접속 횟수를 가진 데이터프레임을 생성합니다
```
# Per-user access counts for the day.
access.printSchema()
countOfAccess = "select a_uid, count(a_uid) as a_count from access group by a_uid order by a_uid asc"
accs = spark.sql(countOfAccess)
display(accs)
```
### 7-3. 일 별 이용자 별 총 매출 금액과, 구매 횟수를 가지는 데이터프레임을 생성합니다
```
# Per-user purchase count and total spend.
purchase.printSchema()
sumOfCountAndAmount = "select p_uid, count(p_uid) as p_count, sum(p_amount) as p_amount from purchase group by p_uid order by p_uid asc"
amts = spark.sql(sumOfCountAndAmount)
display(amts)
```
### 7-4. 이용자 정보와 구매 정보와 조인합니다
```
# Left-join access counts with purchase aggregates so non-paying users stay.
accs.printSchema()
amts.printSchema()
joinCondition = accs.a_uid == amts.p_uid
joinHow = "left_outer"
dim1 = accs.join(amts, joinCondition, joinHow)
dim1.printSchema()
display(dim1.orderBy(asc("a_uid")))
```
### 7-5. 고객 정보를 추가합니다
```
# Attach the user-profile columns to the activity/purchase aggregates.
dim1.printSchema()
user.printSchema()
joinCondition = dim1.a_uid == user.u_id
joinHow = "left_outer"
dim2 = dim1.join(user, joinCondition, joinHow)
dim2.printSchema()
display(dim2.orderBy(asc("a_uid")))
```
### 7-6. 중복되는 ID 컬럼은 제거하고, 숫자 필드에 널값은 0으로 기본값을 넣어줍니다
```
# Drop the duplicated join keys and default the numeric fields to 0.
dim2.printSchema()
dim3 = dim2.drop("p_uid", "u_id")
fillDefaultValue = { "p_count":0, "p_amount":0 }
dim4 = dim3.na.fill(fillDefaultValue)
dim4.printSchema()
display(dim4.orderBy(asc("a_uid")))
```
### 7-7. 생성된 유저 테이블을 재사용 가능하도록 컬럼 명을 변경합니다
```
# Rename everything into the dimension's d_* namespace.
dim4.printSchema()
dim5 = (
    dim4
    .withColumnRenamed("a_uid", "d_uid")
    .withColumnRenamed("a_count", "d_acount")
    .withColumnRenamed("p_amount", "d_pamount")
    .withColumnRenamed("p_count", "d_pcount")
    .withColumnRenamed("u_name", "d_name")
    .withColumnRenamed("u_gender", "d_gender")
    .drop("a_uid", "a_count", "p_amount", "p_count", "u_name", "u_gender")
    .select("d_uid", "d_name", "d_gender", "d_acount", "d_pamount", "d_pcount")
)
display(dim5.orderBy(asc("d_uid")))
```
### 7-8. 최초 구매 유저 정보를 추가합니다
```
# Earliest purchase time per user becomes d_first_purchase.
purchase.printSchema()
selectFirstPurchaseTime = "select p_uid, min(p_time) as p_time from purchase group by p_uid"
first_purchase = spark.sql(selectFirstPurchaseTime)
dim6 = dim5.withColumn("d_first_purchase", lit(None))
dim6.printSchema()
# Keep an existing d_first_purchase if present, else take the joined p_time.
exprFirstPurchase = expr("case when d_first_purchase is null then p_time else d_first_purchase end")
dim7 = (
    dim6.join(first_purchase, dim5.d_uid == first_purchase.p_uid, "left_outer")
    .withColumn("first_purchase", exprFirstPurchase)
    .drop("d_first_purchase", "p_uid", "p_time")
    .withColumnRenamed("first_purchase", "d_first_purchase")
)
dimension = dim7.orderBy(asc("d_uid"))
dimension.printSchema()
display(dimension)
```
### 7-9. 생성된 디멘젼을 저장소에 저장합니다
```
# Persist the dimension, partitioned by date in the path.
dimension.printSchema()
target_dir="dimension/dt=20201025"
dimension.write.mode("overwrite").parquet(target_dir)
```
### 7-10. 생성된 디멘젼을 다시 읽어서 출력합니다
```
# Round-trip check: re-read what was just written.
newDimension = spark.read.parquet(target_dir)
newDimension.printSchema()
display(newDimension)
```
### 7-11. 오늘 생성된 지표를 MySQL 테이블로 저장합니다
```
# Append today's KPI row to the MySQL table: read the rows before today,
# union the new row, then overwrite the table.
print("DT:{}, DAU:{}, PU:{}, DR:{}".format("2020-10-25", v_dau, v_pu, v_dr))
today = "2020-10-25"
lgde_origin = spark.read.jdbc("jdbc:mysql://mysql:3306/testdb", "testdb.lgde", properties={"user": "sqoop", "password": "sqoop"}).where(col("dt") < lit(today))
lgde_today = spark.createDataFrame([(today, v_dau, v_pu, v_dr)], ["DT", "DAU", "PU", "DR"])
lgde = lgde_origin.union(lgde_today)
lgde.write.mode("overwrite").jdbc("jdbc:mysql://mysql:3306/testdb", "testdb.lgde", properties={"user": "sqoop", "password": "sqoop"})
```
| github_jupyter |
# lhorizon example 1: where was GALEX in May 2003?
Imagine that you are examining a portion of the observational data record of the GALEX space telescope from May 2003 and you realize that there is an anomaly that might be explicable by a barycentric time offset. A SPK SPICE kernel for GALEX may exist somewhere, but you do not know where. Horizons contains detailed information about the positions of many orbital and ground intrument platforms, and ```lhorizon``` can help you quickly figure out where GALEX was during this period.
This is a relatively short time period at relatively coarse resolution. If you realize that you need higher resolution or if you'd like to do larger queries -- ones with more than about 70K rows -- take a look at the bulk query functions in mars_sun_angle.ipynb.
```
# run these imports if you'd like the code to function
from lhorizon import LHorizon
from lhorizon.lhorizon_utils import utc_to_tdb

# horizons code for the SSB (solar system barycenter)
solar_system_barycenter = '500@0'
coordinate_origin = solar_system_barycenter

# horizons knows the name "GALEX". Its Horizons numeric id, -127783, could also be used.
galex_horizons_id = "GALEX"
target_body_id = galex_horizons_id

# Time units are not consistent across different types of Horizons queries. In particular,
# times for vectors queries are in TDB, which in this case is about 64 seconds later than UTC.
# lhorizon.lhorizon_utils provides a function to convert from UTC to TDB. It works for dates later
# than 1972. For dates earlier than 1972, use spiceypy or astropy.time.
start = '2003-05-01T00:00:00'
stop = '2003-05-15T01:00:00'
step = "5m"
start_tdb = utc_to_tdb(start).isoformat()
stop_tdb = utc_to_tdb(stop).isoformat()

# make a LHorizon with these values.
galex_icrf = LHorizon(
    galex_horizons_id,
    coordinate_origin,
    epochs = {
        'start': start_tdb,
        'stop': stop_tdb,
        'step': step
    },
    query_type='VECTORS'
)

# fetch these data and concatenate them into a pandas dataframe.
# the LHorizon.table() method grabs a selection of columns from
# the Horizons response, regularizes units to meters and
# seconds, and makes some column names clearer or more tractable.
# if you want the full, unvarnished collection of values returned by Horizons
# with no modifications other than whitespace removal,
# use the LHorizons.dataframe() method instead.
vector_table = galex_icrf.table()

# note that the coordinate system in this particular query is ICRF
# of the most conventional kind -- measured from the solar system barycenter,
# geometric states uncorrected for light time or stellar aberration.
# columns are:
# time_tdb: time -- still in the TDB scale
# x, y, z: components of position vector in m
# vx, vy, vz: components of velocity vector in m/s
# dist: distance in m
# velocity: velocity in m/s
vector_table

# since this is a pandas dataframe, it can be easily manipulated in Python. If you'd rather work with it
# in some other way, it can also be easily written to CSV.
# NOTE(review): the ISO timestamps embed ':' characters, which makes this
# filename invalid on Windows filesystems — consider sanitizing if needed.
vector_table.to_csv("vector_table " + start + " to " + stop + ".csv", index=None)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/conference-submitter/jax-md/blob/master/notebooks/flocking.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright JAX MD Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
#@title Imports & Utils

# Imports
!pip install -q git+https://www.github.com/conference-submitter/jax-md

import numpy as onp

# Enable float64 before running any jax computation.
from jax.config import config ; config.update('jax_enable_x64', True)
import jax.numpy as np

from jax import random
from jax import jit
from jax import vmap
from jax import lax
# NOTE(review): jax.experimental.vectorize is not present in recent JAX
# releases — confirm this notebook's environment pins a compatible version.
from jax.experimental.vectorize import vectorize

from functools import partial
from collections import namedtuple

import base64
import IPython
from google.colab import output
import os

from jax_md import space, smap, energy, minimize, quantity, simulate, partition, util
from jax_md.util import f32

# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style='white')

# Dark-theme palette shared by the plotting helpers below.
dark_color = [56 / 256] * 3
light_color = [213 / 256] * 3
axis_color = 'white'
def format_plot(x='', y='', grid=True):
    """Apply the notebook's dark theme to the current axes and label them."""
    ax = plt.gca()
    for side in ('bottom', 'top', 'right', 'left'):
        ax.spines[side].set_color(axis_color)
    for which in ('x', 'y'):
        ax.tick_params(axis=which, colors=axis_color)
    ax.yaxis.label.set_color(axis_color)
    ax.xaxis.label.set_color(axis_color)
    ax.set_facecolor(dark_color)
    plt.grid(grid)
    plt.xlabel(x, fontsize=20)
    plt.ylabel(y, fontsize=20)
def finalize_plot(shape=(1, 1)):
    """Scale the current figure by `shape` (relative to its height) and tighten layout."""
    fig = plt.gcf()
    fig.patch.set_facecolor(dark_color)
    base = fig.get_size_inches()[1]
    fig.set_size_inches(shape[0] * 1.5 * base,
                        shape[1] * 1.5 * base)
    plt.tight_layout()
# Progress Bars
from IPython.display import HTML, display
import time
def ProgressIter(iter_fun, iter_len=0):
    """Yield items from `iter_fun` while updating an HTML progress bar.

    If `iter_len` is not given, it is taken from len(iter_fun).
    """
    if not iter_len:
        iter_len = len(iter_fun)
    out = display(progress(0, iter_len), display_id=True)
    for i, it in enumerate(iter_fun):
        yield it
        out.update(progress(i + 1, iter_len))

def progress(value, max):
    """Render an HTML <progress> element showing `value` out of `max`."""
    # NOTE: `max` shadows the builtin, but renaming it would change the
    # keyword interface; left as-is.
    return HTML("""
<progress
value='{value}'
max='{max}',
style='width: 45%'
>
{value}
</progress>
""".format(value=value, max=max))
def normalize(v):
    """Scale each row of `v` to unit Euclidean norm."""
    return v / np.linalg.norm(v, axis=1, keepdims=True)
# Rendering
renderer_code = IPython.display.HTML('''
<canvas id="canvas"></canvas>
<script>
Rg = null;
Ng = null;
var current_scene = {
R: null,
N: null,
is_loaded: false,
frame: 0,
frame_count: 0,
boid_vertex_count: 0,
boid_buffer: [],
predator_vertex_count: 0,
predator_buffer: [],
disk_vertex_count: 0,
disk_buffer: null,
box_size: 0
};
google.colab.output.setIframeHeight(0, true, {maxHeight: 5000});
async function load_simulation() {
buffer_size = 400;
max_frame = 800;
result = await google.colab.kernel.invokeFunction(
'notebook.GetObstacles', [], {});
data = result.data['application/json'];
if(data.hasOwnProperty('Disk')) {
current_scene = put_obstacle_disk(current_scene, data.Disk);
}
for (var i = 0 ; i < max_frame ; i += buffer_size) {
console.log(i);
result = await google.colab.kernel.invokeFunction(
'notebook.GetBoidStates', [i, i + buffer_size], {});
data = result.data['application/json'];
current_scene = put_boids(current_scene, data);
}
current_scene.is_loaded = true;
result = await google.colab.kernel.invokeFunction(
'notebook.GetPredators', [], {});
data = result.data['application/json'];
if (data.hasOwnProperty('R'))
current_scene = put_predators(current_scene, data);
result = await google.colab.kernel.invokeFunction(
'notebook.GetSimulationInfo', [], {});
current_scene.box_size = result.data['application/json'].box_size;
}
function initialize_gl() {
const canvas = document.getElementById("canvas");
canvas.width = 640;
canvas.height = 640;
const gl = canvas.getContext("webgl2");
if (!gl) {
alert('Unable to initialize WebGL.');
return;
}
gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);
gl.clearColor(0.2, 0.2, 0.2, 1.0);
gl.enable(gl.DEPTH_TEST);
const shader_program = initialize_shader(
gl, VERTEX_SHADER_SOURCE_2D, FRAGMENT_SHADER_SOURCE_2D);
const shader = {
program: shader_program,
attribute: {
vertex_position: gl.getAttribLocation(shader_program, 'vertex_position'),
},
uniform: {
screen_position: gl.getUniformLocation(shader_program, 'screen_position'),
screen_size: gl.getUniformLocation(shader_program, 'screen_size'),
color: gl.getUniformLocation(shader_program, 'color'),
},
};
gl.useProgram(shader_program);
const half_width = 200.0;
gl.uniform2f(shader.uniform.screen_position, half_width, half_width);
gl.uniform2f(shader.uniform.screen_size, half_width, half_width);
gl.uniform4f(shader.uniform.color, 0.9, 0.9, 1.0, 1.0);
return {gl: gl, shader: shader};
}
var loops = 0;
function update_frame() {
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
if (!current_scene.is_loaded) {
window.requestAnimationFrame(update_frame);
return;
}
var half_width = current_scene.box_size / 2.;
gl.uniform2f(shader.uniform.screen_position, half_width, half_width);
gl.uniform2f(shader.uniform.screen_size, half_width, half_width);
if (current_scene.frame >= current_scene.frame_count) {
if (!current_scene.is_loaded) {
window.requestAnimationFrame(update_frame);
return;
}
loops++;
current_scene.frame = 0;
}
gl.enableVertexAttribArray(shader.attribute.vertex_position);
gl.bindBuffer(gl.ARRAY_BUFFER, current_scene.boid_buffer[current_scene.frame]);
gl.uniform4f(shader.uniform.color, 0.0, 0.35, 1.0, 1.0);
gl.vertexAttribPointer(
shader.attribute.vertex_position,
2,
gl.FLOAT,
false,
0,
0
);
gl.drawArrays(gl.TRIANGLES, 0, current_scene.boid_vertex_count);
if(current_scene.predator_buffer.length > 0) {
gl.bindBuffer(gl.ARRAY_BUFFER, current_scene.predator_buffer[current_scene.frame]);
gl.uniform4f(shader.uniform.color, 1.0, 0.35, 0.35, 1.0);
gl.vertexAttribPointer(
shader.attribute.vertex_position,
2,
gl.FLOAT,
false,
0,
0
);
gl.drawArrays(gl.TRIANGLES, 0, current_scene.predator_vertex_count);
}
if(current_scene.disk_buffer) {
gl.bindBuffer(gl.ARRAY_BUFFER, current_scene.disk_buffer);
gl.uniform4f(shader.uniform.color, 0.9, 0.9, 1.0, 1.0);
gl.vertexAttribPointer(
shader.attribute.vertex_position,
2,
gl.FLOAT,
false,
0,
0
);
gl.drawArrays(gl.TRIANGLES, 0, current_scene.disk_vertex_count);
}
current_scene.frame++;
if ((current_scene.frame_count > 1 && loops < 5) ||
(current_scene.frame_count == 1 && loops < 240))
window.requestAnimationFrame(update_frame);
if (current_scene.frame_count > 1 && loops == 5 && current_scene.frame < current_scene.frame_count - 1)
window.requestAnimationFrame(update_frame);
}
// Upload per-frame boid geometry to the GPU.
//
// `boids` carries base64-encoded float32 arrays:
//   R:     positions, shape [steps, boid_count, 2]
//   theta: headings,  shape [steps, boid_count]
// For every timestep one GL buffer is created holding a small triangle
// per boid oriented along its heading. Buffers are appended to
// scene.boid_buffer and scene.frame_count advances by `steps`.
// Uses global: gl.
function put_boids(scene, boids) {
    const R = decode(boids['R']);
    const R_shape = boids['R_shape'];
    const theta = decode(boids['theta']);
    const theta_shape = boids['theta_shape'];
    // Flat index into R for (step i, boid b, coordinate xy).
    function index(i, b, xy) {
        return i * R_shape[1] * R_shape[2] + b * R_shape[2] + xy;
    }
    var steps = R_shape[0];
    // Renamed from `boids`, which shadowed the function parameter.
    var boid_count = R_shape[1];
    var dimensions = R_shape[2];
    if(dimensions != 2) {
        alert('Can only deal with two-dimensional data.')
    }
    // First flatten the data: three 2-D vertices (6 floats) per boid.
    var buffer_data = new Float32Array(boid_count * 6);
    var size = 8.0;  // Triangle half-length, in world units.
    for (var i = 0 ; i < steps ; i++) {
        var buffer = gl.createBuffer();
        for (var b = 0 ; b < boid_count ; b++) {
            var xi = index(i, b, 0);
            var yi = index(i, b, 1);
            var ti = i * boid_count + b;
            // Heading vector scaled to the triangle size.
            var Nx = size * Math.cos(theta[ti]);
            var Ny = size * Math.sin(theta[ti]);
            // Nose vertex plus two tail vertices offset perpendicular
            // to the heading.
            buffer_data.set([
                R[xi] + Nx, R[yi] + Ny,
                R[xi] - Nx - 0.5 * Ny, R[yi] - Ny + 0.5 * Nx,
                R[xi] - Nx + 0.5 * Ny, R[yi] - Ny - 0.5 * Nx,
            ], b * 6);
        }
        gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
        gl.bufferData(gl.ARRAY_BUFFER, buffer_data, gl.STATIC_DRAW);
        scene.boid_buffer.push(buffer);
    }
    scene.boid_vertex_count = boid_count * 3;
    scene.frame_count += steps;
    return scene;
}
// Upload per-frame predator geometry to the GPU.
// TODO: Unify this with the put_boids function.
//
// Same layout as put_boids (positions [steps, count, 2], headings
// [steps, count]) but with larger triangles; buffers are appended to
// scene.predator_buffer. Does NOT advance scene.frame_count — the boid
// trajectory defines the frame count. Uses global: gl.
function put_predators(scene, boids) {
    const R = decode(boids['R']);
    const R_shape = boids['R_shape'];
    const theta = decode(boids['theta']);
    const theta_shape = boids['theta_shape'];
    // Flat index into R for (step i, predator b, coordinate xy).
    function index(i, b, xy) {
        return i * R_shape[1] * R_shape[2] + b * R_shape[2] + xy;
    }
    var steps = R_shape[0];
    // Renamed from `boids`, which shadowed the function parameter.
    var predator_count = R_shape[1];
    var dimensions = R_shape[2];
    if(dimensions != 2) {
        alert('Can only deal with two-dimensional data.')
    }
    // First flatten the data: three 2-D vertices (6 floats) per predator.
    var buffer_data = new Float32Array(predator_count * 6);
    var size = 18.0;  // Predators are drawn larger than boids.
    for (var i = 0 ; i < steps ; i++) {
        var buffer = gl.createBuffer();
        for (var b = 0 ; b < predator_count ; b++) {
            var xi = index(i, b, 0);
            var yi = index(i, b, 1);
            var ti = theta_shape[1] * i + b;
            var Nx = size * Math.cos(theta[ti]);
            var Ny = size * Math.sin(theta[ti]);
            buffer_data.set([
                R[xi] + Nx, R[yi] + Ny,
                R[xi] - Nx - 0.5 * Ny, R[yi] - Ny + 0.5 * Nx,
                R[xi] - Nx + 0.5 * Ny, R[yi] - Ny - 0.5 * Nx,
            ], b * 6);
        }
        gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
        gl.bufferData(gl.ARRAY_BUFFER, buffer_data, gl.STATIC_DRAW);
        scene.predator_buffer.push(buffer);
    }
    scene.predator_vertex_count = predator_count * 3;
    return scene;
}
// Upload obstacle-disk geometry to the GPU as one static buffer
// (obstacles do not move, so a single buffer serves every frame).
//
// `disk` carries base64-encoded float32 arrays:
//   R: disk centers, shape [disk_count, 2]
//   D: disk radii,   shape [disk_count]
// Uses global: gl.
function put_obstacle_disk(scene, disk) {
    const R = decode(disk.R);
    const R_shape = disk.R_shape;
    const radius = decode(disk.D);
    const radius_shape = disk.D_shape;
    const disk_count = R_shape[0];
    const dimensions = R_shape[1];
    if (dimensions != 2) {
        alert('Can only handle two-dimensional data.');
    }
    if (radius_shape[0] != disk_count) {
        alert('Inconsistent disk radius count found.');
    }
    // Each disk is approximated by `segments` triangles sharing the center.
    const segments = 32;
    // Flat index into R for (disk o, coordinate xy).
    function index(o, xy) {
        return o * R_shape[1] + xy;
    }
    var buffer_data = new Float32Array(disk_count * segments * 6);
    for (var i = 0 ; i < disk_count ; i++) {
        var xi = index(i, 0);
        var yi = index(i, 1);
        for (var s = 0 ; s < segments ; s++) {
            // Angular bounds of this pie slice.
            const th = 2 * s / segments * Math.PI;
            const th_p = 2 * (s + 1) / segments * Math.PI;
            // NOTE(review): drawn at 80% of the stated radius — presumably a
            // deliberate visual margin; confirm before changing.
            const rad = radius[i] * 0.8;
            buffer_data.set([
                R[xi], R[yi],
                R[xi] + rad * Math.cos(th), R[yi] + rad * Math.sin(th),
                R[xi] + rad * Math.cos(th_p), R[yi] + rad * Math.sin(th_p),
            ], i * segments * 6 + s * 6);
        }
    }
    var buffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
    gl.bufferData(gl.ARRAY_BUFFER, buffer_data, gl.STATIC_DRAW);
    scene.disk_vertex_count = disk_count * segments * 3;
    scene.disk_buffer = buffer;
    return scene;
}
// SHADER CODE
const VERTEX_SHADER_SOURCE_2D = `
// Vertex Shader Program.
attribute vec2 vertex_position;
uniform vec2 screen_position;
uniform vec2 screen_size;
void main() {
vec2 v = (vertex_position - screen_position) / screen_size;
gl_Position = vec4(v, 0.0, 1.0);
}
`;
const FRAGMENT_SHADER_SOURCE_2D = `
precision mediump float;
uniform vec4 color;
void main() {
gl_FragColor = color;
}
`;
// Compile and link a shader program from vertex + fragment GLSL sources.
// Returns the linked program, or null (after alerting) on link failure.
function initialize_shader(
    gl, vertex_shader_source, fragment_shader_source) {
    const vs = compile_shader(gl, gl.VERTEX_SHADER, vertex_shader_source);
    const fs = compile_shader(gl, gl.FRAGMENT_SHADER, fragment_shader_source);
    const program = gl.createProgram();
    gl.attachShader(program, vs);
    gl.attachShader(program, fs);
    gl.linkProgram(program);
    if (gl.getProgramParameter(program, gl.LINK_STATUS)) {
        return program;
    }
    alert(
        'Unable to initialize shader program: ' +
        gl.getProgramInfoLog(program)
    );
    return null;
}
// Compile one shader of the given type (gl.VERTEX_SHADER or
// gl.FRAGMENT_SHADER) from GLSL source. Returns the shader object, or
// null (after alerting with the compiler log) on failure.
function compile_shader(gl, type, source) {
    const shader = gl.createShader(type);
    gl.shaderSource(shader, source);
    gl.compileShader(shader);
    if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
        // Fixed typo in the user-facing message ("occured" -> "occurred").
        alert('An error occurred compiling shader: ' + gl.getShaderInfoLog(shader));
        gl.deleteShader(shader);
        return null;
    }
    return shader;
}
// SERIALIZATION UTILITIES
// Decode a base64 payload into a Float32Array. `nBlocksSize` is unused.
// NOTE(review): atob is applied twice — presumably the Python side's
// base64 bytes get base64-encoded again by the notebook comm transport;
// confirm against the producing callbacks before changing.
function decode(sBase64, nBlocksSize) {
    var chrs = atob(atob(sBase64));
    // Copy the decoded byte string into a raw buffer, then reinterpret
    // the bytes as float32 values.
    var array = new Uint8Array(new ArrayBuffer(chrs.length));
    for(var i = 0 ; i < chrs.length ; i++) {
        array[i] = chrs.charCodeAt(i);
    }
    return new Float32Array(array.buffer);
}
// RUN CELL
load_simulation();
gl_and_shader = initialize_gl();
var gl = gl_and_shader.gl;
var shader = gl_and_shader.shader;
update_frame();
</script>
''')
def encode(R):
    """Serialize *R* as base64-encoded raw float32 bytes."""
    raw = onp.asarray(R, onp.float32).tobytes()
    return base64.b64encode(raw)
def render(box_size, states, obstacles=None, predators=None):
    """Render a boid simulation in the notebook.

    Registers `output` comm callbacks that the embedded JavaScript
    renderer polls for simulation data, then returns the HTML/JS
    renderer object (`renderer_code`, defined alongside this function).

    Args:
        box_size: side length of the periodic simulation box.
        states: a single `Boids` state or a list of `Boids` (one per frame).
        obstacles: optional `Obstacle` of disk centers `R` and radii `D`.
        predators: optional list of `Predator` states (one per frame).
    """
    # Promote a single state to a one-frame trajectory.
    if isinstance(states, Boids):
        R = np.reshape(states.R, (1,) + states.R.shape)
        theta = np.reshape(states.theta, (1,) + states.theta.shape)
    elif isinstance(states, list):
        # NOTE(review): if any list element is not a Boids, R and theta are
        # left unbound and get_boid_states will raise — confirm callers
        # always pass homogeneous lists.
        if all([isinstance(x, Boids) for x in states]):
            R, theta = zip(*states)
            R = onp.stack(R)
            theta = onp.stack(theta)
    if isinstance(predators, list):
        # Keep only positions and headings; drop any extra fields.
        R_predators, theta_predators, *_ = zip(*predators)
        R_predators = onp.stack(R_predators)
        theta_predators = onp.stack(theta_predators)
    def get_boid_states(start, end):
        # Serve the [start, end) slice of the trajectory to the JS renderer.
        R_, theta_ = R[start:end], theta[start:end]
        return IPython.display.JSON(data={
            "R_shape": R_.shape,
            "R": encode(R_),
            "theta_shape": theta_.shape,
            "theta": encode(theta_)
        })
    output.register_callback('notebook.GetBoidStates', get_boid_states)
    def get_obstacles():
        # Empty payload signals "no obstacles" to the JS side.
        if obstacles is None:
            return IPython.display.JSON(data={})
        else:
            return IPython.display.JSON(data={
                'Disk': {
                    'R': encode(obstacles.R),
                    'R_shape': obstacles.R.shape,
                    'D': encode(obstacles.D),
                    'D_shape': obstacles.D.shape
                }
            })
    output.register_callback('notebook.GetObstacles', get_obstacles)
    def get_predators():
        # Empty payload signals "no predators" to the JS side.
        if predators is None:
            return IPython.display.JSON(data={})
        else:
            return IPython.display.JSON(data={
                'R': encode(R_predators),
                'R_shape': R_predators.shape,
                'theta': encode(theta_predators),
                'theta_shape': theta_predators.shape
            })
    output.register_callback('notebook.GetPredators', get_predators)
    def get_simulation_info():
        return IPython.display.JSON(data={
            'frames': R.shape[0],
            'box_size': box_size
        })
    output.register_callback('notebook.GetSimulationInfo', get_simulation_info)
    return renderer_code
```
#### **Warning**: At the moment you must actually run the cells of the notebook to see the visualizations. After running the simulations in this notebook, you have to wait a moment (5 - 30 seconds) for rendering.
# Flocks, Herds, and Schools: A Distributed Behavioral Model
We will go over the paper, ["Flocks, Herds, and Schools: A Distributed Behavioral Model"](https://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=E252054B1C02D387E8C20827CB414543?doi=10.1.1.103.7187&rep=rep1&type=pdf) published by C. W. Reynolds in SIGGRAPH 1987. The paper itself is fantastic and, as far as a description of flocking is concerned, there is little that we can offer. Therefore, rather than go through the paper directly, we will use [JAX](https://www.github.com/google/jax) and [JAX, MD](https://www.github.com/google/jax-md) to interactively build a simulation similar to Reynolds' in colab. To simplify our discussion, we will build a two-dimensional version of Reynolds' simulation.
In nature there are many examples in which large numbers of animals exhibit complex collective motion (schools of fish, flocks of birds, herds of horses, colonies of ants). In his seminal paper, Reynolds introduces a model of such collective behavior (henceforth referred to as "flocking") based on simple rules that can be computed locally for each entity (referred to as a "boid") in the flock based on its environment. This paper is written in the context of computer graphics and so Reynolds is going for biologically inspired simulations that look right rather than accuracy in any statistical sense. Ultimately, Reynolds measures success in terms of "delight" people find in watching the simulations; we will use a similar metric here.
Note, we recommend running this notebook in "Dark" mode.
## Boids
Reynolds is interested in simulating bird-like entities that are described by a position, $R$, and an orientation, $\theta$. This state can optionally be augmented with extra information (for example, hunger or fear). We can define a Boids type that stores data for a collection of boids as two arrays. `R` is an `ndarray` of shape `[boid_count, spatial_dimension]` and `theta` is an ndarray of shape `[boid_count]`. An individual boid is an index into these arrays. It will often be useful to refer to the vector orientation of the boid $N = (\cos\theta, \sin\theta)$.
```
# Flock state: R holds positions with shape [boid_count, spatial_dimension];
# theta holds heading angles with shape [boid_count].
Boids = namedtuple('Boids', ['R', 'theta'])
```
We can instantiate a collection of boids randomly in a box of side length $L$. We will use [periodic boundary conditions](https://en.wikipedia.org/wiki/Periodic_boundary_conditions) for our simulation which means that boids will be able to wrap around the sides of the box. To do this we will use the `space.periodic` command in [JAX, MD](https://github.com/google/jax-md#spaces-spacepy).
```
# Simulation Parameters:
box_size = 800.0 # A float specifying the side-length of the box.
boid_count = 200 # An integer specifying the number of boids.
dim = 2 # The spatial dimension in which we are simulating.
# Create RNG state to draw random numbers (see LINK).
rng = random.PRNGKey(0)
# Define periodic boundary conditions.
displacement, shift = space.periodic(box_size)
# Initialize the boids.
rng, R_rng, theta_rng = random.split(rng, 3)
boids = Boids(
R = box_size * random.uniform(R_rng, (boid_count, dim)),
theta = random.uniform(theta_rng, (boid_count,), maxval=2. * np.pi)
)
display(render(box_size, boids))
```
## Dynamics
Now that we have defined our boids, we have to imbue them with some rules governing their motion. Reynolds notes that in nature flocks do not seem to have a maximum size, but instead can keep acquiring new boids and grow without bound. He also comments that each boid cannot possibly be keeping track of the entire flock and must, instead, be focusing on its local neighborhood. Reynolds then proposes three simple, local, rules that boids might try to follow:
1. **Alignment:** Boids will try to align themselves in the direction of their neighbors.
2. **Avoidance:** Boids will avoid colliding with their neighbors.
3. **Cohesion:** Boids will try to move towards the center of mass of their neighbors.
In his exposition, Reynolds is vague about the details for each of these rules and so we will take some creative liberties. We will try to phrase this problem as an energy model, so our goal will be to write down an "energy" function (similar to a "loss") $E(R, \theta)$ such that low-energy configurations of boids satisfy each of the three rules above.
\
We will write the total energy as a sum of three terms, one for each of the rules above:
$$E(R, \theta) = E_{\text{Align}}(R, \theta) + E_{\text{Avoid}}(R, \theta) + E_{\text{Cohesion}}(R,\theta)$$
We will go through each of these rules separately below starting with alignment. Of course, any of these terms could be replaced by a learned solution.
\
Once we have an energy defined in this way, configurations of boids that move along low energy trajectories might display behavior that looks appealing. However, we still have a lot of freedom to decide how we want to define dynamics over the boids. Reynolds says he uses overdamped dynamics and so we will do something similar. In particular, we will update the position of the boids so that they try to move to minimize their energy. Simultaneously , we assume that the boids are swimming (or flying / walking). We choose a particularly simple model of this to start with and assume that the boids move at a fixed speed, $v$, along whatever direction they are pointing. We will use simple forward-Euler integration. This gives an update step,
$${R_i}' = R_i + \delta t(v\hat N_i - \nabla_{R_i}E(R, \theta))$$
where $\delta t$ is a timestep that we are allowed to choose. We will often refer to the force, $F^{R_i} = -\nabla_{R_i} E(R, \hat N)$ as the negative gradient of the energy with respect to the position of the $i$'th boid.
\
We will update the orientations of the boids to turn them towards "low energy" directions. To do this we will once again use a simple forward-Euler scheme,
$$
\theta'_i = \theta_i - \delta t\nabla_{\theta_i}E(R,\theta)
$$
This is just one choice of dynamics, but there are probably many that would work equally well! Feel free to play around with it. One easy improvement that one could imagine making would be to use a more sophisticated integrator. We include a Runge-Kutta 4 integrator at the top of the notebook for an adventurous reader.
\
To see what this looks like before we define any interactions, we can run a simulation with $E(R,\theta) = 0$ by first defining an `update` function that takes a boids state to a new boids state.
```
@vmap
def normal(theta):
    """Return the unit heading vector N = (cos θ, sin θ).

    Written for a scalar angle; `vmap` maps it over a batch, giving an
    array of shape [boid_count, 2].
    """
    return np.array([np.cos(theta), np.sin(theta)])
def dynamics(energy_fn, dt, speed):
    """Build a jitted forward-Euler update step for the boid state.

    Each step moves every boid at constant `speed` along its heading plus
    the positional force -∇_R E, and turns it by -dt ∂E/∂θ.

    Args:
        energy_fn: maps the state dict to a scalar energy.
        dt: integration timestep.
        speed: constant swim speed along the heading direction.

    Returns:
        update(_, state) -> state, suitable for `lax.fori_loop` (the first
        argument is the ignored loop index).
    """
    @jit
    def update(_, state):
        R, theta = state['boids']
        # quantity.force yields the negative energy gradient, structured
        # like `state` itself.
        dstate = quantity.force(energy_fn)(state)
        dR, dtheta = dstate['boids']
        n = normal(state['boids'].theta)
        # `shift` applies the displacement under periodic boundaries.
        state['boids'] = Boids(shift(R, dt * (speed * n + dR)),
                               theta + dt * dtheta)
        return state
    return update
```
Now we can run a simulation and save the boid positions to a `boids_buffer` which will just be a list.
```
update = dynamics(energy_fn=lambda state: 0., dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer))
```
### Alignment
While the above simulation works and our boids are moving happily along, it is not terribly interesting. The first thing that we can add to this simulation is the alignment rule. When writing down these rules, it is often easier to express them for a single pair of boids and then use JAX's [automatic vectorization](https://github.com/google/jax#auto-vectorization-with-vmap) via `vmap` to extend them to our entire simulation.
Given a pair of boids $i$ and $j$ we would like to choose an energy function that is minimized when they are pointing in the same direction. As discussed above, one of Reynolds' requirements was locality: boids should only interact with nearby boids. To do this, we introduce a cutoff $D_{\text{Align}}$ and ignore pairs of boids such that $\|\Delta R_{ij}\| > D_{\text{Align}}$ where $\Delta R_{ij} = R_i - R_j$. To make it so boids react smoothly we will have the energy start out at zero when $\|R_i - R_j\| = D_{\text{Align}}$ and increase smoothly as they get closer. Together, these simple ideas lead us to the following proposal,
$$\epsilon_{\text{Align}}(\Delta R_{ij}, \hat N_i, \hat N_j) = \begin{cases}\frac{J_{\text{Align}}}\alpha\left (1 - \frac{\|\Delta R_{ij}\|}{D_{\text{Align}}}\right)^\alpha(1 - \hat N_1 \cdot \hat N_2)^2 & \text{if $\|\Delta R_{ij}\| < D$}\\ 0 & \text{otherwise}\end{cases}$$
This energy will be maximized when $N_1$ and $N_2$ are anti-aligned and minimized when $N_1 = N_2$. In general, we would like our boids to turn to align themselves with their neighbors rather than shift their centers to move apart. Therefore, we'll insert a stop-gradient into the displacement.
```
def align_fn(dR, N_1, N_2, J_align, D_align, alpha):
    """Pairwise alignment energy between two boids.

    Zero beyond the cutoff D_align; otherwise grows as the boids get closer
    and as their headings N_1 and N_2 disagree. The stop-gradient makes
    boids respond by turning rather than by shifting position.
    """
    dR = lax.stop_gradient(dR)
    scaled_dist = space.distance(dR) / D_align
    envelope = (1. - scaled_dist) ** alpha
    misalignment = (1 - np.dot(N_1, N_2)) ** 2
    return np.where(scaled_dist < 1.0,
                    J_align / alpha * envelope * misalignment,
                    0.)
```
We can plot the energy for different alignments as well as different distances between boids. We see that the energy goes to zero for large distances and when the boids are aligned.
```
#@title Alignment Energy
N_1 = np.array([1.0, 0.0])
angles = np.linspace(0, np.pi, 60)
N_2 = vmap(lambda theta: np.array([np.cos(theta), np.sin(theta)]))(angles)
distances = np.linspace(0, 1, 5)
dRs = vmap(lambda r: np.array([r, 0.]))(distances)
fn = partial(align_fn, J_align=1., D_align=1., alpha=2.)
energy = vmap(vmap(fn, (None, None, 0)), (0, None, None))(dRs, N_1, N_2)
for d, e in zip(distances, energy):
plt.plot(angles, e, label='r = {}'.format(d), linewidth=3)
plt.xlim([0, np.pi])
format_plot('$\\theta$', '$E(r, \\theta)$')
plt.legend()
finalize_plot()
```
We can now run our simulation with the alignment energy alone.
```
def energy_fn(state):
    """Total alignment energy of the flock.

    Builds the [n, n] matrix of pairwise alignment energies and sums it;
    the 0.5 corrects for counting each (i, j) pair twice.
    """
    boids = state['boids']
    E_align = partial(align_fn, J_align=0.5, D_align=45., alpha=3.)
    # Map the align energy over all pairs of boids. While both applications
    # of vmap map over the displacement matrix, each acts on only one normal.
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    # Pairwise displacement matrix under periodic boundary conditions.
    dR = space.map_product(displacement)(boids.R, boids.R)
    N = normal(boids.theta)
    return 0.5 * np.sum(E_align(dR, N, N))
update = dynamics(energy_fn=energy_fn, dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer))
```
Now the boids align with one another and already the simulation is displaying interesting behavior!
### Avoidance
We can incorporate an avoidance rule that will keep the boids from bumping into one another. This will help them to form a flock with some volume rather than collapsing together. To this end, imagine a very simple model of boids that push away from one another if they get within a distance $D_{\text{Avoid}}$ and otherwise don't repel. We can use a simple energy similar to Alignment but without any angular dependence,
$$
\epsilon_{\text{Avoid}}(\Delta R_{ij}) = \begin{cases}\frac{J_{\text{Avoid}}}{\alpha}\left(1 - \frac{||\Delta R_{ij}||}{D_{\text{Avoid}}}\right)^\alpha & ||\Delta R_{ij}||<D_{\text{Avoid}} \\ 0 & \text{otherwise}\end{cases}
$$
This is implemented in the following Python function. Unlike the case of alignment, here we want boids to move away from one another and so we don't need a stop gradient on $\Delta R$.
```
def avoid_fn(dR, J_avoid, D_avoid, alpha):
    """Pairwise repulsion energy that keeps boids from overlapping.

    Zero beyond the cutoff D_avoid, rising smoothly to J_avoid / alpha at
    zero separation. Purely positional: no angular dependence.
    """
    scaled_dist = space.distance(dR) / D_avoid
    repulsion = J_avoid / alpha * (1 - scaled_dist) ** alpha
    return np.where(scaled_dist < 1., repulsion, 0.)
```
Plotting the energy we see that it is highest when boids are overlapping and then goes to zero smoothly until $||\Delta R|| = D_{\text{Avoid}}$.
```
#@title Avoidance Energy
dr = np.linspace(0, 2., 60)
dR = vmap(lambda r: np.array([0., r]))(dr)
Es = vmap(partial(avoid_fn, J_avoid=1., D_avoid=1., alpha=3.))(dR)
plt.plot(dr, Es, 'r', linewidth=3)
plt.xlim([0, 2])
format_plot('$r$', '$E$')
finalize_plot()
```
We can now run a version of our simulation with both alignment and avoidance.
```
def energy_fn(state):
    """Alignment plus avoidance energy of the flock."""
    boids = state['boids']
    E_align = partial(align_fn, J_align=1., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    # New Avoidance Code
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    # Avoidance takes only the displacement, so default vmap axes suffice.
    E_avoid = vmap(vmap(E_avoid))
    #
    dR = space.map_product(displacement)(boids.R, boids.R)
    N = normal(boids.theta)
    # The 0.5 corrects for counting each (i, j) pair twice.
    return 0.5 * np.sum(E_align(dR, N, N) + E_avoid(dR))
update = dynamics(energy_fn=energy_fn, dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer))
```
The avoidance term in the energy stops the boids from collapsing on top of one another.
### Cohesion
The final piece of Reynolds' boid model is cohesion. Notice that in the above simulation, the boids tend to move in the same direction but they also often drift apart. To make the boids behave more like schools of fish or birds, which maintain a more compact arrangement, we add a cohesion term to the energy.
The goal of the cohesion term is to align boids towards the center of mass of their neighbors. Given a boid, $i$, we can compute the center of mass position of its neighbors as,
$$
\Delta R_i = \frac 1{|\mathcal N|} \sum_{j\in\mathcal N}\Delta R_{ij}
$$
where we have let $\mathcal N$ be the set of boids such that $||\Delta R_{ij}|| < D_{\text{Cohesion}}$.
Given the center of mass displacements, we can define a reasonable cohesion energy as,
$$
\epsilon_{Cohesion}\left(\widehat{\Delta R}_i, N_i\right) = \frac12J_{\text{Cohesion}}\left(1 - \widehat {\Delta R}_i\cdot N\right)^2
$$
where $\widehat{\Delta R}_i = \Delta R_i / ||\Delta R_i||$ is the normalized vector pointing in the direction of the center of mass. This function is minimized when the boid is pointing in the direction of the center of mass.
We can implement the cohesion energy in the following python function. Note that as with alignment, we will have boids control their orientation and so we will insert a stop gradient on the displacement vector.
```
def cohesion_fn(dR, N, J_cohesion, D_cohesion, eps=1e-7):
    """Cohesion energy: turn each boid toward its neighbors' center of mass.

    Args:
        dR: [n, n, 2] pairwise displacement matrix.
        N: [n, 2] unit heading vectors.
        J_cohesion: energy scale.
        D_cohesion: neighbor cutoff distance.
        eps: guard against division by zero for isolated boids.
    """
    # Boids should steer (rotate) toward the center of mass, not be dragged
    # toward it — hence the stop-gradient on positions.
    dR = lax.stop_gradient(dR)
    dr = np.linalg.norm(dR, axis=-1, keepdims=True)
    mask = dr < D_cohesion
    # Per-boid neighbor count and summed displacement within the cutoff.
    N_com = np.where(mask, 1.0, 0)
    dR_com = np.where(mask, dR, 0)
    dR_com = np.sum(dR_com, axis=1) / (np.sum(N_com, axis=1) + eps)
    # NOTE(review): eps is added to the vector before taking the norm rather
    # than to the norm itself — presumably just a cheap divide-by-zero
    # guard; confirm before changing.
    dR_com = dR_com / np.linalg.norm(dR_com + eps, axis=1, keepdims=True)
    # Minimized when the heading N points along the COM direction.
    return f32(0.5) * J_cohesion * (1 - np.sum(dR_com * N, axis=1)) ** 2
def energy_fn(state):
    """Alignment + avoidance + cohesion energy of the flock."""
    boids = state['boids']
    E_align = partial(align_fn, J_align=1., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    E_avoid = vmap(vmap(E_avoid))
    # New Cohesion Code
    E_cohesion = partial(cohesion_fn, J_cohesion=0.005, D_cohesion=40.)
    #
    dR = space.map_product(displacement)(boids.R, boids.R)
    N = normal(boids.theta)
    # Cohesion already reduces over neighbors inside cohesion_fn (one value
    # per boid), so it is not halved like the pairwise terms.
    return (0.5 * np.sum(E_align(dR, N, N) + E_avoid(dR)) +
            np.sum(E_cohesion(dR, N)))
update = dynamics(energy_fn=energy_fn, dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer))
```
Now the boids travel in tighter, more cohesive, packs. By tuning the range of the cohesive interaction and its strength you can change how strongly the boids attempt to stick together. However, if we raise it too high it can have some undesirable consequences.
```
def energy_fn(state):
    """Flock energy with a deliberately over-strong cohesion term."""
    boids = state['boids']
    E_align = partial(align_fn, J_align=1., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    E_avoid = vmap(vmap(E_avoid))
    E_cohesion = partial(cohesion_fn, J_cohesion=0.1, D_cohesion=40.) # Raised from 0.005 to 0.1.
    dR = space.map_product(displacement)(boids.R, boids.R)
    N = normal(boids.theta)
    return (0.5 * np.sum(E_align(dR, N, N) + E_avoid(dR)) +
            np.sum(E_cohesion(dR, N)))
update = dynamics(energy_fn=energy_fn, dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer))
```
### Looking Ahead
When the effect of cohesion is set to a large value, the boids cluster well. However, the motion of the individual flocks becomes less smooth and adopts an almost oscillatory behavior. This is caused by boids in the front of the pack getting pulled towards boids behind them.
To improve this situation, we follow Reynolds and note that animals don't really look in all directions. The behavior of our flocks might look more realistic if we incorporated a "field of view" for the boids. To this end, in both the alignment function and the cohesion function we will ignore boids that are outside of the line of sight for the boid. We will have a particularly simple definition for line of sight by first defining, $\widehat{\Delta R_{ij}} \cdot N_i = \cos\theta_{ij}$ where $\theta_{ij}$ is the angle between the orientation of the boid and the vector from the boid to its neighbor.
Since most animals that display flocking behavior have eyes in the side of their head, as opposed to the front, we will define $\theta_{\text{min}}$ and $\theta_{\text{max}}$ to bound the angular field of view of the boids. Then, we assume each boid can see neighbors if $\cos\theta_{\text{min}} < \cos\theta < \cos\theta_\text{max}$.
```
def field_of_view_mask(dR, N, theta_min, theta_max):
    """True where a neighbor displaced by dR falls inside the angular band
    theta_min < angle < theta_max relative to heading N."""
    direction = dR / space.distance(dR)
    ctheta = np.dot(direction, N)
    # Cosine is monotonically decreasing on [0, pi], so the angular bounds
    # flip into cosine bounds.
    above_lower = ctheta > np.cos(theta_max)
    below_upper = ctheta < np.cos(theta_min)
    return np.logical_and(above_lower, below_upper)
```
We can then adapt the cohesion function to incorporate an arbitrary mask,
```
def cohesion_fn(dR, N, mask, # New mask parameter.
                J_cohesion, D_cohesion, eps=1e-7):
    """Cohesion energy restricted to neighbors selected by `mask`.

    Identical to the earlier cohesion_fn, but the caller supplies an extra
    [n, n] boolean mask (e.g. a field-of-view mask) that is ANDed with the
    distance cutoff before computing the center of mass.
    """
    dR = lax.stop_gradient(dR)
    dr = space.distance(dR)
    # Add a trailing axis so mask and distances broadcast against the
    # spatial axis of dR.
    mask = np.reshape(mask, mask.shape + (1,))
    dr = np.reshape(dr, dr.shape + (1,))
    # Updated Masking Code
    mask = np.logical_and(dr < D_cohesion, mask)
    #
    N_com = np.where(mask, 1.0, 0)
    dR_com = np.where(mask, dR, 0)
    dR_com = np.sum(dR_com, axis=1) / (np.sum(N_com, axis=1) + eps)
    dR_com = dR_com / np.linalg.norm(dR_com + eps, axis=1, keepdims=True)
    return f32(0.5) * J_cohesion * (1 - np.sum(dR_com * N, axis=1)) ** 2
```
And finally run a simulation incorporating the field of view.
```
def energy_fn(state):
    """Alignment + avoidance + cohesion, with alignment and cohesion
    restricted to each boid's field of view."""
    boids = state['boids']
    E_align = partial(align_fn, J_align=12., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    E_avoid = vmap(vmap(E_avoid))
    E_cohesion = partial(cohesion_fn, J_cohesion=0.05, D_cohesion=40.)
    dR = space.map_product(displacement)(boids.R, boids.R)
    N = normal(boids.theta)
    # New FOV code.
    fov = partial(field_of_view_mask,
                  theta_min=0.,
                  theta_max=np.pi / 3.)
    # As before, we have to vmap twice over the displacement matrix, but only once
    # over the normal.
    fov = vmap(vmap(fov, (0, None)))
    mask = fov(dR, N)
    #
    # Alignment is masked elementwise; cohesion applies the mask internally.
    return (0.5 * np.sum(E_align(dR, N, N) * mask + E_avoid(dR)) +
            np.sum(E_cohesion(dR, N, mask)))
update = dynamics(energy_fn=energy_fn, dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer))
```
## Extras
Now that the core elements of the simulation are working well enough, we can add some extras fairly easily. In particular, we'll try to add some obstacles and some predators.
### Obstacles
The first thing we'll add are obstacles that the boids and (soon) the predators will try to avoid as they wander around the simulation. For the purposes of this notebook, we'll restrict ourselves to disk-like obstacles. Each disk will be described by a center position and a radius, $D_\text{Obstacle}$.
```
# Disk obstacles: R holds centers with shape [disk_count, 2];
# D holds radii with shape [disk_count].
Obstacle = namedtuple('Obstacle', ['R', 'D'])
```
Then we can instantiate some obstacles.
```
N_obstacle = 5
R_rng, D_rng = random.split(random.PRNGKey(5))
obstacles = Obstacle(
box_size * random.uniform(R_rng, (N_obstacle, 2)),
random.uniform(D_rng, (N_obstacle,), minval=30.0, maxval=100.0)
)
```
In a similar spirit to the energy functions above, we would like an energy function that encourages the boids to avoid obstacles. For this purpose we will pick an energy function that is similar in form to the alignment function above,
$$
\epsilon_\text{Obstacle}(\Delta R_{io}, N_i, D_o) = \begin{cases}\frac{J_\text{Obstacle}}{\alpha}\left(1 - \frac{\|\Delta R_{io}\|}{D_o}\right)^\alpha\left(1 + N_i\cdot \widehat{\Delta R_{io}}\right)^2 & \|\Delta R_{io}\| < D_o \\ 0 & \text{Otherwise}\end{cases}
$$
for $\Delta R_{io}$ the displacement vector between a boid $i$ and an obstacle $o$. This energy is zero when the boid and the obstacle are not overlapping. When they are overlapping, the energy is minimized when the boid is facing away from the obstacle.
\
We can write down the boid-energy function in python.
```
def obstacle_fn(dR, N, D, J_obstacle):
    """Energy pushing a boid with heading N to turn away from a disk
    obstacle displaced by dR with radius D.

    Zero outside the disk; inside, minimized when the boid faces away from
    the obstacle center.

    NOTE(review): the exponent is hard-coded to 2 and there is no 1/alpha
    prefactor, unlike the formula in the text above — confirm which is
    intended.
    """
    dr = space.distance(dR)
    # Normalize the displacement to a unit direction.
    dR = dR / np.reshape(dr, dr.shape + (1,))
    return np.where(dr < D,
                    J_obstacle * (1 - dr / D) ** 2 * (1 + np.dot(N, dR)) ** 2,
                    0.)
```
Now we can run a simulation that includes obstacles.
```
def energy_fn(state):
    """Full boid energy: FOV-masked alignment and cohesion, avoidance, and
    obstacle avoidance."""
    boids = state['boids']
    d = space.map_product(displacement)
    E_align = partial(align_fn, J_align=12., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    E_avoid = vmap(vmap(E_avoid))
    E_cohesion = partial(cohesion_fn, J_cohesion=0.05, D_cohesion=40.)
    dR = d(boids.R, boids.R)
    N = normal(boids.theta)
    fov = partial(field_of_view_mask,
                  theta_min=0.,
                  theta_max=np.pi / 3.)
    fov = vmap(vmap(fov, (0, None)))
    mask = fov(dR, N)
    # New obstacle code
    obstacles = state['obstacles']
    # Negated so the displacement points toward the boid.
    # NOTE(review): confirm the sign convention against obstacle_fn.
    dR_o = -d(boids.R, obstacles.R)
    D = obstacles.D
    E_obstacle = partial(obstacle_fn, J_obstacle=1000.)
    # Outer vmap pairs dR_o's leading axis with the per-disk radii D;
    # the inner vmap maps over the boid axis alongside N.
    E_obstacle = vmap(vmap(E_obstacle, (0, 0, None)), (0, None, 0))
    #
    return (0.5 * np.sum(E_align(dR, N, N) * mask + E_avoid(dR)) +
            np.sum(E_cohesion(dR, N, mask)) + np.sum(E_obstacle(dR_o, N, D)))
update = dynamics(energy_fn=energy_fn, dt=1e-1, speed=1.)
boids_buffer = []
state = {
'boids': boids,
'obstacles': obstacles
}
for i in ProgressIter(range(400)):
state = lax.fori_loop(0, 50, update, state)
boids_buffer += [state['boids']]
display(render(box_size, boids_buffer, obstacles))
```
The boids are now successfully navigating obstacles in their environment.
### Predators
Next we are going to introduce some predators into the environment for the boids to run away from. Much like the boids, the predators will be described by a position and an angle.
```
# Predator state mirrors Boids: position R and heading angle theta.
Predator = namedtuple('Predator', ['R', 'theta'])
# One predator starting at the center of the box, heading along +x.
predators = Predator(R=np.array([[box_size / 2., box_size /2.]]),
                     theta=np.array([0.0]))
```
The predators will also follow similar dynamics to the boids, swimming in whatever direction they are pointing at some speed that we can choose. Unlike in the previous versions of the simulation, predators naturally introduce some asymmetry to the system. In particular, we would like the boids to flee from the predators, but we want the predators to chase the boids. To achieve this behavior, we will consider a system reminiscent of a two-player game in which the boids move to minimize an energy,
$$
E_\text{Boid} = E_\text{Align} + E_\text{Avoid} + E_\text{Cohesion} + E_\text{Obstacle} + E_\text{Boid-Predator}.
$$
Simultaneously, the predators move in an attempt to minimize a simpler energy,
$$
E_\text{Predator} = E_\text{Predator-Boid} + E_\text{Obstacle}.
$$
To add predators to the environment we therefore need to add two rules, one that dictates the boids behavior near a predator and one for the behavior of predators near a group of boids. In both cases we will see that we can draw significant inspiration from behaviors that we've already developed.
\
We will start with the boid-predator function since it is a bit simpler. In fact, we can use an energy that is virtually identical to the obstacle avoidance energy since the desired behavior is the same.
$$
\epsilon_\text{Boid-Predator}(\Delta R_{ip}, N_i) = \frac{J_\text{Boid-Predator}}\alpha\left(1 - \frac{\|\Delta R_{ip}\|}{D_\text{Boid-Predator}}\right)^\alpha (1 + \widehat{\Delta R_{ip}}\cdot N_i)^2
$$
As before, this function is minimized when the boid is pointing away from the predators. Because we don't want the predators to experience this term we must include a stop-gradient on the predator positions.
```
def boid_predator_fn(R_boid, N_boid, R_predator, J, D, alpha):
    """Energy pushing a single boid to turn away from a single predator.

    Mirrors the obstacle-avoidance energy: nonzero only within the cutoff
    `D`, ramping up as the pair gets closer (shaped by `alpha`), and smallest
    when the boid's heading points away from the predator.

    Args:
      R_boid: position of one boid.
      N_boid: unit normal (heading) of the boid.
      R_predator: position of one predator; wrapped in `stop_gradient` so
        the predator does not feel this term.
      J: coupling strength.
      D: interaction cutoff distance.
      alpha: exponent shaping the ramp inside the cutoff.
    """
    N = N_boid
    # Displacement from predator to boid; the stop-gradient keeps this term
    # from exerting force on the predator.
    dR = displacement(lax.stop_gradient(R_predator), R_boid)
    dr = np.linalg.norm(dR, keepdims=True)
    dR_hat = dR / dr
    # NOTE(review): the formula in the accompanying text squares the
    # (1 + dR_hat . N) factor; the code does not — confirm which is intended.
    return np.where(dr < D,
                    J / alpha * (1 - dr / D) ** alpha * (1 + np.dot(dR_hat, N)),
                    0.)
```
For the predator-boid function we can borrow the cohesion energy that we developed above to have predators that turn towards the center-of-mass of boids in their field of view.
```
def predator_boid_fn(R_predator, N_predator, R_boids, J, D, eps=1e-7):
    """Energy turning one predator toward the center-of-mass of visible boids.

    Reuses the cohesion idea: the energy is minimized when the predator's
    heading points at the mean displacement to the boids that are both within
    distance `D` and inside its field of view.

    Args:
      R_predator: position of a single predator, shape (spatial_dim,).
      N_predator: unit normal of the predator, shape (spatial_dim,).
      R_boids: boid positions, shape (n, spatial_dim); wrapped in
        `stop_gradient` so the boids do not feel this term.
      J: coupling strength.
      D: sensing cutoff distance.
      eps: small constant guarding the divisions when no boid is in range.
    """
    # It is most convenient to define the predator_boid energy function
    # for a single predator and a whole flock of boids. As such we expect shapes,
    #   R_predator : (spatial_dim,)
    #   N_predator : (spatial_dim,)
    #   R_boids : (n, spatial_dim,)
    N = N_predator
    # As such, we need to vectorize over the boids.
    d = vmap(displacement, (0, None))
    dR = d(lax.stop_gradient(R_boids), R_predator)
    dr = space.distance(dR)
    fov = partial(field_of_view_mask,
                  theta_min=0.,
                  theta_max=np.pi / 3.)
    # Vectorize the field-of-view test over the boids as well.
    fov = vmap(fov, (0, None))
    # A boid counts only if it is both close enough and visible.
    mask = np.logical_and(dr < D, fov(dR, N))
    mask = mask[:, np.newaxis]
    # Mean displacement to the visible boids; masked-out boids contribute 0,
    # and eps keeps the division finite when nothing is in range.
    boid_count = np.where(mask, 1.0, 0)
    dR_com = np.where(mask, dR, 0)
    dR_com = np.sum(dR_com, axis=0) / (np.sum(boid_count, axis=0) + eps)
    dR_com = dR_com / np.linalg.norm(dR_com + eps, keepdims=True)
    # Cohesion-style penalty: zero when the heading N already points at the
    # boids' center of mass.
    return f32(0.5) * J * (1 - np.dot(dR_com, N)) ** 2
```
Now we can modify our dynamics to also update predators.
```
def dynamics(energy_fn, dt, boid_speed, predator_speed):
    """Build a jitted step that advances both boids and predators by dt."""

    def move(agents, forces, speed):
        # Shared kinematics: drift along the heading at `speed`, add the
        # positional force, then integrate the angular force.
        R, theta, *_ = agents
        dR, dtheta = forces
        heading = normal(theta)
        new_R = shift(R, dt * (speed * heading + dR))
        new_theta = theta + dt * dtheta
        return new_R, new_theta

    @jit
    def update(_, state):
        dstate = quantity.force(energy_fn)(state)
        moved_boids = move(state['boids'], dstate['boids'], boid_speed)
        moved_predators = move(state['predators'], dstate['predators'], predator_speed)
        state['boids'] = Boids(*moved_boids)
        state['predators'] = Predator(*moved_predators)
        return state

    return update
```
Finally, we can put everything together and run the simulation.
```
def energy_fn(state):
    """Total energy of the scene: boid terms (align / avoid / cohesion /
    obstacle / flee-predator) plus predator terms (obstacle / chase-boids).

    The gradient of this single scalar drives both populations; the
    stop-gradients inside the cross terms ensure each population only
    responds to its own part of the energy.
    """
    boids = state['boids']
    d = space.map_product(displacement)
    # Pairwise boid-boid terms, vectorized over all (i, j) pairs.
    E_align = partial(align_fn, J_align=12., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)), (0, 0, None))
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    E_avoid = vmap(vmap(E_avoid))
    E_cohesion = partial(cohesion_fn, J_cohesion=0.05, D_cohesion=40.)
    dR = d(boids.R, boids.R)
    N = normal(boids.theta)
    # Field-of-view mask restricts alignment/cohesion to visible flockmates.
    fov = partial(field_of_view_mask,
                  theta_min=0.,
                  theta_max=np.pi / 3.)
    fov = vmap(vmap(fov, (0, None)))
    mask = fov(dR, N)
    obstacles = state['obstacles']
    dR_bo = -d(boids.R, obstacles.R)
    D = obstacles.D
    E_obstacle = partial(obstacle_fn, J_obstacle=1000.)
    E_obstacle = vmap(vmap(E_obstacle, (0, 0, None)), (0, None, 0))
    # New predator code.
    predators = state['predators']
    E_boid_predator = partial(boid_predator_fn, J=256.0, D=75.0, alpha=3.)
    E_boid_predator = vmap(vmap(E_boid_predator, (0, 0, None)), (None, None, 0))
    N_predator = normal(predators.theta)
    E_predator_boid = partial(predator_boid_fn, J=0.1, D=95.0)
    E_predator_boid = vmap(E_predator_boid, (0, 0, None))
    dR_po = -d(predators.R, obstacles.R)
    # The 0.5 presumably halves the double-counted (i, j)/(j, i) pair
    # contributions in the dense pairwise sums — TODO confirm.
    E_boid = (0.5 * np.sum(E_align(dR, N, N) * mask + E_avoid(dR)) +
              np.sum(E_cohesion(dR, N, mask)) + np.sum(E_obstacle(dR_bo, N, D)) +
              np.sum(E_boid_predator(boids.R, N, predators.R)))
    E_predator = (np.sum(E_obstacle(dR_po, N_predator, D)) +
                  np.sum(E_predator_boid(predators.R, N_predator, boids.R)))
    return E_boid + E_predator
# Build the stepper and run the simulation, recording one frame per 50 steps.
update = dynamics(energy_fn=energy_fn, dt=1e-1, boid_speed=1., predator_speed=.85)

state = {
    'boids': boids,
    'obstacles': obstacles,
    'predators': predators
}
boids_buffer = []
predators_buffer = []

for _ in ProgressIter(range(400)):
    state = lax.fori_loop(0, 50, update, state)
    boids_buffer.append(state['boids'])
    predators_buffer.append(state['predators'])

display(render(box_size, boids_buffer, obstacles, predators_buffer))
```
We see that our predator now moves around chasing the boids.
### Internal State
Until now, all of the data describing the boids, predators, and obstacles referred to their physical location and orientation. However, we can develop more interesting behavior if we allow the agents in our simulation to have extra data describing their internal state. As an example of this, we will allow predators to accelerate to chase boids if they get close.
\
To this end, we add an extra piece of data to our predator, $t_\text{sprint}$ which is the last time the predator accelerated. If the predator gets within $D_\text{sprint}$ of a boid and it has been at least $T_\text{sprint}$ units of time since it last sprinted it will accelerate. In practice, to accelerate the predator we will adjust its speed so that,
$$
s(t) = s_0 + s_1 e^{-(t - t_\text{sprint}) / \tau_\text{sprint}}
$$
where $s_0$ is the normal speed of the predator, $s_0 + s_1$ is the peak speed, and $\tau_\text{sprint}$ determines how long the sprint lasts for. In practice, rather than storing $t_\text{sprint}$ we will record $\Delta t = t - t_\text{sprint}$ which is the time since the last sprint.
\
Implementing this first requires that we add the necessary data to the predators.
```
# Extend the predator's state with `dt`: the time since its last sprint.
Predator = namedtuple('Predator', 'R theta dt')

predators = Predator(R=np.array([[box_size / 2.] * 2]),
                     theta=np.array([0.0]),
                     dt=np.array([0.]))
def dynamics(energy_fn, dt, boid_speed, predator_speed):
    """Build a jitted update step in which predators can sprint.

    A predator whose nearest boid is closer than `D_sprint`, and that has not
    sprinted for at least `T_sprint` time units, resets its sprint clock and
    temporarily moves faster; the boost decays exponentially with time
    constant `tau_sprint`.
    """
    # We extract common movement functionality into a `move` function.
    def move(boids, dboids, speed):
        R, theta, *_ = boids
        dR, dtheta, *_ = dboids
        n = normal(theta)
        return (shift(R, dt * (speed * n + dR)),
                theta + dt * dtheta)
    @jit
    def update(_, state):
        dstate = quantity.force(energy_fn)(state)
        state['boids'] = Boids(*move(state['boids'], dstate['boids'], boid_speed))
        # New code to accelerate the predators.
        D_sprint = 65.       # sprint if the nearest boid is closer than this
        T_sprint = 300.      # minimum time between sprints
        tau_sprint = 50.     # exponential decay time of the speed boost
        sprint_speed = 2.0   # extra speed at the start of a sprint
        # First we find the distance from each predator to the nearest boid.
        d = space.map_product(space.metric(displacement))
        predator = state['predators']
        dr_min = np.min(d(state['boids'].R, predator.R), axis=1)
        # Check whether there is a near enough boid to bother sprinting and if
        # enough time has elapsed since the last sprint.
        mask = np.logical_and(dr_min < D_sprint, predator.dt > T_sprint)
        predator_dt = np.where(mask, 0., predator.dt + dt)
        # Adjust the speed according to whether or not we're sprinting.
        speed = predator_speed + sprint_speed * np.exp(-predator_dt / tau_sprint)
        # Bug fix: `speed` is per-predator with shape (n,) while the normals
        # inside `move` have shape (n, dim); without a trailing axis the
        # product only broadcasts for special predator counts. The
        # neighbor-list version of this function already does this.
        speed = speed[:, np.newaxis]
        predator_R, predator_theta = move(state['predators'],
                                          dstate['predators'],
                                          speed)
        state['predators'] = Predator(predator_R, predator_theta, predator_dt)
        return state
    return update
# Rebuild the stepper with sprinting dynamics and rerun the simulation.
update = dynamics(energy_fn=energy_fn, dt=1e-1, boid_speed=1., predator_speed=.85)

state = {
    'boids': boids,
    'obstacles': obstacles,
    'predators': predators
}
boids_buffer, predators_buffer = [], []

for _ in ProgressIter(range(400)):
    state = lax.fori_loop(0, 50, update, state)
    boids_buffer.append(state['boids'])
    predators_buffer.append(state['predators'])

display(render(box_size, boids_buffer, obstacles, predators_buffer))
```
## Scaling Up
Up to this point, we have simulated a relatively small flock of $n = 200$ boids. In part we have done this because we compute, at each step, an $n\times n$ matrix of distances. Therefore the computational complexity of the flocking simulation scales as $\mathcal O(n^2)$. However, as Reynolds notes we have built in a locality assumption so that no boids interact provided they are further apart than $D =\max\{D_{\text{Align}}, D_\text{Avoid}, D_\text{Cohesion}\}$. JAX MD provides tools to construct a set of candidates for each boid in about $\mathcal O(n\log n)$ time by precomputing a list of neighbors for each boid. Using neighbor lists we can scale to much larger simulations.
We create lists of all neighbors within a distance of $D + \delta$ and pack them into an array of shape $n\times n_\text{max_neighbors}$. Using this technique we only need to rebuild the neighbor list if any particle has moved more than a distance of $\delta$. We estimate `max_neighbors` from arrangements of particles and if any boid ever has more than this number of neighbors we must rebuild the neighbor list from scratch and recompile our simulation onto device.
To start with we setup a much larger system of boids.
```
# Simulation Parameters.
box_size = 2400.0  # A float specifying the side-length of the box.
boid_count = 2000  # An integer specifying the number of boids.
obstacle_count = 10  # An integer specifying the number of obstacles.
predator_count = 10  # An integer specifying the number of predators.
dim = 2  # The spatial dimension in which we are simulating.
# Create RNG state to draw random numbers. Note: the order of the
# random.split calls below fixes the realized configuration; do not reorder.
rng = random.PRNGKey(0)
# Define periodic boundary conditions.
displacement, shift = space.periodic(box_size)
# Initialize the boids with uniform random positions and headings.
# To generate normal vectors that are uniformly distributed on S^N note that
# one can generate a random normal vector in R^N and then normalize it.
rng, R_rng, theta_rng = random.split(rng, 3)
boids = Boids(
    R = box_size * random.uniform(R_rng, (boid_count, dim)),
    theta = random.uniform(theta_rng, (boid_count,), maxval=2 * np.pi)
)
# Obstacles get random positions and radii drawn from [100, 300).
rng, R_rng, D_rng = random.split(rng, 3)
obstacles = Obstacle(
    R = box_size * random.uniform(R_rng, (obstacle_count, dim)),
    D = random.uniform(D_rng, (obstacle_count,), minval=100, maxval=300.)
)
# Predators start with a zeroed sprint clock.
rng, R_rng, theta_rng = random.split(rng, 3)
predators = Predator(
    R = box_size * random.uniform(R_rng, (predator_count, dim)),
    theta = random.uniform(theta_rng, (predator_count,), maxval=2 * np.pi),
    dt = np.zeros((predator_count,))
)
# Neighbor list: candidate pairs within r_cutoff + dr_threshold; the list only
# needs rebuilding when some particle moves farther than dr_threshold.
neighbor_fn = partition.neighbor_list(displacement,
                                      box_size,
                                      r_cutoff=45.,
                                      dr_threshold=10.,
                                      capacity_multiplier=3)
neighbors = neighbor_fn(boids.R)
# idx has shape (boid_count, max_neighbors).
print(neighbors.idx.shape)
```
We see that despite having 2000 boids, they each only have about 13 neighbors apiece at the start of the simulation. Of course this will grow over time and we will have to rebuild the neighbor list as it does. Next we make some minimal modifications to our energy function to rewrite the energy of our simulation to operate on neighbors. This mostly involves changing some of the vectorization patterns with `vmap` and creating a mask of which neighbors in the $n\times n_\text{max neighbors}$ arrays are filled. In JAX MD we use the pattern `mask = neighbors.idx == len(neighbors.idx)`.
```
def energy_fn(state, neighbors):
    """Neighbor-list version of the flocking energy.

    Identical to the dense version except that the boid-boid terms run over
    each boid's neighbor list, shape (n, max_neighbors), instead of all n^2
    pairs, with a mask ignoring padded neighbor slots. Predator and obstacle
    terms stay dense (there are few of them).
    """
    boids = state['boids']
    d = space.map_product(displacement)
    fov = partial(field_of_view_mask,
                  theta_min=0.,
                  theta_max=np.pi / 3.)
    fov = vmap(vmap(fov, (0, None)))
    E_align = partial(align_fn, J_align=12., D_align=45., alpha=3.)
    E_align = vmap(vmap(E_align, (0, None, 0)))
    E_avoid = partial(avoid_fn, J_avoid=25., D_avoid=30., alpha=3.)
    E_avoid = vmap(vmap(E_avoid))
    E_cohesion = partial(cohesion_fn, J_cohesion=0.05, D_cohesion=40.)
    # New code to extract displacement vector to neighbors and normals.
    R_neighbors = boids.R[neighbors.idx]
    dR = -vmap(vmap(displacement, (None, 0)))(boids.R, R_neighbors)
    N = normal(boids.theta)
    N_neighbors = N[neighbors.idx]
    # New code to add a mask over neighbors as well as field-of-view.
    # Padded slots hold the fill value n (== number of boids), so `< n`
    # keeps only the real neighbors.
    neighbor_mask = neighbors.idx < dR.shape[0]
    fov_mask = np.logical_and(neighbor_mask, fov(dR, N))
    obstacles = state['obstacles']
    dR_bo = -d(boids.R, obstacles.R)
    D = obstacles.D
    E_obstacle = partial(obstacle_fn, J_obstacle=1000.)
    E_obstacle = vmap(vmap(E_obstacle, (0, 0, None)), (0, None, 0))
    # Predator terms are unchanged from the dense version.
    predators = state['predators']
    E_boid_predator = partial(boid_predator_fn, J=256.0, D=75.0, alpha=3.)
    E_boid_predator = vmap(vmap(E_boid_predator, (0, 0, None)), (None, None, 0))
    N_predator = normal(predators.theta)
    E_predator_boid = partial(predator_boid_fn, J=0.1, D=95.0)
    E_predator_boid = vmap(E_predator_boid, (0, 0, None))
    dR_po = -d(predators.R, obstacles.R)
    E_boid = (0.5 * np.sum(E_align(dR, N, N_neighbors) * fov_mask + E_avoid(dR)) +
              np.sum(E_cohesion(dR, N, fov_mask)) + np.sum(E_obstacle(dR_bo, N, D)) +
              np.sum(E_boid_predator(boids.R, N, predators.R)))
    E_predator = (np.sum(E_obstacle(dR_po, N_predator, D)) +
                  np.sum(E_predator_boid(predators.R, N_predator, boids.R)))
    return E_boid + E_predator
```
Next we have to update our simulation to use and update the neighbor list.
```
def dynamics(energy_fn, dt, boid_speed, predator_speed):
    """Update step for the neighbor-list simulation.

    The carried loop state is the pair (state, neighbors); the neighbor list
    is refreshed at the start of every step.
    """
    # We extract common movement functionality into a `move` function.
    def move(boids, dboids, speed):
        R, theta, *_ = boids
        dR, dtheta, *_ = dboids
        n = normal(theta)
        return (shift(R, dt * (speed * n + dR)),
                theta + dt * dtheta)
    @jit
    def update(_, state_and_neighbors):
        state, neighbors = state_and_neighbors
        # New code to update neighbor list.
        neighbors = neighbor_fn(state['boids'].R, neighbors)
        dstate = quantity.force(energy_fn)(state, neighbors)
        state['boids'] = Boids(*move(state['boids'], dstate['boids'], boid_speed))
        # Predator acceleration (sprinting), as in the previous section.
        D_sprint = 65.       # sprint if the nearest boid is closer than this
        T_sprint = 300.      # minimum time between sprints
        tau_sprint = 50.     # exponential decay time of the speed boost
        sprint_speed = 2.0   # extra speed at the start of a sprint
        d = space.map_product(space.metric(displacement))
        predator = state['predators']
        dr_min = np.min(d(state['boids'].R, predator.R), axis=1)
        mask = np.logical_and(dr_min < D_sprint, predator.dt > T_sprint)
        # Sprinting predators reset their clock; the rest keep aging.
        predator_dt = np.where(mask, 0., predator.dt + dt)
        speed = predator_speed + sprint_speed * np.exp(-predator_dt / tau_sprint)
        # Add a trailing axis so the per-predator speed broadcasts against
        # the (n, dim) heading vectors inside `move`.
        speed = speed[:, np.newaxis]
        predator_R, predator_theta = move(state['predators'],
                                          dstate['predators'],
                                          speed)
        state['predators'] = Predator(predator_R, predator_theta, predator_dt)
        return state, neighbors
    return update
```
And now we can conduct our larger simulation.
```
update = dynamics(energy_fn=energy_fn, dt=1e-1, boid_speed=1., predator_speed=.85)
boids_buffer = []
predators_buffer = []
state = {
    'boids': boids,
    'obstacles': obstacles,
    'predators': predators
}
for i in ProgressIter(range(800)):
    # Advance 50 steps; the update threads the neighbor list through as well.
    new_state, neighbors = lax.fori_loop(0, 50, update, (state, neighbors))
    # If the neighbor list can't fit in the allocation, rebuild it but bigger.
    # Note the rebuild starts from the PRE-step `state` and redoes the 50
    # steps, so no motion is lost when the overflowed result is discarded.
    if neighbors.did_buffer_overflow:
        print('REBUILDING')
        neighbors = neighbor_fn(state['boids'].R)
        state, neighbors = lax.fori_loop(0, 50, update, (state, neighbors))
        assert not neighbors.did_buffer_overflow
    else:
        state = new_state
    boids_buffer += [state['boids']]
    predators_buffer += [state['predators']]
display(render(box_size, boids_buffer, obstacles, predators_buffer))
```
At the end of the simulation we can see how large our neighbor list had to be to accommodate all of the boids.
```
# Final neighbor-list capacity, shape (boid_count, max_neighbors), after any
# mid-run rebuilds enlarged it.
print(neighbors.idx.shape)
```
| github_jupyter |
```
import pandas
import matplotlib as mpl
import xarray as xr
import numpy as np
import datetime as dt
# Local CMC SST data directories (Windows drive paths).
# NOTE(review): neither constant appears to be used in the code below —
# downloads go through OPeNDAP URLs and outputs go to ./data/ — confirm.
dir_cmc='F:/data/sst/cmc/CMC0.2deg/v2/'
dir_cmc_clim='F:/data/sst/cmc/CMC0.2deg/v2/climatology/'
def get_filename(lyr, idyjl):
    """Build the PO.DAAC OPeNDAP URL of the daily CMC L4 SST granule.

    Args:
        lyr: 4-digit year.
        idyjl: day of year (1-based).

    Returns:
        Full OPeNDAP URL. Years before 2017 come from the 0.2 deg v2
        collection, 2017 onward from the 0.1 deg v3 collection.
    """
    podaac_dir_v3 = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.1deg/v3/'
    podaac_dir_v2 = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.2deg/v2/'
    d = dt.date(lyr, 1, 1) + dt.timedelta(idyjl - 1)
    syr = str(d.year).zfill(4)
    smon = str(d.month).zfill(2)
    sdym = str(d.day).zfill(2)
    sjdy = str(idyjl).zfill(3)
    stamp = syr + smon + sdym + '120000'
    if lyr < 2017:
        # Bug fix: the v2 branch previously used the 0.1deg/fv03.0 file name
        # and omitted the <year>/<day-of-year>/ directories; the correct v2
        # pattern is the example URL used in the next cell.
        cmc_filename = (podaac_dir_v2 + syr + '/' + sjdy + '/' + stamp +
                        '-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc')
    else:
        cmc_filename = (podaac_dir_v3 + syr + '/' + sjdy + '/' + stamp +
                        '-CMC-L4_GHRSST-SSTfnd-CMC0.1deg-GLOB-v02.0-fv03.0.nc')
    # Bug fix: the original computed the name but never returned it, so every
    # caller received None.
    return cmc_filename
#testing 0.1 CMC
filename = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.2deg/v2/1994/002/19940102120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc'
ds_v2 = xr.open_dataset(filename)
# Target grid: the 0.2 deg v2 lat/lon axes, onto which v3 data is regridded.
new_lat = np.linspace(ds_v2.lat[0], ds_v2.lat[-1], ds_v2.dims['lat'])
new_lon = np.linspace(ds_v2.lon[0], ds_v2.lon[-1], ds_v2.dims['lon'])
# Bug fix: `ds` did not exist yet at this point; the opened dataset is ds_v2.
ds2 = ds_v2.interp(lat=new_lat, lon=new_lon)
#for 2017 - present use 0.1 CMC interpolated onto 0.2 grid for monthly averages and run on pythonanywhere
for lyr in range(2017, 2019):
    # Bug fix: `init` was only assigned inside commented-out code, so the
    # first iteration raised NameError; reset the flag for every year.
    init = 0
    for idyjl in range(1, 366):
        cmc_filename = get_filename(lyr, idyjl)
        ds = xr.open_dataset(cmc_filename, drop_variables=['analysis_error', 'sea_ice_fraction'])
        # Keep only open-water pixels (mask == 1).
        ds_masked = ds.where(ds['mask'] == 1.)
        ds.close()
        # Squared SST, accumulated so a variance can be derived downstream.
        ds_masked['sq_sst'] = ds_masked.analysed_sst**2
        if init == 0:
            ds_sum = ds_masked
            init = 1
        else:
            ds_sum = xr.concat([ds_sum, ds_masked], dim='time')
        print(idyjl, ds_sum.dims)
    # Monthly means for the year. Bug fix: the old follow-up line
    # `combined = xr.concat(ds_mnth, dim='time')` referenced the commented-out
    # ds_mnth list and clobbered this result; it has been removed.
    combined = ds_sum.resample(time='1M').mean()
    # Bug fix: `syr` was a local variable of get_filename; rebuild it here.
    syr = str(lyr).zfill(4)
    fname_tem = 'monthly_average_' + syr + '120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc'
    cmc_filename_out = './data/' + fname_tem
    combined.to_netcdf(cmc_filename_out)
```
| github_jupyter |
```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import pickle
import numpy as np
import pandas as pd
import skimage.io as io
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
from keras.applications.resnet50 import preprocess_input
from keras.models import Model
```
### In this part, we conduct the following procedure to make our data be analytic-ready.
**Step 1.** For every species, we select out the **representative images**.
**Step 2.** For every species representative image, we calculate its **HSV values with regard of different parts** (body, forewing, hindwing, whole)
**Step 3.** For every species representative image, we extract its **2048-dimensional features** from the well-trained neural network model
**Step 4.** We cluster species based on either the 2-dimensional t-SNE map or 2048D features into **k assemblages through k-Means Clustering**
**Step 5.** We use **t-SNE to compress its 2048-dimensional features** into one dimension as the trait value
**Step 6.** We quantify the **assemblage-level color diversity** by calculating the average cosine distance among every pair of species in the same assemblage
### output files:
1. **all_complete_table.csv**: main result for further analysis where a row implies a **species**
2. **trait_analysis.csv**: trait value for T-statistics analysis (T stands for trait), where a row implies an **image**
3. **cluster_center.csv**: information about assemblage centers where a row implies an assemblage center
4. **in-cluser_pairwise_diversity.csv**: result of pair-wise color distance where a row implies a pair of species
```
# Directory holding the trained model and its train/test split.
model_dirname = '/home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181121_4'
# read testing dataset and set the path to obtain every part's mask
Xtest = pd.read_csv(os.path.join(model_dirname, 'test.csv'))
# Derive per-image file paths: the whole cropped moth plus the body-only,
# "down"- and "up"-wing masked variants living in sibling directories.
Xtest['img_rmbg_path'] = Xtest.Number.apply(lambda x: '/home/put_data/moth/data/whole_crop/'+str(x)+'.png')
Xtest['img_keep_body_path'] = Xtest.img_rmbg_path.apply(lambda x: x.replace('whole_crop','KEEP_BODY'))
Xtest['img_keep_down_path'] = Xtest.img_rmbg_path.apply(lambda x: x.replace('whole_crop','KEEP_DOWN'))
Xtest['img_keep_up_path'] = Xtest.img_rmbg_path.apply(lambda x: x.replace('whole_crop','KEEP_UP'))
Xtest = Xtest.reset_index()
Xtest.drop(columns='index', inplace=True)
# get the dictionary to look up the average elevation of a species
with open(os.path.join('/home/put_data/moth/metadata/1121_Y_mean_dict.pickle'), 'rb') as handle:
    Y_dict = pickle.load(handle)
# Per-image target: the mean elevation of the image's species.
Ytest = np.vstack(Xtest['Species'].apply(lambda x: Y_dict[x]))
# aggregate the testing data by Species: one row per species, holding the
# list of that species' row indices in Xtest.
df_species_group = Xtest.groupby('Species').apply(
    lambda g: pd.Series({
        'indices': g.index.tolist(),
    }))
# Shuffle species order. NOTE(review): no random_state is given, so this is
# nondeterministic between runs.
df_species_group = df_species_group.sample(frac=1).reset_index()
display(df_species_group.head())
```
### Step 1.
```
# For every species, pick the image whose recorded altitude is closest to the
# species' mean elevation; that image becomes the species representative.
sel = []
for _, row in df_species_group.iterrows():
    candidate_idx = row['indices']
    alt_gap = np.abs(np.array(Xtest.Alt[candidate_idx]) - Y_dict[row['Species']])
    sel.append(candidate_idx[np.argmin(alt_gap)])

# Xout / Yout: representative images only, reindexed from zero.
Xout = Xtest.iloc[sel]
Yout = Ytest[sel]
Xout = Xout.reset_index(drop=True)
Xout.head()
```
### Step 2.
```
# extract the HSV features for species representatives
import skimage.color as color

def img_metrics(img):
    """Mean and std HSV over the moth pixels of one RGB image.

    Pixels whose channel mean is exactly 255 (the pure-white background left
    by background removal) are excluded from the statistics.
    """
    hsv = color.rgb2hsv(img)
    foreground = np.mean(img, axis=2) != 255.0
    rows, cols = np.where(foreground)
    moth_pixels = hsv[rows, cols]
    return np.mean(moth_pixels, axis=0), np.std(moth_pixels, axis=0)
df_reg_list = list()
species_list = list()
# NOTE(review): filename_list is never appended to below — appears unused.
filename_list = list()
for k in range(Xout.shape[0]):
    print(k, end='\r')
    species = Xout.iloc[k]['Species']
    species_list.append(species)
    # Load the body-only image and zero out its (white) background so it can
    # be composited with the forewing image below.
    body_img = io.imread(Xout.iloc[k]['img_keep_body_path'])
    mask = 1.0 - (np.mean(body_img, axis=2)==255.0) + 0.0
    body_img[:,:,0] = body_img[:,:,0]*mask
    body_img[:,:,1] = body_img[:,:,1]*mask
    body_img[:,:,2] = body_img[:,:,2]*mask
    # Composite forewing ("up") + body by pixel-wise addition.
    # NOTE(review): if these arrays are uint8 the in-place addition can wrap
    # around wherever the two images overlap — confirm the masks are disjoint.
    img = io.imread(Xout.iloc[k]['img_keep_up_path'])
    img += body_img
    alt = Y_dict[Xout.iloc[k]['Species']]
    # HSV statistics of the body+forewing composite and of the whole moth.
    mean_hsv, std_hsv = img_metrics(img)
    whole_img = io.imread(Xout.iloc[k]['img_rmbg_path'])
    whole_mean_hsv, whole_std_hsv = img_metrics(whole_img)
    # One row per species: whole-image mean HSV, body+forewing mean HSV, alt.
    res = np.append(whole_mean_hsv[:3], mean_hsv[:3])
    res = np.append(res, [alt])
    df_reg_list.append(res)
df_reg_output = pd.DataFrame(data=df_reg_list,
                             columns=['h.whole', 's.whole', 'v.whole',
                                      'h.body_fore','s.body_fore', 'v.body_fore','alt'])
```
### Step 3.
```
# Build an extractor exposing the 2048-D global-average-pooling activations
# of the trained ResNet, then embed every representative image.
from keras.models import load_model

model = load_model(os.path.join(model_dirname, 'model.h5'))
pooling_layer = model.get_layer('global_average_pooling2d_1')
extractor = Model(inputs=model.input, outputs=pooling_layer.output)

TestImg = np.stack([io.imread(path) for path in Xout['img_rmbg_path']])
TestInput = preprocess_input(TestImg.astype(float))
Fout = extractor.predict(x=TestInput)

# Targets (species mean elevation) aligned with the representatives.
Yout = np.array([Y_dict[sp] for sp in Xout.Species])
np.save(file='Species_Representative_1047x2048.npy', arr=Fout)
Fout.shape
```
### Step 4.
```
# compress 2048-D features to 2-D map for visualization and clustering
from sklearn.manifold import TSNE
F_embedded = TSNE(n_components=2, perplexity=120).fit_transform(Fout)
from sklearn.cluster import KMeans
from sklearn import metrics
from time import time

def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data`, report timing / inertia / silhouette, and
    return the fitted estimator."""
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    # Silhouette over a 500-point sample with cosine distance — the same
    # metric used for the diversity analysis later on.
    silhouette = metrics.silhouette_score(data, estimator.labels_,
                                          metric='cosine',
                                          sample_size=500)
    print('%-9s\t%.2fs\t%.3f\t%.3f'
          % (name, elapsed, estimator.inertia_, silhouette))
    return estimator
# Fit k-means (k = 30 assemblages) directly on the raw 2048-D features.
for k in [30]:
    km = KMeans(init='k-means++', n_clusters=k, n_init=20)
    km = bench_k_means(km, name="k-means++", data=Fout)
# Inspect the cluster sizes.
from collections import Counter
Counter(km.labels_)
# Attach the 2-D t-SNE coordinates and the cluster label to each species.
Xout['tsne.0'] = F_embedded[:,0]
Xout['tsne.1'] = F_embedded[:,1]
Xout['km_label'] = km.labels_
# representative image information: metadata + HSV columns, one row/species
resout = pd.concat([Xout, df_reg_output], axis=1)
resout.to_csv(os.path.join(model_dirname, 'all_complete_table.csv'), index=False)
```
#### - If clustering based on t-SNE maps
```
# # cluster information
# stat = Xout[['km_label','Alt']].groupby('km_label').apply(np.mean)
# stat = stat.sort_values('Alt')
# stat.columns = ['km_label', 'class_alt']
# # center information
# centers = km.cluster_centers_
# myk = km.cluster_centers_.shape[0]
# centx, centy = list(), list()
# for i in range(stat.shape[0]):
# centx.append(centers[int(stat.iloc[i]['km_label']),0])
# centy.append(centers[int(stat.iloc[i]['km_label']),1])
# # add center information into clustere information
# stat['center_x'] = centx
# stat['center_y'] = centy
# stat['order'] = np.arange(myk)
# # output cluster information
# stat.to_csv(os.path.join(model_dirname,'cluster_center.csv'), index=False)
```
#### - If clustering based on 2048D features
```
from sklearn.metrics.pairwise import pairwise_distances
# cluster information: mean elevation of the species in each assemblage
stat = Xout[['km_label','Alt']].groupby('km_label').apply(np.mean)
stat = stat.sort_values('km_label')
stat.columns = ['km_label', 'class_alt']
# center information: for each cluster, locate the member species nearest to
# the k-means centroid (cosine distance) and use its t-SNE coordinates as
# the plotted cluster center.
centers = km.cluster_centers_
myk = km.cluster_centers_.shape[0]
centx, centy = list(), list()
for i in range(myk):
    center = centers[i:(i+1), :]
    sel = np.where(km.labels_ == i)[0]
    # Bug fix: `Fout2` was never defined (NameError); the features that were
    # clustered above are `Fout`.
    nearest_species = np.argmin(pairwise_distances(X=center, Y=Fout[sel], metric='cosine'))
    i_nearest_species = sel[nearest_species]
    centx.append(F_embedded[i_nearest_species, 0])
    centy.append(F_embedded[i_nearest_species, 1])
# add center information into cluster information, ordered by elevation
stat['center_x'] = centx
stat['center_y'] = centy
stat = stat.sort_values('class_alt')
stat['order'] = np.arange(myk)
# output cluster information
stat.to_csv(os.path.join(model_dirname,'cluster_center.csv'), index=False)
```
### Step 5.
```
# compress 2048-D features to 1-D trait for functional trait analysis
TestImg = list()
for i in range(Xtest.shape[0]):
img = io.imread(list(Xtest['img_rmbg_path'])[i])
TestImg.append(img)
TestImg = np.stack(TestImg)
TestInput = preprocess_input(TestImg.astype(float))
Ftest = extractor.predict(x=TestInput)
from sklearn.manifold import TSNE
F_trait = TSNE(n_components=1, perplexity=100).fit_transform(Ftest)
F_trait = F_trait - np.min(F_trait)
Xtest['trait'] = F_trait[:,0]
np.save(file='Species_TestingInstance_4249x2048.npy', arr=Ftest)
# image trait information table
dtrait = pd.merge(Xtest[['Species', 'trait']], resout[['Species','km_label','alt']], how='left', on='Species')
dtrait.to_csv(os.path.join(model_dirname, 'trait_analysis.csv'), index=False)
```
### Step 6.
```
# Assemblage-level color diversity: cosine distance between every pair of
# species sharing an assemblage.
from sklearn.metrics.pairwise import pairwise_distances

# Map between raw k-means labels and elevation-ordered ranks so downstream
# plots can index assemblages by elevation order.
km_label_to_order = {int(stat.iloc[i]['km_label']): i for i in range(myk)}
order_to_km_label = {i: int(stat.iloc[i]['km_label']) for i in range(myk)}

pair_diversity = np.array([])
order = np.array([])
for rank in range(myk):
    members = np.where(resout.km_label == order_to_km_label[rank])[0]
    if len(members) == 1:
        # A singleton assemblage contributes a single zero distance.
        dist_list = np.array([0])
    else:
        # Collect the upper triangle of the pairwise cosine-distance matrix.
        dmat = pairwise_distances(Fout[members, :], metric='cosine')
        dist_list = np.array([])
        for r in range(dmat.shape[0]):
            dist_list = np.append(dist_list, dmat[r, (r + 1):])
    pair_diversity = np.append(pair_diversity, dist_list)
    order = np.append(order, np.repeat(rank, len(dist_list)))

di = pd.DataFrame({'diversity': pair_diversity,
                   'order': order})
di.to_csv(os.path.join(model_dirname, 'in-cluser_pairwise_diversity.csv'), index=False)
```
| github_jupyter |
##### Copyright 2020 The OpenFermion Developers
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Introduction to OpenFermion
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/openfermion/tutorials/intro_to_openfermion"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/OpenFermion/blob/master/docs/tutorials/intro_to_openfermion.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/OpenFermion/blob/master/docs/tutorials/intro_to_openfermion.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/OpenFermion/docs/tutorials/intro_to_openfermion.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
Note: The examples below must be run sequentially within a section.
## Setup
Install the OpenFermion package:
```
# Import OpenFermion, installing it from GitHub master on the fly (e.g. on
# Colab) if it is not already available. The `!pip` line is IPython shell
# magic and only works inside a notebook.
try:
    import openfermion
except ImportError:
    !pip install git+https://github.com/quantumlib/OpenFermion.git@master#egg=openfermion
```
## Initializing the FermionOperator data structure
Fermionic systems are often treated in second quantization where arbitrary operators can be expressed using the fermionic creation and annihilation operators, $a^\dagger_k$ and $a_k$. The fermionic ladder operators play a similar role to their qubit ladder operator counterparts, $\sigma^+_k$ and $\sigma^-_k$ but are distinguished by the canonical fermionic anticommutation relations, $\{a^\dagger_i, a^\dagger_j\} = \{a_i, a_j\} = 0$ and $\{a_i, a_j^\dagger\} = \delta_{ij}$. Any weighted sums of products of these operators are represented with the FermionOperator data structure in OpenFermion. The following are examples of valid FermionOperators:
$$
\begin{align}
& a_1 \nonumber \\
& 1.7 a^\dagger_3 \nonumber \\
&-1.7 \, a^\dagger_3 a_1 \nonumber \\
&(1 + 2i) \, a^\dagger_4 a^\dagger_3 a_9 a_1 \nonumber \\
&(1 + 2i) \, a^\dagger_4 a^\dagger_3 a_9 a_1 - 1.7 \, a^\dagger_3 a_1 \nonumber
\end{align}
$$
The FermionOperator class is contained in $\textrm{ops/_fermion_operator.py}$. In order to support fast addition of FermionOperator instances, the class is implemented as a hash table (python dictionary). The keys of the dictionary encode the strings of ladder operators and values of the dictionary store the coefficients. The strings of ladder operators are encoded as a tuple of 2-tuples which we refer to as the "terms tuple". Each ladder operator is represented by a 2-tuple. The first element of the 2-tuple is an int indicating the tensor factor on which the ladder operator acts. The second element of the 2-tuple is a Boolean: 1 represents raising and 0 represents lowering. For instance, $a^\dagger_8$ is represented in a 2-tuple as $(8, 1)$. Note that indices start at 0 and the identity operator is an empty list. Below we give some examples of operators and their terms tuple:
$$
\begin{align}
I & \mapsto () \nonumber \\
a_1 & \mapsto ((1, 0),) \nonumber \\
a^\dagger_3 & \mapsto ((3, 1),) \nonumber \\
a^\dagger_3 a_1 & \mapsto ((3, 1), (1, 0)) \nonumber \\
a^\dagger_4 a^\dagger_3 a_9 a_1 & \mapsto ((4, 1), (3, 1), (9, 0), (1, 0)) \nonumber
\end{align}
$$
Note that when initializing a single ladder operator one should be careful to add the comma after the inner pair. This is because in python ((1, 2)) evaluates to the plain tuple (1, 2) — the outer parentheses are just grouping — whereas ((1, 2),) is a tuple containing the tuple (1, 2). The "terms tuple" is usually convenient when one wishes to initialize a term as part of a coded routine. However, the terms tuple is not particularly intuitive. Accordingly, OpenFermion also supports another user-friendly, string notation below. This representation is rendered when calling "print" on a FermionOperator.
$$
\begin{align}
I & \mapsto \textrm{""} \nonumber \\
a_1 & \mapsto \textrm{"1"} \nonumber \\
a^\dagger_3 & \mapsto \textrm{"3^"} \nonumber \\
a^\dagger_3 a_1 & \mapsto \textrm{"3^}\;\textrm{1"} \nonumber \\
a^\dagger_4 a^\dagger_3 a_9 a_1 & \mapsto \textrm{"4^}\;\textrm{3^}\;\textrm{9}\;\textrm{1"} \nonumber
\end{align}
$$
Let's initialize our first term! We do it two different ways below.
```
# Initialize the term a^dagger_3 a_1 two equivalent ways:
# via the terms tuple and via the string notation.
from openfermion.ops import FermionOperator
# Terms-tuple form: each inner 2-tuple is (mode index, 1=raise / 0=lower).
my_term = FermionOperator(((3, 1), (1, 0)))
print(my_term)
# Equivalent string form: '^' marks a creation (raising) operator.
my_term = FermionOperator('3^ 1')
print(my_term)
```
The preferred way to specify the coefficient in openfermion is to provide an optional coefficient argument. If not provided, the coefficient defaults to 1. In the code below, the first method is preferred. The multiplication in the second method actually creates a copy of the term, which introduces some additional cost. All in-place operators (such as +=) modify classes whereas binary operators such as + create copies. Important caveats are that the empty tuple FermionOperator(()) and the empty string FermionOperator('') both initialize the identity. The empty initializer FermionOperator() initializes the zero operator.
```
# Preferred: pass the coefficient as the second constructor argument.
good_way_to_initialize = FermionOperator('3^ 1', -1.7)
print(good_way_to_initialize)
# Works, but scalar multiplication creates an extra copy of the term.
bad_way_to_initialize = -1.7 * FermionOperator('3^ 1')
print(bad_way_to_initialize)
# Empty string -> identity operator; empty constructor -> zero operator.
identity = FermionOperator('')
print(identity)
zero_operator = FermionOperator()
print(zero_operator)
```
Note that FermionOperator has only one attribute: .terms. This attribute is the dictionary which stores the term tuples.
```
# A FermionOperator's only attribute is .terms: a dict mapping
# terms tuples to (complex) coefficients.
my_operator = FermionOperator('4^ 1^ 3 9', 1. + 2.j)
print(my_operator)
print(my_operator.terms)
```
## Manipulating the FermionOperator data structure
So far we have explained how to initialize a single FermionOperator such as $-1.7 \, a^\dagger_3 a_1$. However, in general we will want to represent sums of these operators such as $(1 + 2i) \, a^\dagger_4 a^\dagger_3 a_9 a_1 - 1.7 \, a^\dagger_3 a_1$. To do this, just add together two FermionOperators! We demonstrate below.
```
# Build a sum of terms two ways: binary + (copies operands) and
# in-place += (more efficient for large operators).
from openfermion.ops import FermionOperator
term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j)
term_2 = FermionOperator('3^ 1', -1.7)
my_operator = term_1 + term_2
print(my_operator)
# Same result via the in-place method, which avoids the copies.
my_operator = FermionOperator('4^ 3^ 9 1', 1. + 2.j)
term_2 = FermionOperator('3^ 1', -1.7)
my_operator += term_2
print('')
print(my_operator)
```
The print function prints each term in the operator on a different line. Note that the line my_operator = term_1 + term_2 creates a new object, which involves a copy of term_1 and term_2. The second block of code uses the inplace method +=, which is more efficient. This is especially important when trying to construct a very large FermionOperator. FermionOperators also support a wide range of builtins including, str(), repr(), ==, !=, *=, *, /, /=, +, +=, -, -=, - and **. Note that since FermionOperators involve floats, == and != check for (in)equality up to numerical precision. We demonstrate some of these methods below.
```
# Demonstrate arithmetic builtins: -, *, ** and (in)equality checks
# (== / != compare up to numerical precision).
term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j)
term_2 = FermionOperator('3^ 1', -1.7)
my_operator = term_1 - 33. * term_2
print(my_operator)
my_operator *= 3.17 * (term_2 + term_1) ** 2
print('')
print(my_operator)
print('')
print(term_2 ** 3)
print('')
# 2*t - t == t, so this prints True; the second comparison is False.
print(term_1 == 2.*term_1 - term_1)
print(term_1 == my_operator)
```
Additionally, there are a variety of methods that act on the FermionOperator data structure. We demonstrate a small subset of those methods here.
```
from openfermion.utils import commutator, count_qubits, hermitian_conjugated
from openfermion.transforms import normal_ordered
# Get the Hermitian conjugate of a FermionOperator, count its qubits, check if it is normal-ordered.
term_1 = FermionOperator('4^ 3 3^', 1. + 2.j)
print(hermitian_conjugated(term_1))
print(term_1.is_normal_ordered())
print(count_qubits(term_1))
# Normal order the term (creation operators moved to the left).
term_2 = normal_ordered(term_1)
print('')
print(term_2)
print(term_2.is_normal_ordered())
# Compute a commutator of the terms.
print('')
print(commutator(term_1, term_2))
```
## The QubitOperator data structure
The QubitOperator data structure is another essential part of openfermion. As the name suggests, QubitOperator is used to store qubit operators in almost exactly the same way that FermionOperator is used to store fermion operators. For instance $X_0 Z_3 Y_4$ is a QubitOperator. The internal representation of this as a terms tuple would be $((0, \textrm{"X"}), (3, \textrm{"Z"}), (4, \textrm{"Y"}))$. Note that one important difference between QubitOperator and FermionOperator is that the terms in QubitOperator are always sorted in order of tensor factor. In some cases, this enables faster manipulation. We initialize some QubitOperators below.
```
# QubitOperator stores Pauli strings; terms tuples use (qubit, 'X'/'Y'/'Z')
# pairs and are always kept sorted by tensor factor.
from openfermion.ops import QubitOperator
my_first_qubit_operator = QubitOperator('X1 Y2 Z3')
print(my_first_qubit_operator)
print(my_first_qubit_operator.terms)
operator_2 = QubitOperator('X3 Z4', 3.17)
operator_2 -= 77. * my_first_qubit_operator
print('')
print(operator_2)
```
## Jordan-Wigner and Bravyi-Kitaev
openfermion provides functions for mapping FermionOperators to QubitOperators.
```
# Map a Hermitian FermionOperator to qubits with Jordan-Wigner and
# Bravyi-Kitaev, and verify the two encodings are iso-spectral.
from openfermion.ops import FermionOperator
from openfermion.transforms import jordan_wigner, bravyi_kitaev
from openfermion.utils import hermitian_conjugated
from openfermion.linalg import eigenspectrum
# Initialize an operator (adding the conjugate makes it Hermitian).
fermion_operator = FermionOperator('2^ 0', 3.17)
fermion_operator += hermitian_conjugated(fermion_operator)
print(fermion_operator)
# Transform to qubits under the Jordan-Wigner transformation and print its spectrum.
jw_operator = jordan_wigner(fermion_operator)
print('')
print(jw_operator)
jw_spectrum = eigenspectrum(jw_operator)
print(jw_spectrum)
# Transform to qubits under the Bravyi-Kitaev transformation and print its spectrum.
bk_operator = bravyi_kitaev(fermion_operator)
print('')
print(bk_operator)
bk_spectrum = eigenspectrum(bk_operator)
print(bk_spectrum)
```
We see that despite the different representation, these operators are iso-spectral. We can also apply the Jordan-Wigner transform in reverse to map arbitrary QubitOperators to FermionOperators. Note that we also demonstrate the .compress() method (a method on both FermionOperators and QubitOperators) which removes zero entries.
```
# Round-trip a QubitOperator through the inverse Jordan-Wigner transform
# and back, then drop numerically-zero terms with .compress().
from openfermion.transforms import reverse_jordan_wigner
# Initialize QubitOperator.
my_operator = QubitOperator('X0 Y1 Z2', 88.)
my_operator += QubitOperator('Z1 Z4', 3.17)
print(my_operator)
# Map QubitOperator to a FermionOperator.
mapped_operator = reverse_jordan_wigner(my_operator)
print('')
print(mapped_operator)
# Map the operator back to qubits and make sure it is the same.
back_to_normal = jordan_wigner(mapped_operator)
back_to_normal.compress()
print('')
print(back_to_normal)
```
## Sparse matrices and the Hubbard model
Often, one would like to obtain a sparse matrix representation of an operator which can be analyzed numerically. There is code in both openfermion.transforms and openfermion.utils which facilitates this. The function get_sparse_operator converts either a FermionOperator, a QubitOperator or other more advanced classes such as InteractionOperator to a scipy.sparse.csc matrix. There are numerous functions in openfermion.utils which one can call on the sparse operators such as "get_gap", "get_hartree_fock_state", "get_ground_state", etc. We show this off by computing the ground state energy of the Hubbard model. To do that, we use code from the openfermion.hamiltonians module which constructs lattice models of fermions such as Hubbard models.
```
# Build a 2x2 spinless Hubbard model, map it to qubits, convert it to a
# sparse matrix, and compute its ground-state energy.
from openfermion.hamiltonians import fermi_hubbard
from openfermion.linalg import get_sparse_operator, get_ground_state
from openfermion.transforms import jordan_wigner
# Set model.
x_dimension = 2
y_dimension = 2
tunneling = 2.
coulomb = 1.
magnetic_field = 0.5
chemical_potential = 0.25
# periodic / spinless are boolean flags passed as 1 (True).
periodic = 1
spinless = 1
# Get fermion operator.
hubbard_model = fermi_hubbard(
    x_dimension, y_dimension, tunneling, coulomb, chemical_potential,
    magnetic_field, periodic, spinless)
print(hubbard_model)
# Get qubit operator under Jordan-Wigner.
jw_hamiltonian = jordan_wigner(hubbard_model)
jw_hamiltonian.compress()
print('')
print(jw_hamiltonian)
# Get scipy.sparse.csc representation.
sparse_operator = get_sparse_operator(hubbard_model)
print('')
print(sparse_operator)
print('\nEnergy of the model is {} in units of T and J.'.format(
    get_ground_state(sparse_operator)[0]))
```
## Hamiltonians in the plane wave basis
A user can write plugins to openfermion which allow for the use of, e.g., third-party electronic structure package to compute molecular orbitals, Hamiltonians, energies, reduced density matrices, coupled cluster amplitudes, etc using Gaussian basis sets. We may provide scripts which interface between such packages and openfermion in future but do not discuss them in this tutorial.
When using simpler basis sets such as plane waves, these packages are not needed. openfermion comes with code which computes Hamiltonians in the plane wave basis. Note that when using plane waves, one is working with the periodized Coulomb operator, best suited for condensed phase calculations such as studying the electronic structure of a solid. To obtain these Hamiltonians one must choose to study the system without a spin degree of freedom (spinless), one must then specify the dimension in which the calculation is performed (n_dimensions, usually 3), one must specify how many plane waves are in each dimension (grid_length) and one must specify the length scale of the plane wave harmonics in each dimension (length_scale) and also the locations and charges of the nuclei. One can generate these models with plane_wave_hamiltonian() found in openfermion.hamiltonians. For simplicity, below we compute the Hamiltonian in the case of zero external charge (corresponding to the uniform electron gas, aka jellium). We also demonstrate that one can transform the plane wave Hamiltonian using a Fourier transform without affecting the spectrum of the operator.
```
# Build a tiny 1D jellium model in the momentum basis, Fourier transform it
# to the position basis, and check the two representations are iso-spectral.
from openfermion.hamiltonians import jellium_model
from openfermion.utils import Grid
from openfermion.linalg import eigenspectrum
from openfermion.transforms import jordan_wigner, fourier_transform
# Let's look at a very small model of jellium in 1D: 3 plane waves, unit scale.
grid = Grid(dimensions=1, length=3, scale=1.0)
spinless = True
# Get the momentum Hamiltonian.
momentum_hamiltonian = jellium_model(grid, spinless)
momentum_qubit_operator = jordan_wigner(momentum_hamiltonian)
momentum_qubit_operator.compress()
print(momentum_qubit_operator)
# Fourier transform the Hamiltonian to the position basis.
position_hamiltonian = fourier_transform(momentum_hamiltonian, grid, spinless)
position_qubit_operator = jordan_wigner(position_hamiltonian)
position_qubit_operator.compress()
print('')
print (position_qubit_operator)
# Check the spectra to make sure these representations are iso-spectral
# (the difference should vanish to numerical precision).
spectral_difference = eigenspectrum(momentum_qubit_operator) - eigenspectrum(position_qubit_operator)
print('')
print(spectral_difference)
```
## Basics of MolecularData class
Data from electronic structure calculations can be saved in an OpenFermion data structure called MolecularData, which makes it easy to access within our library. Often, one would like to analyze a chemical series or look at many different Hamiltonians and sometimes the electronic structure calculations are either expensive to compute or difficult to converge (e.g. one needs to mess around with different types of SCF routines to make things converge). Accordingly, we anticipate that users will want some way to automatically database the results of their electronic structure calculations so that important data (such as the SCF integrals) can be looked up on-the-fly if the user has computed them in the past. OpenFermion supports a data provenance strategy which saves key results of the electronic structure calculation (including pointers to files containing large amounts of data, such as the molecular integrals) in an HDF5 container.
The MolecularData class stores information about molecules. One initializes a MolecularData object by specifying parameters of a molecule such as its geometry, basis, multiplicity, charge and an optional string describing it. One can also initialize MolecularData simply by providing a string giving a filename where a previous MolecularData object was saved in an HDF5 container. One can save a MolecularData instance by calling the class's .save() method. This automatically saves the instance in a data folder specified during OpenFermion installation. The name of the file is generated automatically from the instance attributes and optionally provided description. Alternatively, a filename can also be provided as an optional input if one wishes to manually name the file.
When electronic structure calculations are run, the data files for the molecule can be automatically updated. If one wishes to later use that data they either initialize MolecularData with the instance filename or initialize the instance and then later call the .load() method.
Basis functions are provided to initialization using a string such as "6-31g". Geometries can be specified using a simple txt input file (see geometry_from_file function in molecular_data.py) or can be passed using a simple python list format demonstrated below. Atoms are specified using a string for their atomic symbol. Distances should be provided in angstrom. Below we initialize a simple instance of MolecularData without performing any electronic structure calculations.
```
# Initialize a MolecularData instance for H2 without running any
# electronic structure calculation, and print a few of its attributes.
from openfermion.chem import MolecularData
# Set parameters to make a simple molecule (distances in angstrom).
diatomic_bond_length = .7414
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
basis = 'sto-3g'
multiplicity = 1
charge = 0
# The description string is folded into the auto-generated file name.
description = str(diatomic_bond_length)
# Make molecule and print out a few interesting facts about it.
molecule = MolecularData(geometry, basis, multiplicity,
                         charge, description)
print('Molecule has automatically generated name {}'.format(
    molecule.name))
print('Information about this molecule would be saved at:\n{}\n'.format(
    molecule.filename))
print('This molecule has {} atoms and {} electrons.'.format(
    molecule.n_atoms, molecule.n_electrons))
for atom, atomic_number in zip(molecule.atoms, molecule.protons):
    print('Contains {} atom, which has {} protons.'.format(
        atom, atomic_number))
```
If we had previously computed this molecule using an electronic structure package, we can call molecule.load() to populate all sorts of interesting fields in the data structure. Though we make no assumptions about what electronic structure packages users might install, we assume that the calculations are saved in OpenFermion's MolecularData objects. Currently plugins are available for [Psi4](http://psicode.org/) [(OpenFermion-Psi4)](http://github.com/quantumlib/OpenFermion-Psi4) and [PySCF](https://github.com/sunqm/pyscf) [(OpenFermion-PySCF)](http://github.com/quantumlib/OpenFermion-PySCF), and there may be more in the future. For the purposes of this example, we will load data that ships with OpenFermion to make a plot of the energy surface of hydrogen. Note that helper functions to initialize some interesting chemical benchmarks are found in openfermion.utils.
```
# Scan the H2 bond length, loading precomputed energies (HF, MP2, FCI)
# shipped with OpenFermion, then plot the HF and FCI potential curves.
# NOTE(review): molecule.load() assumes matching HDF5 data files exist for
# every bond length in the scan — confirm the data ships with the install.
# Set molecule parameters.
basis = 'sto-3g'
multiplicity = 1
bond_length_interval = 0.1
n_points = 25
# Generate molecule at different bond lengths (from 0.3 angstrom upward).
hf_energies = []
fci_energies = []
bond_lengths = []
for point in range(3, n_points + 1):
    bond_length = bond_length_interval * point
    bond_lengths += [bond_length]
    # Round so the description matches the saved files' naming convention.
    description = str(round(bond_length,2))
    print(description)
    geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))]
    molecule = MolecularData(
        geometry, basis, multiplicity, description=description)
    # Load data.
    molecule.load()
    # Print out some results of calculation.
    print('\nAt bond length of {} angstrom, molecular hydrogen has:'.format(
        bond_length))
    print('Hartree-Fock energy of {} Hartree.'.format(molecule.hf_energy))
    print('MP2 energy of {} Hartree.'.format(molecule.mp2_energy))
    print('FCI energy of {} Hartree.'.format(molecule.fci_energy))
    print('Nuclear repulsion energy between protons is {} Hartree.'.format(
        molecule.nuclear_repulsion))
    for orbital in range(molecule.n_orbitals):
        print('Spatial orbital {} has energy of {} Hartree.'.format(
            orbital, molecule.orbital_energies[orbital]))
    hf_energies += [molecule.hf_energy]
    fci_energies += [molecule.fci_energy]
# Plot. (%matplotlib inline is an IPython magic; valid only in a notebook.)
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(0)
plt.plot(bond_lengths, fci_energies, 'x-')
plt.plot(bond_lengths, hf_energies, 'o-')
plt.ylabel('Energy in Hartree')
plt.xlabel('Bond length in angstrom')
plt.show()
```
The geometry data needed to generate MolecularData can also be retrieved from the PubChem online database by inputting the molecule's name.
```
# Fetch a molecule's geometry from the PubChem online database by name
# (requires network access).
from openfermion.chem import geometry_from_pubchem
methane_geometry = geometry_from_pubchem('methane')
print(methane_geometry)
```
## InteractionOperator and InteractionRDM for efficient numerical representations
Fermion Hamiltonians can be expressed as $H = h_0 + \sum_{pq} h_{pq}\, a^\dagger_p a_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} \, a^\dagger_p a^\dagger_q a_r a_s$ where $h_0$ is a constant shift due to the nuclear repulsion and $h_{pq}$ and $h_{pqrs}$ are the famous molecular integrals. Since fermions interact pairwise, their energy is thus a unique function of the one-particle and two-particle reduced density matrices which are expressed in second quantization as $\rho_{pq} = \left \langle \Psi \mid a^\dagger_p a_q \mid \Psi \right \rangle$ and $\rho_{pqrs} = \left \langle \Psi \mid a^\dagger_p a^\dagger_q a_r a_s \mid \Psi \right \rangle$, respectively, where $\lvert \Psi \rangle$ is the state of interest.
Because the RDMs and molecular Hamiltonians are both compactly represented and manipulated as 2- and 4- index tensors, we can represent them in a particularly efficient form using similar data structures. The InteractionOperator data structure can be initialized for a Hamiltonian by passing the constant $h_0$ (or 0), as well as numpy arrays representing $h_{pq}$ (or $\rho_{pq}$) and $h_{pqrs}$ (or $\rho_{pqrs}$). Importantly, InteractionOperators can also be obtained by calling MolecularData.get_molecular_hamiltonian() or by calling the function get_interaction_operator() (found in openfermion.transforms) on a FermionOperator. The InteractionRDM data structure is similar but represents RDMs. For instance, one can get a molecular RDM by calling MolecularData.get_molecular_rdm(). When generating Hamiltonians from the MolecularData class, one can choose to restrict the system to an active space.
These classes inherit from the same base class, PolynomialTensor. This data structure overloads the slice operator [] so that one can get or set the key attributes of the InteractionOperator: $\textrm{.constant}$, $\textrm{.one_body_coefficients}$ and $\textrm{.two_body_coefficients}$ . For instance, InteractionOperator[(p, 1), (q, 1), (r, 0), (s, 0)] would return $h_{pqrs}$ and InteractionRDM would return $\rho_{pqrs}$. Importantly, the class supports fast basis transformations using the method PolynomialTensor.rotate_basis(rotation_matrix).
But perhaps most importantly, one can map the InteractionOperator to any of the other data structures we've described here.
Below, we load MolecularData from a saved calculation of LiH. We then obtain an InteractionOperator representation of this system in an active space. We then map that operator to qubits. We then demonstrate that one can rotate the orbital basis of the InteractionOperator using random angles to obtain a totally different operator that is still iso-spectral.
```
# Load a saved LiH calculation, restrict it to an active space, map it to
# qubits, then apply a random orbital-basis rotation and verify the ground
# state energy is unchanged (the rotation is iso-spectral).
from openfermion.chem import MolecularData
from openfermion.transforms import get_fermion_operator, jordan_wigner
from openfermion.linalg import get_ground_state, get_sparse_operator
import numpy
import scipy
import scipy.linalg
# Load saved file for LiH.
diatomic_bond_length = 1.45
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
basis = 'sto-3g'
multiplicity = 1
# Set Hamiltonian parameters: orbitals below active_space_start are frozen
# (kept occupied); orbitals in [active_space_start, active_space_stop) are active.
active_space_start = 1
active_space_stop = 3
# Generate and populate instance of MolecularData.
molecule = MolecularData(geometry, basis, multiplicity, description="1.45")
molecule.load()
# Get the Hamiltonian in an active space.
molecular_hamiltonian = molecule.get_molecular_hamiltonian(
    occupied_indices=range(active_space_start),
    active_indices=range(active_space_start, active_space_stop))
# Map operator to fermions and qubits.
fermion_hamiltonian = get_fermion_operator(molecular_hamiltonian)
qubit_hamiltonian = jordan_wigner(fermion_hamiltonian)
qubit_hamiltonian.compress()
print('The Jordan-Wigner Hamiltonian in canonical basis follows:\n{}'.format(qubit_hamiltonian))
# Get sparse operator and ground state energy.
sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)
energy, state = get_ground_state(sparse_hamiltonian)
print('Ground state energy before rotation is {} Hartree.\n'.format(energy))
# Randomly rotate: fill a real antisymmetric matrix kappa with random angles
# so that expm(kappa) is orthogonal, i.e. a valid single-particle rotation.
n_orbitals = molecular_hamiltonian.n_qubits // 2
n_variables = int(n_orbitals * (n_orbitals - 1) / 2)
numpy.random.seed(1)
random_angles = numpy.pi * (1. - 2. * numpy.random.rand(n_variables))
kappa = numpy.zeros((n_orbitals, n_orbitals))
index = 0
for p in range(n_orbitals):
    for q in range(p + 1, n_orbitals):
        kappa[p, q] = random_angles[index]
        kappa[q, p] = -numpy.conjugate(random_angles[index])
        index += 1
# Build the unitary rotation matrix. (A leftover `kappa + kappa.transpose()`
# sanity value — identically zero for antisymmetric kappa and never used —
# has been removed.)
rotation_matrix = scipy.linalg.expm(kappa)
# Apply the unitary.
molecular_hamiltonian.rotate_basis(rotation_matrix)
# Get qubit Hamiltonian in rotated basis.
qubit_hamiltonian = jordan_wigner(molecular_hamiltonian)
qubit_hamiltonian.compress()
print('The Jordan-Wigner Hamiltonian in rotated basis follows:\n{}'.format(qubit_hamiltonian))
# Get sparse Hamiltonian and energy in rotated basis.
sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)
energy, state = get_ground_state(sparse_hamiltonian)
print('Ground state energy after rotation is {} Hartree.'.format(energy))
```
## Quadratic Hamiltonians and Slater determinants
The general electronic structure Hamiltonian
$H = h_0 + \sum_{pq} h_{pq}\, a^\dagger_p a_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} \, a^\dagger_p a^\dagger_q a_r a_s$ contains terms that act on up to 4 sites, or
is quartic in the fermionic creation and annihilation operators. However, in many situations
we may fruitfully approximate these Hamiltonians by replacing these quartic terms with
terms that act on at most 2 fermionic sites, or quadratic terms, as in mean-field approximation theory.
These Hamiltonians have a number of
special properties one can exploit for efficient simulation and manipulation of the Hamiltonian, thus
warranting a special data structure. We refer to Hamiltonians which
only contain terms that are quadratic in the fermionic creation and annihilation operators
as quadratic Hamiltonians, and include the general case of non-particle conserving terms as in
a general Bogoliubov transformation. Eigenstates of quadratic Hamiltonians can be prepared
efficiently on both a quantum and classical computer, making them amenable to initial guesses for
many more challenging problems.
A general quadratic Hamiltonian takes the form
$$H = \sum_{p, q} (M_{pq} - \mu \delta_{pq}) a^\dagger_p a_q + \frac{1}{2} \sum_{p, q} (\Delta_{pq} a^\dagger_p a^\dagger_q + \Delta_{pq}^* a_q a_p) + \text{constant},$$
where $M$ is a Hermitian matrix, $\Delta$ is an antisymmetric matrix,
$\delta_{pq}$ is the Kronecker delta symbol, and $\mu$ is a chemical
potential term which we keep separate from $M$ so that we can use it
to adjust the expectation of the total number of particles.
In OpenFermion, quadratic Hamiltonians are conveniently represented and manipulated
using the QuadraticHamiltonian class, which stores $M$, $\Delta$, $\mu$ and the constant. It is specialized to exploit the properties unique to quadratic Hamiltonians. Like InteractionOperator and InteractionRDM, it inherits from the PolynomialTensor class.
The BCS mean-field model of superconductivity is a quadratic Hamiltonian. The following code constructs an instance of this model as a FermionOperator, converts it to a QuadraticHamiltonian, and then computes its ground energy:
```
# Construct the BCS mean-field d-wave model as a FermionOperator, convert
# it to a QuadraticHamiltonian, and compute its ground-state energy.
from openfermion.hamiltonians import mean_field_dwave
from openfermion.transforms import get_quadratic_hamiltonian
# Set model.
x_dimension = 2
y_dimension = 2
tunneling = 2.
sc_gap = 1.
periodic = True
# Get FermionOperator.
mean_field_model = mean_field_dwave(
    x_dimension, y_dimension, tunneling, sc_gap, periodic=periodic)
# Convert to QuadraticHamiltonian (exploits the quadratic structure).
quadratic_hamiltonian = get_quadratic_hamiltonian(mean_field_model)
# Compute the ground energy
ground_energy = quadratic_hamiltonian.ground_energy()
print(ground_energy)
```
Any quadratic Hamiltonian may be rewritten in the form
$$H = \sum_p \varepsilon_p b^\dagger_p b_p + \text{constant},$$
where the $b_p$ are new annihilation operators that satisfy the fermionic anticommutation relations, and which are linear combinations of the old creation and annihilation operators. This form of $H$ makes it easy to deduce its eigenvalues; they are sums of subsets of the $\varepsilon_p$, which we call the orbital energies of $H$. The following code computes the orbital energies and the constant:
```
# Diagonal (Bogoliubov) form: eigenvalues are sums of subsets of these
# orbital energies, shifted by the constant.
orbital_energies, constant = quadratic_hamiltonian.orbital_energies()
print(orbital_energies)
print()
print(constant)
```
Eigenstates of quadratic hamiltonians are also known as fermionic Gaussian states, and they can be prepared efficiently on a quantum computer. One can use OpenFermion to obtain circuits for preparing these states. The following code obtains the description of a circuit which prepares the ground state (operations that can be performed in parallel are grouped together), along with a description of the starting state to which the circuit should be applied:
```
# Obtain a circuit description that prepares the ground state of the
# quadratic Hamiltonian; operations in the same group can run in parallel.
from openfermion.circuits import gaussian_state_preparation_circuit
circuit_description, start_orbitals = gaussian_state_preparation_circuit(quadratic_hamiltonian)
for parallel_ops in circuit_description:
    print(parallel_ops)
print('')
print(start_orbitals)
```
In the circuit description, each elementary operation is either a tuple of the form $(i, j, \theta, \varphi)$, indicating the operation $\exp[i \varphi a_j^\dagger a_j]\exp[\theta (a_i^\dagger a_j - a_j^\dagger a_i)]$, which is a Givens rotation of modes $i$ and $j$, or the string 'pht', indicating the particle-hole transformation on the last fermionic mode, which is the operator $\mathcal{B}$ such that $\mathcal{B} a_N \mathcal{B}^\dagger = a_N^\dagger$ and leaves the rest of the ladder operators unchanged. Operations that can be performed in parallel are grouped together.
In the special case that a quadratic Hamiltonian conserves particle number ($\Delta = 0$), its eigenstates take the form
$$\lvert \Psi_S \rangle = b^\dagger_{1}\cdots b^\dagger_{N_f}\lvert \text{vac} \rangle,\qquad
b^\dagger_{p} = \sum_{q=1}^N Q_{pq}a^\dagger_q,$$
where $Q$ is an $N_f \times N$ matrix with orthonormal rows. These states are also known as Slater determinants. OpenFermion also provides functionality to obtain circuits for preparing Slater determinants starting with the matrix $Q$ as the input.
| github_jupyter |
# Evaluate a trained encoder
Notebook Author: Aniket Tekawade, Argonne National Laboratory, atekawade@anl.gov
This notebook will run some tests on a trained encoder-decoder model to (1) visualize the latent space clusters (2) evaluate segmentation accuracy (3) something else (4) and something else.
### Set paths and arguments
```
# Notebook configuration: cap GPU memory, set data/model paths and
# evaluation arguments, then import the analysis toolchain.
from tensorflow.config.experimental import *
# Requested GPU memory cap; memory_limit below is in MB, hence the *1000.
# NOTE(review): 16.0 * 1000 MB ~= 16 GB — confirm the intended unit (GB vs GiB).
GPU_mem_limit = 16.0
gpus = list_physical_devices('GPU')
# Only configure a virtual device when at least one GPU is present;
# on a CPU-only machine this block is skipped entirely.
if gpus:
    try:
        set_virtual_device_configuration(gpus[0], [VirtualDeviceConfiguration(memory_limit=GPU_mem_limit*1000.0)])
    except RuntimeError as e:
        # Virtual devices must be set before GPUs are initialized.
        print(e)
# paths
model_path = "/data02/MyArchive/aisteer_3Dencoders/models"
csv_path = "/data02/MyArchive/aisteer_3Dencoders/data_TomoTwin/datalist_train.csv"
# arguments
n_samples = 5000
model_tag = "111d32_set6"
noise_level = 0.18
patch_size = tuple([64]*3)
binning = 2
# Parse the latent dimensionality from the tag, e.g. "111d32_set6" -> 32.
# NOTE(review): this assumes the tag always matches "<arch>d<latent>_<set>";
# confirm it holds for every model tag used with this notebook.
latent_dims = int(model_tag.split('_')[0].split('d')[-1])
%matplotlib inline
import sys
import os
import numpy as np
import pandas as pd
import h5py
import time
import matplotlib.pyplot as plt
import matplotlib as mpl
from tomo_encoders.img_stats import Parallelize, calc_jac_acc, pore_analysis
from tomo_encoders.data_sampling import data_generator_4D, get_data_from_flist
from tomo_encoders.porosity_encoders import custom_objects_dict
from tomo_encoders.latent_vis import *
from tomo_encoders.feature_maps_vis import view_midplanes
from tensorflow.keras.models import load_model
import pickle
figw = 8
import seaborn as sns
sns.set(font_scale = 1)
sns.set_style("whitegrid", {'axes.grid' : False})
```
### Load the trained model
```
# Filenames of the trained artifacts, keyed by role; all share the model tag.
model_names = {
    "segmenter": "segmenter%s.hdf5" % model_tag,
    "encoder": "encoder%s.hdf5" % model_tag,
    "PCA": "PCA%s.pkl" % model_tag,
}
# Load the Keras models; custom_objects_dict resolves the custom layers/losses.
encoder = load_model(os.path.join(model_path, model_names["encoder"]),
                     custom_objects=custom_objects_dict)
segmenter = load_model(os.path.join(model_path, model_names["segmenter"]),
                       custom_objects=custom_objects_dict)
```
### Load the data and draw $64^3$ sized samples
```
# Read the volumes listed in the csv; "recon" is the input, "gt_labels" the target.
Xs, Ys, plot_labels = get_data_from_flist(csv_path,
                                          normalize=True,
                                          data_tags=("recon", "gt_labels"),
                                          group_tags=["tomo"],
                                          downres=binning)
# Generator of noisy patches; scan_idx tags each patch with its source volume.
dg = data_generator_4D(Xs, Ys,
                       patch_size, n_samples,
                       scan_idx=True, add_noise=noise_level)
x, y, sample_labels = next(dg)
print("Shape of x: %s" % str(x.shape))
```
**Histogram of sampled patches:** How many patches are drawn from each dataset?
```
# Shorten dataset names by stripping the common "train_blobs_" prefix.
tmp_labels = [lab.split('train_blobs_')[-1] for lab in plot_labels]
# Count how many sampled patches came from each dataset.
sample_hist = pd.DataFrame(columns=["label", "n_pts"])
sample_hist["label"] = tmp_labels
sample_hist = sample_hist.set_index("label")
for dataset_idx, dataset_lab in enumerate(tmp_labels):
    n_hits = np.size(np.where(sample_labels == dataset_idx))
    sample_hist.loc[dataset_lab, "n_pts"] = n_hits
sample_hist.plot.barh()
```
### Get output from decoder and encoder
**Encoder output** Get latent vector, then apply PCA.
```
# Encode the patches into latent vectors (one row per patch).
dfN = get_latent_vector(encoder, x, sample_labels, plot_labels)

# Load the PCA fitted at training time and project the latent space to 2D.
pkl_filename = os.path.join(model_path, model_names["PCA"])
with open(pkl_filename, 'rb') as pkl_file:
    pca = pickle.load(pkl_file)
ncomps = 2
df = transform_PCA(dfN, latent_dims, pca, ncomps=ncomps)

# Also keep the L2 norm of the raw latent vector for each patch.
h_cols = ["$h_%i$" % i for i in range(latent_dims)]
df["${||{h}||}$"] = np.linalg.norm(dfN[h_cols], axis=1)
df = rescale_z(df)
```
**Decoder output** How does the segmented output look?
```
# Segment the patches and binarize the soft predictions.
yp = segmenter.predict(x)
yp = np.round(yp)
# Per-patch intersection-over-union (IoU) against ground truth, in parallel.
df["IoU"] = Parallelize(list(zip(y, yp)), calc_jac_acc, procs = 48)
# Overlay the predicted segmentation on the input mid-planes for a few patches.
nplots = 3
fig, ax = plt.subplots(nplots, 3, figsize = (8, 3 * nplots))
for row in range(nplots):
    view_midplanes(vol = x[row, ..., 0], ax = ax[row])
    view_midplanes(vol = yp[row, ..., 0], ax = ax[row], cmap = "copper", alpha = 0.3)
    ax[row, 0].set_ylabel(tmp_labels[sample_labels[row]])
fig.tight_layout()
```
**Calculate some metadata** to further understand cluster distances (future use)
```
# Ground-truth porosity metrics (computed after a connected-components filter):
# one (void-fraction, npores, pore-vol) triple per patch.
f = Parallelize(y, pore_analysis, procs = 48)
f = np.asarray(f)
df[["void-fraction", "npores", "pore-vol"]] = f
# Per-patch signal-to-noise ratio (SNR) of the noisy input vs. ground truth.
# (A throwaway df_temp DataFrame that was built from f and never used has
# been removed — the values go straight into df.)
f = Parallelize(list(zip(x, y)), calc_SNR, procs = 48)
f = np.asarray(f)
df["SNR"] = f
df.head()
```
### Analyze Segmentation Accuracy
**Compare accuracy with porosity metrics:** Is accuracy sensitive to pore size?
```
# Build a printable model id, e.g. "111d32" -> "111-32".
modelid = model_tag.split('_')[0].split('d')
modelid = ('-').join(modelid)
# Mean IoU binned by the pore-size parameter s_p stored in df["param"].
# NOTE(review): the bin labels ("10-12", "12-14", ...) do not match the
# ranges passed to between() (10-11, 12-13, 14-15, 16-18), which also leave
# gaps — confirm which binning is intended.
# NOTE(review): between(..., inclusive=True) is deprecated in pandas >= 1.3;
# newer pandas requires inclusive="both".
bins = ["10-12", "12-14", "14-16", "16-18"]
IoU_binned = []
IoU_binned.append(np.mean(df[df["param"].between(10,11,inclusive = True)]["IoU"]))
IoU_binned.append(np.mean(df[df["param"].between(12,13,inclusive = True)]["IoU"]))
IoU_binned.append(np.mean(df[df["param"].between(14,15,inclusive = True)]["IoU"]))
IoU_binned.append(np.mean(df[df["param"].between(16,18,inclusive = True)]["IoU"]))
# Bar chart of the binned mean accuracy.
fig, ax = plt.subplots(1,1, figsize = (6,6), sharey = True)
ax.bar(bins, IoU_binned)
ax.set_xlabel("param $s_p$")
ax.set_title("(a) binned over $s_p$ for %s"%modelid)
ax.set_ylim([0.6,1.0])
fig.tight_layout()
```
**Compare accuracy with SNR:** Is accuracy sensitive to the SNR in input images?
```
# Aggregate IoU per source dataset: pool all patches from one dataset and
# compute a single Jaccard score for the pool.
mean_IoUs = np.zeros(len(plot_labels))
for src in range(len(plot_labels)):
    mask = sample_labels == src
    mean_IoUs[src] = calc_jac_acc(y[mask], yp[mask])
plt.barh(tmp_labels, mean_IoUs)
plt.xlabel("IoU")
print("Min IoU in datasets: %.2f" % mean_IoUs.min())
plt.title("Patches", fontsize = 20)
```
### Analyze the reduced latent (z) space
**Plot PCA** Plot the 2D projection of latent space
```
# Scatter the 2D PCA projection of the latent space, colored by the pore-size
# parameter and styled by measurement type.
sns.set(font_scale = 1.2)
sns.set_style("whitegrid", {'axes.grid' : False})
fig, ax = plt.subplots(1, 1, figsize = (8, 8), sharex = True, sharey = True)
sns.scatterplot(data = df, x = "$z_0$", y = "$z_1$",
                hue = "param",
                palette = "viridis", ax = ax,
                legend = 'full',
                style = "measurement")
# Title with a printable model id, e.g. "111d32" -> "111-32".
modelid = '-'.join(model_tag.split('_')[0].split('d'))
ax.set_title("%s" % modelid)
fig.tight_layout()
```
**Compare** calculated porosity metrics with z-space. Note that $s_p$ varies logarithmically with pore size (or number of pores per volume).
```
# Log-transform the porosity metrics and the z coordinates, since the
# pore-size parameter varies logarithmically with pore count / volume.
df["log npores"] = np.log(df["npores"])
df["log pore-vol"] = np.log(df["pore-vol"])
df["${log(z_1)}$"] = np.log(df["$z_1$"])
df["${log(z_0)}$"] = np.log(df["$z_0$"])
sns.set(font_scale = 1.2)
sns.set_style("whitegrid", {'axes.grid' : False})
fig, ax = plt.subplots(1, 2, figsize = (8, 4), sharex = True, sharey = True)
hues = ["log npores", "log pore-vol"]
for idx, hue in enumerate(hues):
    sns.scatterplot(data = df, x = "$z_0$", y = "$z_1$",
                    hue = hue,
                    palette = "viridis", ax = ax.flat[idx],
                    legend = 'brief')
modelid = '-'.join(model_tag.split('_')[0].split('d'))
# As in the original, only the last subplot is titled (set after the loop).
ax.flat[idx].set_title("%s" % modelid)
fig.tight_layout()
```
### Analyze Compute Times
```
# Benchmark settings: number of repetitions and an example patch index.
niters = 4000
idx = 10
```
**Pore analysis** (connected components)
```
# Wall-clock time of the connected-components pore analysis, averaged over
# niters patches and reported in milliseconds per call.
start = time.time()
output = [pore_analysis(y[j]) for j in range(niters)]
tot_time = (time.time() - start) * 1000.0 / niters
print(tot_time)
```
**Segmentation** (encoder+decoder) - this could vary significantly based on available GPU memory
```
# Milliseconds per patch for full segmentation (encoder + decoder).
x_in = x[:niters]
start = time.time()
yp_temp = segmenter.predict(x_in)
tot_time = (time.time() - start) * 1000.0 / niters
print(tot_time)
```
**Encoder**
```
# Milliseconds per patch for the encoder alone.
x_in = x[:niters]
start = time.time()
yp_temp = encoder.predict(x_in)
tot_time = (time.time() - start) * 1000.0 / niters
print(tot_time)
```
### THE END
| github_jupyter |
# Benchmark ML Computation Speed
In this notebook, we test the computational performance of the [digifellow](https://digifellow.swfcloud.de/hub/spawn) JupyterHub against free-access services such as *Colab* and *Kaggle*. The baseline of this comparison is an average PC *(Core i5 2.5GHz - 8GB RAM - No GPU)*
The task of this test is classifying the MNIST dataset with different algorithms *(LR, ANN, CNN)* involving different libraries *(SKLearn, Tensorflow)* and comparing the performance with and without GPU acceleration.
## Dependencies
```
# TensorFlow / Keras for the neural-network benchmarks.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers
# scikit-learn for the logistic-regression baseline.
from sklearn.linear_model import LogisticRegression
# Collected %%timeit measurements, one entry per benchmark.
readings = []
```
## Preprocessing
```
# Load MNIST; the test split is unused in this benchmark.
(train_images, train_labels), (_i, _l) = mnist.load_data()
# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1].
train_images = train_images.reshape(-1, 28 * 28) / 255.0
```
## SciKit-Learn - Logistic Regression
```
# L1-regularized logistic regression; the saga solver supports l1 at MNIST scale.
LG = LogisticRegression(tol=0.1, solver='saga', penalty='l1')
```
### sklearn timer
```
%%timeit -n 1 -r 10 -o
# Time a single logistic-regression fit, repeated 10 times (-o keeps the result).
LG.fit(train_images, train_labels)
readings.append(_.all_runs)
# NOTE(review): `_` refers to the previous cell's output, and this append runs
# inside the timed cell — confirm the readings are recorded as intended.
```
## Tensorflow - ANN
```
# Simple fully-connected classifier: 784 -> 128 (relu) -> 10 (softmax).
annModel = keras.Sequential([
    tf.keras.Input(shape=(28 * 28,)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
annModel.compile('sgd', 'sparse_categorical_crossentropy', ['accuracy'])
```
### ANN timer (CPU)
```
%%timeit -n 1 -r 10 -o
# Time 5 training epochs of the ANN pinned to the CPU.
with tf.device('/CPU:0'):
    annModel.fit(train_images, train_labels, epochs=5, verbose=0)
readings.append(_.all_runs)
```
### ANN timer (GPU)
```
%%timeit -n 1 -r 10 -o
# Time 5 training epochs of the ANN pinned to the GPU.
with tf.device('/GPU:0'):
    annModel.fit(train_images, train_labels, epochs=5, verbose=0)
readings.append(_.all_runs)
```
## Tensorflow - CNN
```
# Small CNN: conv(16, 3x3) -> batchnorm -> maxpool -> flatten -> dense(128)
# -> dropout -> softmax(10).
cnnModel = keras.Sequential([
    tf.keras.Input(shape=(28, 28, 1)),
    layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'),
    layers.BatchNormalization(),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.2),
    layers.Dense(10, activation='softmax'),
])
cnnModel.compile('sgd', 'sparse_categorical_crossentropy', ['accuracy'])
```
### CNN timer (CPU)
```
%%timeit -n 1 -r 10 -o
# Time 5 training epochs of the CNN on CPU; images reshaped to NHWC (28,28,1).
with tf.device('/CPU:0'):
    cnnModel.fit(train_images.reshape(-1, 28, 28, 1), train_labels, epochs=5, verbose=0)
readings.append(_.all_runs)
```
### CNN timer (GPU)
```
%%timeit -n 1 -r 10 -o_.all_runs
with tf.device('/GPU:0'):
cnnModel.fit(train_images.reshape(-1, 28, 28, 1), train_labels, epochs=5, verbose=0)
readings.append(_.all_runs)
```
## Storing readings
```
import csv

# Persist the raw timing runs as a single CSV row.
with open('readings', 'w') as out_file:
    csv.writer(out_file).writerow(readings)
```
Done :)
| github_jupyter |
<h1> <b>Homework 2</b></h1>
<i>Alejandro J. Rojas<br>
ale@ischool.berkeley.edu<br>
W261: Machine Learning at Scale<br>
Week: 02<br>
Jan 26, 2016</i></li>
<h2>HW2.0. </h2>
What is a race condition in the context of parallel computation? Give an example.
What is MapReduce?
How does it differ from Hadoop?
Which programming paradigm is Hadoop based on? Explain and give a simple example in code and show the code running.
<h2>HW2.1. Sort in Hadoop MapReduce</h2>
Given as input: Records of the form '<'integer, “NA”'>', where integer is any integer, and “NA” is just the empty string.
Output: sorted key value pairs of the form '<'integer, “NA”'>' in decreasing order; what happens if you have multiple reducers? Do you need additional steps? Explain.
Write code to generate N random records of the form '<'integer, “NA”'>'. Let N = 10,000.
Write the python Hadoop streaming map-reduce job to perform this sort. Display the top 10 biggest numbers. Display the 10 smallest numbers
# Data
```
import random

N = 10000        ### sample size
random.seed(0)   ### fixed seed so results can be replicated

# Write one random integer in [0, 100] per line; this is the "<integer, NA>"
# input for the Hadoop sort job (the empty string stands in for "NA").
# Fixed: the file is now closed via a context manager, and a dead write of
# an empty string between the number and the newline has been removed
# (it contributed nothing to the output).
with open("numcount.txt", "w") as input_file:
    for _ in range(N):
        input_file.write("%d\n" % random.randint(0, 100))
```
# Mapper
```
%%writefile mapper.py
#!/usr/bin/python
# Mapper for the Hadoop-streaming sort job: emit each input number with a
# count of 1, tab-separated, so the framework sorts/groups records by number.
import sys
for line in sys.stdin:  ### input comes from STDIN (standard input)
    number = line.strip()  ### remove leading and trailing whitespace
    print ('%s\t%s' % (number, 1))  ### mapper out looks like 'number' \t 1
!chmod +x mapper.py
```
# Reducer
```
%%writefile reducer.py
#!/usr/bin/python
# Reducer for the Hadoop-streaming job: totals the count per number (relying
# on Hadoop sorting map output by key) and prints the ten most and ten least
# frequent numbers side by side.
# NOTE(review): written for Python 2 (print statements) — it will not run
# under Python 3 without converting the prints.
# NOTE(review): the final lists are sorted by COUNT (record[1]), i.e. by
# frequency, not by the integer value the HW statement asks to sort by —
# confirm which is intended.
from operator import itemgetter
import sys
current_number = None
current_count = 0
number = None
numlist = []
# input comes from STDIN
for line in sys.stdin:
    line = line.strip()  ### remove leading and trailing whitespace
    line = line.split('\t')  ### parse the input we got from mapper.py
    number = line[0]  ### integer generated randomly we got from mapper.py
    try:
        count = line[1]
        count = int(count)  ### convert count (currently a string) to int
    except ValueError:  ### if count was not a number then silently
        continue  ### ignore/discard this line
    if current_number == number:  ### this IF-switch only works because Hadoop sorts map output
        current_count += count  ### by key (here: number) before it is passed to the reducer
    else:
        if current_number:
            numlist.append((current_number,current_count))  ### store tuple in a list once totalize count per number
        current_count = count  ### set current count
        current_number = number  ### set current number
if current_number == number:  ### do not forget to output the last word if needed!
    numlist.append((current_number,current_count))
toplist = sorted(numlist,key=lambda record: record[1], reverse=True)  ### sort list from largest count to smallest
bottomlist = sorted(numlist,key=lambda record: record[1])  ### sort list from smallest to largest
print '%25s' %'TOP 10', '%25s' % '', '%28s' %'BOTTOM 10'
print '%20s' %'Number', '%10s' %'Count', '%20s' % '', '%20s' %'Number','%10s' %'Count'
for i in range (10):
    print '%20s%10s' % (toplist[i][0], toplist[i][1]),'%20s' % '', '%20s%10s' % (bottomlist[i][0], bottomlist[i][1])
!chmod +x reducer.py
!echo "10 \n 10\n 5\n 6\n 8\n 9 \n 10 \n 9 \n 12 \n 21 \n 22 \n 23 \n 24 \n 25" | python mapper.py | sort -k1,1 | python reducer.py
```
# Run numcount in Hadoop
<h2>start yarn and hdfs</h2>
```
# Bring up the YARN resource manager and HDFS daemons before submitting jobs.
!/usr/local/Cellar/hadoop/2.7.1/sbin/start-yarn.sh ### start up yarn
!/usr/local/Cellar/hadoop/2.7.1/sbin/start-dfs.sh ### start up dfs
```
<h2> remove files from prior runs </h2>
```
# Delete any output left over from a previous run so the job can re-create it.
!hdfs dfs -rm -r /user/venamax ### remove prior files
```
<h2> create folder</h2>
```
# Create the user's working directory in HDFS (-p: no error if it exists).
!hdfs dfs -mkdir -p /user/venamax ### create hdfs folder
```
<h2> upload numcount.txt to hdfs</h2>
```
# Upload the generated input file from the local filesystem into HDFS.
!hdfs dfs -put numcount.txt /user/venamax #### save source data file to hdfs
```
<h2> Hadoop streaming command </h2>
hadoop jar hadoopstreamingjarfile \
-D stream.num.map.output.key.fields=n \
-mapper mapperfile \
-reducer reducerfile \
-input inputfile \
-output outputfile
```
# Submit the streaming job: mapper.py/reducer.py run over numcount.txt,
# writing results to the numcountOutput directory in HDFS.
!hadoop jar hadoop-*streaming*.jar -mapper mapper.py -reducer reducer.py -input numcount.txt -output numcountOutput
```
<h2>show the results</h2>
```
# Print the reducer output (a single part file, since one reducer was used).
!hdfs dfs -cat numcountOutput/part-00000
```
<h2>stop yarn and hdfs </h2>
```
# Shut down the YARN and HDFS daemons once the job is done.
!/usr/local/Cellar/hadoop/2.7.1/sbin/stop-yarn.sh
!/usr/local/Cellar/hadoop/2.7.1/sbin/stop-dfs.sh
```
<h2>HW2.2. WORDCOUNT</h2>
Using the Enron data from HW1 and Hadoop MapReduce streaming, write the mapper/reducer job that will determine the word count (number of occurrences) of each white-space delimitted token (assume spaces, fullstops, comma as delimiters). Examine the word “assistance” and report its word count results.
CROSSCHECK: >grep assistance enronemail_1h.txt|cut -d$'\t' -f4| grep assistance|wc -l
8
#NOTE "assistance" occurs on 8 lines but how many times does the token occur? 10 times! This is the number we are looking for!
# Mapper
```
%%writefile mapper.py
#!/usr/bin/python
## mapper.py
## Author: Alejandro J. Rojas
## Description: mapper code for HW2.2
import sys
import re
########## Collect user input ###############
filename = sys.argv[1]
findwords = re.split(" ",sys.argv[2].lower())
with open (filename, "r") as myfile:
for line in myfile.readlines():
line = line.strip()
record = re.split(r'\t+', line) ### Each email is a record with 4 components
### 1) ID 2) Spam Truth 3) Subject 4) Content
if len(record)==4: ### Take only complete records
for i in range (2,len(record)): ### Starting from Subject to the Content
bagofwords = re.split(" " | "," ,record[i])### Collect all words present on each email
for word in bagofwords:
flag=0
if word in findwords:
flag=1
print '%s\t%s\t%s\t%s\t%s' % (word, 1,record[0], record[1],flag)
### output: word, 1, id, spam truth and flag
!chmod +x mapper.py
```
# Reducer
```
%%writefile reducer.py
#!/usr/bin/python
# Reducer for the HW2.2/HW2.3 word-count + Naive Bayes job: totals per-word
# spam and ham counts (relying on Hadoop's sort-by-key), then classifies each
# email with the learnt class-conditional probabilities.
# NOTE(review): this cell appears to be work in progress — it references
# several names that are never defined (`sma`, `spam_email_count`,
# `spam_word_count`, `ham_word_count`, `email['spam']`, `numlist`) and mixes
# `current_count` with the declared `current_wordcount`; as written it will
# raise NameError at runtime. Written in Python 2 (print statements,
# integer division semantics).
from operator import itemgetter
import sys
from itertools import groupby
current_word, word = None, None
current_wordcount, current_spam_wordcount, current_ham_wordcount = 0,0,0
current_id, record_id = None, None
current_y_true, y_true = None, None
current_flag, flag = None,None
sum_records, sum_spamrecords, sum_hamrecords = 0,0,0
sum_spamwords, sum_hamwords = 0,0
flagged_words = []
emails={} #Associative array to hold email data
words={} #Associative array for word data
# input comes from STDIN
for line in sys.stdin:
    line = line.strip()  ### remove leading and trailing whitespace
    line = line.split('\t')  ### parse the input we got from mapper.py
    word = line[0]  ### word we get from mapper.py
    try:
        count = line[1]
        count = int(count)  ### convert count (currently a string) to int
        email = line[2]  ### id that identifies each email
        y_true = line[3]
        y_true = int(y_true)  ### spam truth as an integer
        flag = line[4]
        flag = int(flag)  ### flags if word is in the user specified list
    except ValueError:  ### if count was not a number then silently
        continue  ### ignore/discard this line
    if current_word == word:  ### this IF-switch only works because Hadoop sorts map output
        current_count += count  ### by key (here: word) before it is passed to the reducer
        if current_word not in words.keys():
            words[current_word]={'ham_count':0,'spam_count':0,'flag':flag}
        if email not in emails.keys():
            emails[current_email]={'y_true':y_true,'word_count':0,'words':[]}
            if y_true == 1:
                sma  # NOTE(review): incomplete statement — NameError at runtime
        if y_true == 1:  ### if record where word is located is a spam
            current_spamcount += count  ### add to spam count of that word
            sum_spamwords += 1
        else:
            current_hamcount += count  ### if not add to ham count of thet word
            sum_hamwords +=1
        emails[current_email]['word_count'] += 1
        emails[current_email]['words'].append(current_word)### store words in email
    else:
        if current_word:
            if flag==1 and current_word not in flagged_words:
                flagged_words.append(current_word)
            words[current_word]['flag'] = flag  ### denote if current word is a word specified by the user list
            words[current_word]['spam_count'] += current_spamcount  ### update spam count for current word
            words[current_word]['ham_count'] += current_hamcount  ### update ham count for current word
        current_count = count  ### set current count
        current_spamcount, current_hamcount = 0,0  ### initialize spam and ham wordcount
        current_word = word  ### set current number
        current_email = email  ### set current id of email
        current_y_true = y_true  ### set current spam truth
        current_flag = flag  ### set current flag
if current_word == word:  ### do not forget to output the last word if needed!
    emails[current_email]['word_count'] += 1
    emails[current_email]['words'].append(current_word)### store words in email
    words[current_word]['flag'] = flag  ### denote if current word is a word specified by the user list
    words[current_word]['spam_count'] += current_spamcount  ### update spam count for current word
    words[current_word]['ham_count'] += current_hamcount  ### update ham count for current word
#Calculate stats for entire corpus
# NOTE(review): spam_email_count is never computed above — NameError here.
prior_spam=spam_email_count/len(emails)
prior_ham=1-prior_spam
vocab_count=len(words)#number of unique words in the total vocabulary
for k,word in words.iteritems():
    #These versions calculate conditional probabilities WITH Laplace smoothing.
    #word['p_spam']=(word['spam_count']+1)/(spam_word_count+vocab_count)
    #word['p_ham']=(word['ham_count']+1)/(ham_word_count+vocab_count)
    #Compute conditional probabilities WITHOUT Laplace smoothing
    # NOTE(review): spam_word_count / ham_word_count are undefined
    # (sum_spamwords / sum_hamwords were accumulated instead).
    word['p_spam']=(word['spam_count'])/(spam_word_count)
    word['p_ham']=(word['ham_count'])/(ham_word_count)
#At this point the model is now trained, and we can use it to make our predictions
for j,email in emails.iteritems():
    #Log versions - no longer used
    #p_spam=log(prior_spam)
    #p_ham=log(prior_ham)
    p_spam=prior_spam
    p_ham=prior_ham
    for word in email['words']:
        if word in flagged_words:
            try:
                #p_spam+=log(words[word]['p_spam']) #Log version - no longer used
                p_spam*=words[word]['p_spam']
            except ValueError:
                pass #This means that words that do not appear in a class will use the class prior
            try:
                #p_ham+=log(words[word]['p_ham']) #Log version - no longer used
                p_ham*=words[word]['p_ham']
            except ValueError:
                pass
    if p_spam>p_ham:
        spam_pred=1
    else:
        spam_pred=0
    print j+'\t'+str(email['spam'])+'\t'+str(spam_pred)
# NOTE(review): numlist is undefined in this cell (left over from the HW2.1
# reducer) — the remaining lines will raise NameError.
toplist = sorted(numlist,key=lambda record: record[1], reverse=True)  ### sort list from largest count to smallest
bottomlist = sorted(numlist,key=lambda record: record[1])  ### sort list from smallest to largest
print '%25s' %'TOP 10', '%25s' % '', '%28s' %'BOTTOM 10'
print '%20s' %'Number', '%10s' %'Count', '%20s' % '', '%20s' %'Number','%10s' %'Count'
for i in range (10):
    print '%20s%10s' % (toplist[i][0], toplist[i][1]),'%20s' % '', '%20s%10s' % (bottomlist[i][0], bottomlist[i][1])
```
<h2>HW2.2.1</h2> Using Hadoop MapReduce and your wordcount job (from HW2.2) determine the top-10 occurring tokens (most frequent tokens)
<h2>HW2.3. Multinomial NAIVE BAYES with NO Smoothing</h2>
Using the Enron data from HW1 and Hadoop MapReduce, write a mapper/reducer job(s) that
will both learn Naive Bayes classifier and classify the Enron email messages using the learnt Naive Bayes classifier. Use all white-space delimitted tokens as independent input variables (assume spaces, fullstops, commas as delimiters). Note: for multinomial Naive Bayes, the Pr(X=“assistance”|Y=SPAM) is calculated as follows:
the number of times “assistance” occurs in SPAM labeled documents / the number of words in documents labeled SPAM
E.g., “assistance” occurs 5 times in all of the documents Labeled SPAM, and the length in terms of the number of words in all documents labeled as SPAM (when concatenated) is 1,000. Then Pr(X=“assistance”|Y=SPAM) = 5/1000. Note this is a multinomial estimation of the class conditional for a Naive Bayes Classifier. No smoothing is needed in this HW. Multiplying lots of probabilities, which are between 0 and 1, can result in floating-point underflow. Since log(xy) = log(x) + log(y), it is better to perform all computations by summing logs of probabilities rather than multiplying probabilities. Please pay attention to probabilites that are zero! They will need special attention. Count up how many times you need to process a zero probabilty for each class and report.
Report the performance of your learnt classifier in terms of misclassifcation error rate of your multinomial Naive Bayes Classifier. Plot a histogram of the log posterior probabilities (i.e., Pr(Class|Doc))) for each class over the training set. Summarize what you see.
Error Rate = misclassification rate with respect to a provided set (say training set in this case). It is more formally defined here:
Let DF represent the evalution set in the following:
Err(Model, DF) = |{(X, c(X)) ∈ DF : c(X) != Model(x)}| / |DF|
Where || denotes set cardinality; c(X) denotes the class of the tuple X in DF; and Model(X) denotes the class inferred by the Model “Model”
<h2>HW2.4 Repeat HW2.3 with the following modification: use Laplace plus-one smoothing. </h2>
Compare the misclassifcation error rates for 2.3 versus 2.4 and explain the differences.
For a quick reference on the construction of the Multinomial NAIVE BAYES classifier that you will code,
please consult the "Document Classification" section of the following wikipedia page:
https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Document_classification
OR the original paper by the curators of the Enron email data:
http://www.aueb.gr/users/ion/docs/ceas2006_paper.pdf
<h2>HW2.5. Repeat HW2.4. This time when modeling and classification ignore tokens with a frequency of less than three (3) in the training set. </h2>How does it affect the misclassifcation error of learnt naive multinomial Bayesian Classifier on the training dataset:
<h2>HW2.6 Benchmark your code with the Python SciKit-Learn implementation of the multinomial Naive Bayes algorithm</h2>
It always a good idea to benchmark your solutions against publicly available libraries such as SciKit-Learn, The Machine Learning toolkit available in Python. In this exercise, we benchmark ourselves against the SciKit-Learn implementation of multinomial Naive Bayes. For more information on this implementation see: http://scikit-learn.org/stable/modules/naive_bayes.html more
In this exercise, please complete the following:
— Run the Multinomial Naive Bayes algorithm (using default settings) from SciKit-Learn over the same training data used in HW2.5 and report the misclassification error (please note some data preparation might be needed to get the Multinomial Naive Bayes algorithm from SkiKit-Learn to run over this dataset)
- Prepare a table to present your results, where rows correspond to approach used (SkiKit-Learn versus your Hadoop implementation) and the column presents the training misclassification error
— Explain/justify any differences in terms of training error rates over the dataset in HW2.5 between your Multinomial Naive Bayes implementation (in Map Reduce) versus the Multinomial Naive Bayes implementation in SciKit-Learn
<h2>HHW 2.6.1 OPTIONAL (note this exercise is a stretch HW and optional)</h2>
— Run the Bernoulli Naive Bayes algorithm from SciKit-Learn (using default settings) over the same training data used in HW2.6 and report the misclassification error
- Discuss the performance differences in terms of misclassification error rates over the dataset in HW2.5 between the Multinomial Naive Bayes implementation in SciKit-Learn with the Bernoulli Naive Bayes implementation in SciKit-Learn. Why such big differences. Explain.
Which approach to Naive Bayes would you recommend for SPAM detection? Justify your selection.
<h2>HW2.7 OPTIONAL (note this exercise is a stretch HW and optional)</h2>
The Enron SPAM data in the following folder enron1-Training-Data-RAW is in raw text form (with subfolders for SPAM and HAM that contain raw email messages in the following form:
--- Line 1 contains the subject
--- The remaining lines contain the body of the email message.
In Python write a script to produce a TSV file called train-Enron-1.txt that has a similar format as the enronemail_1h.txt that you have been using so far. Please pay attend to funky characters and tabs. Check your resulting formated email data in Excel and in Python (e.g., count up the number of fields in each row; the number of SPAM mails and the number of HAM emails). Does each row correspond to an email record with four values? Note: use "NA" to denote empty field values.
<h2>HW2.8 OPTIONAL</h2>
Using Hadoop Map-Reduce write job(s) to perform the following:
-- Train a multinomial Naive Bayes Classifier with Laplace plus one smoothing using the data extracted in HW2.7 (i.e., train-Enron-1.txt). Use all white-space delimitted tokens as independent input variables (assume spaces, fullstops, commas as delimiters). Drop tokens with a frequency of less than three (3).
-- Test the learnt classifier using enronemail_1h.txt and report the misclassification error rate. Remember to use all white-space delimitted tokens as independent input variables (assume spaces, fullstops, commas as delimiters). How do we treat tokens in the test set that do not appear in the training set?
<h2>HW2.8.1 OPTIONAL</h2>
— Run both the Multinomial Naive Bayes and the Bernoulli Naive Bayes algorithms from SciKit-Learn (using default settings) over the same training data used in HW2.8 and report the misclassification error on both the training set and the testing set
- Prepare a table to present your results, where rows correspond to approach used (SciKit-Learn Multinomial NB; SciKit-Learn Bernouili NB; Your Hadoop implementation) and the columns presents the training misclassification error, and the misclassification error on the test data set
- Discuss the performance differences in terms of misclassification error rates over the test and training datasets by the different implementations. Which approch (Bernouili versus Multinomial) would you recommend for SPAM detection? Justify your selection.
<h2>=====================
END OF HOMEWORK</h2>
| github_jupyter |
```
import numpy as np
import sys
sys.path.append('../external/Transformer_modules/')
sys.path.append('../src/')
import torch, torch.nn as nn
import torch.nn.functional as F
from modules import MultiHeadAttention, PositionwiseFeedForward
import mnist
%load_ext autoreload
%autoreload 2
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class GlobalAveragePooling(nn.Module):
    """Average a tensor over one dimension (default: the last).

    Used to pool per-point features of a point cloud into a single feature
    vector before the classification head.
    """
    def __init__(self, dim=-1):
        # Fixed: `super(self.__class__, self).__init__()` recurses infinitely
        # if this class is ever subclassed (self.__class__ is then the
        # subclass); use the standard zero-argument form instead.
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # Reduce x along self.dim, dropping that dimension.
        return x.mean(dim=self.dim)
class Discriminator(nn.Module):
    """Point-cloud classifier.

    Pipeline: per-point linear embedding -> multi-head self-attention ->
    position-wise feed-forward -> global average pool over points -> 10 logits.
    """
    def __init__(self, in_dim, hidden_dim=100, ffn_dim=200, n_head=8):
        # Modernized: zero-argument super() (Python 3 idiom).
        super().__init__()
        # Per-point embedding into the hidden space.
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.constant_(self.fc1.bias, 0.0)
        # Self-attention across the points of a cloud.
        self.mha_1 = MultiHeadAttention(n_head=n_head, d_model=hidden_dim)
        self.ffn_1 = PositionwiseFeedForward(hidden_dim, ffn_dim, use_residual=False)
        # Pool over the point dimension (dim=1) to get one vector per cloud.
        self.gl_1 = GlobalAveragePooling(dim=1)
        # Classification head producing the 10 class scores.
        self.fc2 = nn.Linear(hidden_dim, 10)
        nn.init.xavier_normal_(self.fc2.weight)
        nn.init.constant_(self.fc2.bias, 0.0)

    def forward(self, x):
        h1 = F.relu(self.fc1(x))
        h2 = self.mha_1(h1)
        h3 = self.ffn_1(h2)
        score = self.fc2(self.gl_1(h3))
        return score
# Convert MNIST images into 500-point 2D point clouds for train / validation.
x_train = mnist.make_clouds(mnist.x_train, 500)
x_val = mnist.make_clouds(mnist.x_val, 500)
y_train = mnist.y_train
y_val = mnist.y_val
# Two input features per point (its 2D coordinates); model lives on GPU 0.
model = Discriminator(2).cuda(0)
x_test = mnist.mnist_test
def compute_loss(X_batch, y_batch):
    # Mean cross-entropy of the model's logits against integer class labels,
    # computed on GPU 0 (uses the module-level `model`).
    inputs = Variable(torch.FloatTensor(X_batch)).cuda(0)
    targets = Variable(torch.LongTensor(y_batch)).cuda(0)
    predictions = model(inputs)
    return F.cross_entropy(predictions, targets).mean()
def iterate_minibatches(X, y, batchsize):
    """Yield (X, y) mini-batches in a fresh random order each call.

    The last batch may be smaller when len(X) is not a multiple of batchsize.
    """
    order = np.random.permutation(np.arange(len(X)))
    for offset in range(0, len(order), batchsize):
        chosen = order[offset:offset + batchsize]
        yield X[chosen], y[chosen]
# Train the point-cloud Discriminator with Adam, tracking per-batch training
# loss and per-batch validation accuracy across epochs.
opt = torch.optim.Adam(model.parameters())
import time
num_epochs = 150 # total amount of full passes over training data
batch_size = 200
train_loss = []     # one entry per training batch (all epochs)
val_accuracy = []   # one entry per validation batch (all epochs)
for epoch in range(num_epochs):
    start_time = time.time()
    model.train(True)
    for X_batch, y_batch in iterate_minibatches(x_train,y_train,batchsize=batch_size):
        # train on batch
        loss = compute_loss(X_batch, y_batch)
        loss.backward()
        opt.step()
        # Gradients are cleared after the step, i.e. before the next
        # backward() — equivalent to zeroing at the top of the loop.
        opt.zero_grad()
        train_loss.append(loss.cpu().detach().numpy())
        del loss
    # And a full pass over the validation data:
    model.train(False) # disable dropout / use averages for batch_norm
    for X_batch, y_batch in iterate_minibatches(x_val, y_val, batch_size):
        logits = model(Variable(torch.FloatTensor(X_batch)).cuda())
        # Predicted class = argmax over the 10 logits.
        y_pred = logits.max(1)[1].cpu().detach().numpy()
        val_accuracy.append(np.mean(y_batch == y_pred))
        del logits
    # Then we print the results for this epoch:
    # (the slices below average only the batches of the current epoch)
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - start_time))
    print("  training loss (in-iteration): \t{:.6f}".format(
        np.mean(train_loss[-len(x_train) // batch_size :])))
    print("  validation accuracy: \t\t\t{:.2f} %".format(
        np.mean(val_accuracy[-len(x_val) // batch_size :]) * 100))
```
| github_jupyter |
# Motivación: Redes Neuronales Convolucionales
La información que extraemos de las entradas sensoriales a menudo está determinada por su contexto. Con las imágenes, podemos suponer que los píxeles cercanos están estrechamente relacionados y su información colectiva es más relevante cuando se toma como una unidad. Por el contrario, podemos suponer que los píxeles individuales no transmiten información relacionada entre sí. Por ejemplo, para reconocer letras o dígitos, necesitamos analizar la dependencia de píxeles cercanos, porque determinan la forma del elemento. De esta manera, podríamos calcular la diferencia entre, por ejemplo, un 0 o un 1. Los píxeles de una imagen están organizados en una cuadrícula bidimensional, y si la imagen no es en escala de grises, tendremos una tercera dimensión para Los mapas de colores. Alternativamente, una imagen de resonancia magnética (MRI) también usa espacio tridimensional. Puede recordar que, hasta ahora, si queríamos alimentar una imagen a una red neuronal, teníamos que cambiarla de una matriz bidimensional a una matriz unidimensional. Las CNN están diseñadas para abordar este problema: cómo hacer que la información perteneciente a las neuronas que están más cerca sea más relevante que la información proveniente de las neuronas que están más separadas. En problemas visuales, esto se traduce en hacer que las neuronas procesen información proveniente de píxeles que están cerca uno del otro. Con CNNs, podremos alimentar entradas de una, dos o tres dimensiones y la red producirá una salida de la misma dimensionalidad. Como veremos más adelante, esto nos dará varias ventajas
Cuando tratamos de clasificar las imágenes CIFAR-10 usando una red de capas completamente conectadas con poco éxito. Una de las razones es que se sobreajustan. Si miramos la primera capa oculta de esa red, que tiene 1.024 neuronas. El tamaño de entrada de la imagen es 32x32x3 = 3,072. Por lo tanto, la primera capa oculta tenía un total de 2072 * 1024 = 314, 5728 pesos. ¡Ese no es un número pequeño! No solo es fácil sobreajustar una red tan grande, sino que también es ineficiente en la memoria. Además, cada neurona de entrada (o píxel) está conectada a cada neurona en la capa oculta. Debido a esto, la red no puede aprovechar la proximidad espacial de los píxeles, ya que no tiene una manera de saber qué píxeles están cerca uno del otro. Por el contrario, las CNN tienen propiedades que proporcionan una solución efectiva a estos problemas:
- Conectan neuronas, que solo corresponden a píxeles vecinos de la imagen. De esta manera, las neuronas están "forzadas" a recibir información de otras neuronas que están espacialmente cercanas. Esto también reduce el número de pesos, ya que no todas las neuronas están interconectadas.
- Una CNN utiliza el uso compartido de parámetros. En otras palabras, se comparte un número limitado de pesos entre todas las neuronas de una capa. Esto reduce aún más la cantidad de pesas y ayuda a combatir el sobreajuste. Puede sonar confuso, pero quedará claro en la siguiente sección.
La capa convolucional es el bloque de construcción más importante de una CNN. Consiste en un conjunto de filtros (también conocidos como núcleos o detectores de características), donde cada filtro se aplica en todas las áreas de los datos de entrada. Un filtro se define por un conjunto de pesos aprendibles. Como un guiño al tema en cuestión, la siguiente imagen ilustra esto muy bien:

Se muestra una capa de entrada bidimensional de una red neuronal. Por el bien de la simplicidad, asumiremos que esta es la capa de entrada, pero puede ser cualquier capa de la red. Como hemos visto en los capítulos anteriores, cada neurona de entrada representa la intensidad de color de un píxel (asumiremos que es una imagen en escala de grises por simplicidad). Primero, aplicaremos un filtro 3x3 en la parte superior derecha esquina de la imagen. Cada neurona de entrada está asociada con un solo peso del filtro. Tiene nueve pesos, debido a las nueve neuronas de entrada, pero, en general, el tamaño es arbitrario (2x2, 4x4, 5x5, etc.). La salida del filtro es una suma ponderada de sus entradas (el activaciones de las neuronas de entrada). Su propósito es resaltar una característica específica en la entrada, por ejemplo, una arista o una línea. El grupo de neuronas cercanas, que participan en la entrada. se llaman el campo receptivo. En el contexto de la red, la salida del filtro representa el valor de activación de una neurona en la siguiente capa. La neurona estará activa, si la función es presente en esta ubicación espacial.
Para cada nueva neurona, deslizaremos el filtro por la imagen de entrada y calcularemos su salida (la suma ponderada) con cada nuevo conjunto de neuronas de entrada. En el siguiente diagrama, puede ver cómo calcular las activaciones de las siguientes dos posiciones (un píxel para derecho):

- Al decir "arrastrar", queremos decir que los pesos del filtro no cambian en la imagen. En efecto, utilizaremos los mismos nueve pesos de filtro para calcular las activaciones de todas las neuronas de salida, cada vez con un conjunto diferente de neuronas de entrada. Llamamos a este parámetro compartir, y lo hacemos por dos razones:
- Al reducir el número de pesos, reducimos la huella de la memoria y evitamos el sobreajuste. El filtro resalta características específicas. Podemos suponer que esta característica es útil, independientemente de su posición en la imagen. Al compartir pesos, garantizamos que el filtro podrá ubicar la función en toda la imagen.
Hasta ahora, hemos descrito la relación de corte uno a uno, donde la salida es un solo corte, que toma la entrada de otro segmento (o una imagen). Esto funciona bien en escala de grises, pero ¿cómo lo adaptamos para imágenes en color (relación n a 1)? ¡Una vez más, es simple! Primero, dividiremos la imagen en canales de color. En el caso de RGB, serían tres. Podemos pensar en cada canal de color como un segmento de profundidad, donde los valores son las intensidades de píxeles para el color dado (R, G o B), como se muestra en el siguiente ejemplo:
La combinación de sectores se denomina volumen de entrada con una profundidad de 3. Un filtro único de 3x3 es
aplicado a cada rebanada. La activación de una neurona de salida es solo la suma ponderada de filtros aplicados en todos los sectores. En otras palabras, combinaremos los tres filtros en un gran 3 x 3 x 3 + 1 filtro con 28 pesos (agregamos profundidad y un solo sesgo). Entonces, calcularemos el suma ponderada aplicando los pesos relevantes a cada segmento.
- Los mapas de características de entrada y salida tienen diferentes dimensiones. Digamos que tenemos una capa de entrada con tamaño (ancho, alto) y un filtro con dimensiones (filter_w, filter_h). Después de aplicar la convolución, las dimensiones de la capa de salida son (ancho - filtro_w + 1, altura - filtro_h + 1).
Como mencionamos, un filtro resalta una característica específica, como bordes o líneas. Pero, en general, muchas características son importantes y nos interesarán todas. ¿Cómo los destacamos a todos? Como de costumbre, es simple. Aplicaremos varios filtros en el conjunto de sectores de entrada. Cada filtro generará un segmento de salida único, que resalta la característica, detectada por el filtro (relación de n a m). Un sector de salida puede recibir información de:
- Todos los sectores de entrada, que es el estándar para capas convolucionales. En este escenario, un segmento de salida único es un caso de la relación n-a-1, que describimos anteriormente. Con múltiples segmentos de salida, la relación se convierte en n-m. En otras palabras, cada segmento de entrada contribuye a la salida de cada segmento de salida.
- Una sola porción de entrada. Esta operación se conoce como convolución profunda. Es un
tipo de reversión del caso anterior. En su forma más simple, aplicamos un filtro sobre un único segmento de entrada para producir un único segmento de salida. Este es un caso de la relación uno a uno, que describimos en la sección anterior. Pero también podemos especificar un multiplicador de canal (un entero m), donde aplicamos filtros m sobre un solo sector de salida para producir m sectores de salida. Este es un caso de relación de 1 a m. El número total de segmentos de salida es n * m.
Denotemos el ancho y la altura del filtro con Fw y Fh, la profundidad del volumen de entrada con D y la profundidad del volumen de salida con M. Luego, podemos calcular el número total de pesos W en una capa convolucional con el siguiente ecuación:
\begin{equation}
W=(D*F_w *F_h+1)*M
\end{equation}
Digamos que tenemos tres sectores y queremos aplicarles cuatro filtros de 5x5. Entonces la la capa convolucional tendrá un total de (3x5x5 + 1) * 4 = 304 pesos, y cuatro cortes de salida (volumen de salida con una profundidad de 4), un sesgo por corte. El filtro para cada segmento de salida tendrá tres parches de filtro de 5x5 para cada uno de los tres segmentos de entrada y un sesgo para un total de 3x5x5 + 1 = 76 pesos. La combinación de los mapas de salida se denomina volumen de salida con una profundidad de cuatro.
```
import numpy as np

def conv(image, im_filter, plot=True):
    """
    Convolve a 2-D grayscale image with a 2-D filter ("valid" mode, stride 1).

    :param image: grayscale image as a 2-dimensional numpy array
    :param im_filter: 2-dimensional numpy array (or nested list) of weights;
        may be non-square
    :param plot: if True (default, matching the original behavior), display
        the original and the filtered images side by side
    :return: the filtered image, clipped to the valid [0, 255] range
    """
    # input dimensions
    height = image.shape[0]
    width = image.shape[1]
    # filter dimensions, tracked separately so non-square filters work
    filter_h = len(im_filter)
    filter_w = len(im_filter[0])
    # output image with reduced dimensions (no padding)
    im_c = np.zeros((height - filter_h + 1, width - filter_w + 1))
    # iterate over all rows and columns of the output
    for row in range(im_c.shape[0]):
        for col in range(im_c.shape[1]):
            # apply the filter: weighted sum over the receptive field
            for i in range(filter_h):
                for j in range(filter_w):
                    im_c[row, col] += image[row + i, col + j] * im_filter[i][j]
    # fix out-of-bounds values so the result stays a displayable intensity map
    im_c[im_c > 255] = 255
    im_c[im_c < 0] = 0
    # plot images for comparison (import kept local so the numeric path
    # has no matplotlib dependency)
    if plot:
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        plt.figure()
        plt.imshow(image, cmap=cm.Greys_r)
        plt.show()
        plt.imshow(im_c, cmap=cm.Greys_r)
        plt.show()
    return im_c
import requests
from PIL import Image
from io import BytesIO

# Download the sample photograph
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Commander_Eileen_Collins_-_GPN-2000-001177.jpg/382px-Commander_Eileen_Collins_-_GPN-2000-001177.jpg?download"
response = requests.get(url)
rgb_pixels = np.asarray(Image.open(BytesIO(response.content)).convert("RGB"))

# Collapse the three color channels into one grayscale plane
gray_pixels = np.mean(rgb_pixels, axis=2, dtype=np.uint)

# Blur: all weights equal, so each output pixel is a local average
blur_kernel = np.full([10, 10], 1. / 100)
conv(gray_pixels, blur_kernel)

# Sobel operators: horizontal and vertical edge detectors
sobel_horizontal = [[-1, -2, -1],
                    [0, 0, 0],
                    [1, 2, 1]]
conv(gray_pixels, sobel_horizontal)

sobel_vertical = [[-1, 0, 1],
                  [-2, 0, 2],
                  [-1, 0, 1]]
conv(gray_pixels, sobel_vertical)
```
# Stride y relleno en capas convolucionales
Hasta ahora, asumimos que el deslizamiento del filtro ocurre un píxel a la vez, pero ese no es siempre el caso. Podemos deslizar el filtro en múltiples posiciones. Este parámetro de las capas convolucionales se llama zancada. Por lo general, el paso es el mismo en todas las dimensiones de la entrada. En el siguiente diagrama, podemos ver una capa convolucional con un paso de 2:

Al usar una zancada mayor que 1, reducimos el tamaño del segmento de salida. En la sección anterior, presentamos una fórmula simple para el tamaño de salida, que incluía los tamaños de la entrada y el núcleo. Ahora, lo ampliaremos para incluir también el paso: ((ancho - filtro_w) / stride_w + 1, ((altura - filtro_h) / stride_h + 1).
Por ejemplo, el tamaño de salida de un corte cuadrado generado por una imagen de entrada de 28x28, convolucionado con un filtro 3x3 con zancada 1, sería 28-3 + 1 = 26. Pero con zancada 2, obtenemos (28-3) / 2 + 1 = 13. El efecto principal del paso más grande es un aumento en el campo receptivo de las neuronas de salida. Vamos a explicar esto con un ejemplo. Si usamos Stride 2, el tamaño del segmento de salida será aproximadamente cuatro veces menor que el de entrada. En otras palabras, una neurona de salida "cubrirá" el área, que es cuatro veces más grande, en comparación con las neuronas de entrada. Las neuronas en el
Las siguientes capas capturarán gradualmente la entrada de regiones más grandes de la imagen de entrada. Esto es importante, porque les permitiría detectar características más grandes y más complejas de la entrada.
Las operaciones de convolución que hemos discutido hasta ahora han producido una salida menor que la entrada. Pero, en la práctica, a menudo es deseable controlar el tamaño de la salida. Podemos resolver esto rellenando los bordes del segmento de entrada con filas y columnas de ceros antes de la operación de convolución. La forma más común de usar relleno es producir resultados con las mismas dimensiones que la entrada. En el siguiente diagrama, podemos ver una capa convolucional con relleno de 1:

Las neuronas blancas representan el relleno. Los segmentos de entrada y salida tienen las mismas dimensiones (neuronas oscuras). Esta es la forma más común de usar relleno. Los ceros recién rellenados participarán en la operación de convolución con el corte, pero no afectarán el resultado. La razón es que, aunque las áreas rellenadas estén conectadas con pesos a la siguiente capa, siempre multiplicaremos esos pesos por el valor rellenado, que es 0. Ahora agregaremos relleno a la fórmula del tamaño de salida. Deje que el tamaño del segmento de entrada sea I = (Iw, Ih), el tamaño del filtro F = (Fw, Fh), la zancada S = (Sw, Sh) y el relleno P = (Pw, Ph). Entonces el tamaño O = (Ow, Oh) del segmento de salida viene dado por las siguientes ecuaciones:
\begin{equation}
O_w=\frac{I_w+2P_w-F_w}{S_w}+1
\end{equation}
\begin{equation}
O_h=\frac{I_h+2P_h-F_h}{S_h}+1
\end{equation}
# Capas de pooling
En la sección anterior, explicamos cómo aumentar el campo receptivo de las neuronas usando un paso más grande que 1. Pero también podemos hacer esto con la ayuda de la agrupación de capas. Una capa de agrupación divide la porción de entrada en una cuadrícula, donde cada celda de la cuadrícula representa un campo receptivo de varias neuronas (tal como lo hace una capa convolucional). Luego, se aplica una operación de agrupación sobre cada celda de la cuadrícula. Existen diferentes tipos de capas de agrupación. Las capas de agrupación no cambian la profundidad del volumen, porque la operación de agrupación se realiza de forma independiente en cada segmento.
- Max pooling: es la forma más popular de pooling. La operación de agrupación máxima lleva a la neurona con el valor de activación más alto en cada campo receptivo local (celda de cuadrícula) y propaga solo ese valor hacia adelante. En la siguiente figura, podemos ver un ejemplo de agrupación máxima con un campo receptivo de 2x2:

- Average Pooling: es otro tipo de agrupación, donde la salida de cada campo receptivo es el valor medio de todas las activaciones dentro del campo. El siguiente es un ejemplo de agrupación promedio

Las capas de agrupación se definen por dos parámetros:
- Stride, que es lo mismo que con las capas convolucionales
- Tamaño del campo receptivo, que es el equivalente del tamaño del filtro en capas convolucionales.
# Estructura de una red neuronal convolucional

Normalmente, alternaríamos una o más capas convolucionales con una capa de agrupación. De esta forma, las capas convolucionales pueden detectar características en cada nivel del tamaño del campo receptivo. El tamaño del campo receptivo agregado de las capas más profundas es mayor que las del comienzo de la red. Esto les permite capturar características más complejas de regiones de entrada más grandes. Vamos a ilustrar esto con un ejemplo. Imagine que la red utiliza convoluciones de 3x3 con zancada 1 y agrupación de 2x2 con zancada 2:
- las neuronas de la primera capa convolucional recibirán información de 3x3 píxeles de la imagen.
- Un grupo de neuronas de salida 2x2 de la primera capa tendrá un tamaño de campo receptivo combinado de 4x4 (debido a la zancada).
- Después de la primera operación de agrupación, este grupo se combinará en una sola neurona de la capa de agrupación.
- La segunda operación de convolución toma información de las neuronas de agrupación 3x3. Por lo tanto, recibirá la entrada de un cuadrado con lados 3x4 = 12 (o un total de 12x12 = 144) píxeles de la imagen de entrada.
Utilizamos las capas convolucionales para extraer características de la entrada. Las características detectadas por las capas más profundas son muy abstractas, pero tampoco son legibles por los humanos. Para resolver este problema, generalmente agregamos una o más capas completamente conectadas después de la última capa convolucional / agrupación. En este ejemplo, la última capa (salida) completamente conectada utilizará softmax para estimar las probabilidades de clase de la entrada. Puede pensar en las capas totalmente conectadas como traductores entre el idioma de la red (que no entendemos) y el nuestro. Las capas convolucionales más profundas generalmente tienen más filtros (por lo tanto, mayor profundidad de volumen), en comparación con los iniciales. Un detector de características al comienzo de la red funciona en un pequeño campo receptivo. Solo puede detectar un número limitado de características, como bordes o líneas, compartidas entre todas las clases. Por otro lado, una capa más profunda detectaría características más complejas y numerosas. Por ejemplo, si tenemos varias clases, como automóviles, árboles o personas, cada una tendrá su propio conjunto de características, como neumáticos, puertas, hojas y caras, etc. Esto requeriría más detectores de características.
```
# Suppress noisy framework warnings BEFORE training (the original placed
# this after model.evaluate, where it had no effect)
import warnings
warnings.filterwarnings("ignore")

# First: load the MNIST data
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Flatten
# np_utils was removed in Keras 2.4+; keras.utils.to_categorical is the
# supported spelling (and the one the CIFAR-10 cell below already uses)
from keras.utils import to_categorical

(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
# reshape to (samples, height, width, channels) as Conv2D expects
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
# one-hot encode the 10 digit classes
Y_train = to_categorical(Y_train, 10)
Y_test = to_categorical(Y_test, 10)

# Second: build the convolutional network
model = Sequential([Convolution2D(filters=64, kernel_size=(3, 3), input_shape=(28, 28, 1)),
                    Activation('sigmoid'),
                    Convolution2D(filters=32, kernel_size=(3, 3)),
                    Activation('sigmoid'),
                    MaxPooling2D(pool_size=(4, 4)),
                    Flatten(),
                    Dense(64),
                    Activation('relu'),
                    Dense(10),
                    Activation('softmax')])
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adadelta')
model.fit(X_train, Y_train, batch_size=100, epochs=5, validation_split=0.1, verbose=1)
score = model.evaluate(X_test, Y_test, verbose=1)
print('Test accuracy:', score[1])
```
# Preprocesamiento de datos
Hasta ahora, hemos alimentado la red con entradas no modificadas. En el caso de las imágenes, estas son intensidades de píxeles en el rango [0: 255]. Pero eso no es óptimo. Imagine que tenemos una imagen RGB, donde las intensidades en uno de los canales de color son muy altas en comparación con los otros dos. Cuando alimentamos la imagen a la red, los valores de este canal serán dominantes, disminuyendo los demás. Esto podría sesgar los resultados, porque en realidad cada canal tiene la misma importancia. Para resolver esto, necesitamos preparar (o normalizar) los datos, antes de alimentarlos a la red. En la práctica, usaremos dos tipos de normalización:
- Feature Scaling: Esta operación escala todas las entradas en el rango [0,1]. Por ejemplo, un píxel con intensidad 125 tendría un valor escalado de 125/255 ≈ 0,49. El escalado de características es rápido y fácil de implementar.
- Standard Score: Aquí μ y σ son la media y la desviación estándar de todos los datos de entrenamiento. Por lo general, se calculan por separado para cada dimensión de entrada. Por ejemplo, en una imagen RGB, calcularíamos la media μ y σ para cada canal. Debemos tener en cuenta que μ y σ deben calcularse solo en los datos de entrenamiento y luego aplicarse a los datos de la prueba.
# Dropout
Dropout es una técnica de regularización, que se puede aplicar a la salida de algunas de las capas de red. El dropout aleatorio y periódico elimina algunas de las neuronas (junto con sus conexiones de entrada y salida) de la red. Durante un mini lote de entrenamiento, cada neurona tiene una probabilidad p de ser descartada estocásticamente. Esto es para asegurar que ninguna neurona termine confiando demasiado en otras neuronas y "aprenda" algo útil para la red. El abandono se puede aplicar después de capas convolucionales, de agrupación o completamente conectadas. En la siguiente ilustración, podemos ver un abandono de capas completamente conectadas:

# Aumento de datos
Una de las técnicas de regularización más eficientes es el aumento de datos. Si los datos de entrenamiento son demasiado pequeños, la red podría comenzar a sobreajustarse. El aumento de datos ayuda a contrarrestar esto al aumentar artificialmente el tamaño del conjunto de entrenamiento. Usemos un ejemplo. En los ejemplos de MNIST y CIFAR-10, hemos entrenado la red en varias épocas. La red "verá" cada muestra del conjunto de datos una vez por época. Para evitar esto, podemos aplicar aumentos aleatorios a las imágenes, antes de usarlas para el entrenamiento. Las etiquetas permanecerán igual. Algunos de los aumentos de imagen más populares son:

```
import keras
from keras.datasets import cifar10
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator

batch_size = 50

# Load CIFAR-10 and scale pixel intensities to [0, 1]
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# one-hot encode the 10 classes
Y_train = keras.utils.to_categorical(Y_train, 10)
Y_test = keras.utils.to_categorical(Y_test, 10)

# Random augmentations (rotation, shifts, horizontal flip) plus featurewise
# standardization, whose statistics come from the training set only
data_generator = ImageDataGenerator(rotation_range=90, width_shift_range=0.1, height_shift_range=0.1, featurewise_center=True, featurewise_std_normalization=True, horizontal_flip=True)
data_generator.fit(X_train)
# standardize the test set with the training-set statistics
for i in range(len(X_test)):
    X_test[i] = data_generator.standardize(X_test[i])

# Three conv blocks (32 -> 64 -> 128 filters), each with batch norm,
# ELU activations, 2x2 max pooling and dropout
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=X_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(128, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# steps_per_epoch counts BATCHES per epoch, not samples; the original
# len(X_train) made every "epoch" run batch_size times over the data
model.fit_generator(generator=data_generator.flow(x=X_train, y=Y_train, batch_size=batch_size),
                    steps_per_epoch=len(X_train) // batch_size,
                    epochs=100,
                    validation_data=(X_test, Y_test),
                    workers=4)
```
| github_jupyter |
# Investigating the historical running data to evaluate my performance (Session summaries)
Many runners use third-party apps to track running activities. These apps and its companion website provide many visual charts with analytical metrics to help runners review their running performances to set up new training plans or make adjustments. In this notebook, we will extract this running data and analyse it locally using runpandas. We also take the opportunity to illustrate the runpandas methods for summarizing the historical workouts and get some valuable insights.
## Looking at the data
The example data set used in this tutorial contains 68 sessions of a single female runner from the period of 2020 until 2021.
The code chunk below loads the data using the method `runpandas.read_dir_aggregate`, which allows the user to read all the tracking files of a supported format in a directory and combine them in a data frame split by sessions based on the timestamps of each activity. It means that each workout file will be stored in separate lines in the dataframe.
```
import warnings
# hide library deprecation noise so the notebook output stays clean
warnings.filterwarnings('ignore')
import runpandas
# read every tracking file under session/ and stack them into one frame,
# one first-level index entry per workout (keyed by its start time)
session = runpandas.read_dir_aggregate(dirname='session/')
```
In pandas we use the ``pandas.MultiIndex`` which alows the dataframe have multiple columns as a row identifier, while having each index column related to another through a parent/child relationship. In our scenario we have the start time from each activity as the first index level and the timestamps from the activity as the second index level.
```
# Display the aggregated frame and its two-level (start, timestamp) index
session
session.index #MultiIndex (start, timestamp)
```
Now let's see how many activities there are available for analysis. For this question, we also have an acessor ``runpandas.types.acessors.session._SessionAcessor`` that holds several methods for computing the basic running metrics across all the activities and some summary statistics.
```
# Report how many workouts were aggregated into the session frame
activity_count = session.session.count()
print('Total Activities:', activity_count)
```
We can compute the main running metrics (speed, pace, moving, etc) using the session methods available as like the ones available in the ``runpandas.types.metrics.MetricsAcessor`` . By the way, those methods are called inside each metric method, but applying in each of activities separatedely.
```
# In this example we compute the distance and the distance per position across all workouts
session = session.session.distance()
session
# compute the speed for each activity, derived from the distances
session = session.session.speed(from_distances=True)
# compute the pace for each activity
session = session.session.pace()
# compute the inactivity periods for each activity
session = session.session.only_moving()
session
```
After all the computation done, let's going to the next step: the exploration and get some descriptive statistics.
## Exploring the data
After the loading and metrics computation for all the activities, now let's look further the data and get the basic summaries about the sessions: time spent, total distance, mean speed and other insightful statistics in each running activity. For this task, we may accomplish it by calling the method ``runpandas.types.session._SessionAcessor.summarize`` . It will return a basic Dataframe including all the aggregated statistics per activity from the season frame.
```
# Aggregate per-activity statistics (time, distance, speed, pace) into one frame
summary = session.session.summarize()
summary
```
Here, some descriptive statistics:
```
# Interval in days between consecutive workouts.
# .dt.days replaces astype('timedelta64[D]'), which pandas 2.x rejects.
summary['day_diff'] = summary.index.to_series().diff().dt.days.astype('Int64')
# season-wide averages, repeated on every row for later plotting
summary['pace_moving_all_mean'] = summary.mean_moving_pace.mean()
summary['distance_all_mean'] = round(summary.total_distance.mean()/1000,2)
# convert speeds from m/s to km/h
summary['mean_speed'] = summary['mean_speed'] * 3.6
summary['max_speed'] = summary['max_speed'] * 3.6
summary['mean_moving_speed'] = summary['mean_moving_speed'] * 3.6
print('Session Interval:', (summary.index.to_series().max() - summary.index.to_series().min()).days, 'days')
print('Total Workouts:', len(summary), 'runnings')
print('Total KM Distance:', summary['total_distance'].sum() / 1000)
print('Running Intervals (average):' , round(summary.day_diff.mean(), 2), 'days')
print('Average Pace (all runs):', summary.mean_pace.mean())
print('Average Moving Pace (all runs):', summary.mean_moving_pace.mean())
print('Average KM Distance (all runs):', round(summary.total_distance.mean()/ 1000,2))
```
As we can see above, we analyzed the period of 366 days (one year) of running workouts. In this period, she ran 68 times which achieved the total distance of 491 km! The average interval in days between two actitivies is 5 days, with average moving pace of 06'02" per km and average distance of 7.23km! Great numbers for a starter runner!
## Exploring the data with some visualizations
At this point, I have the data to start some visualization and analysis. The first question that popped was : How was the evolution of my average moving pace through all the runs and the corresponding average? Let's use the ``matplotlib`` package to illustrate the possible answer.
```
# Convert the pace (timedelta) columns to float minutes so they can be plotted
import datetime
summary['mean_moving_pace_float'] = summary['mean_moving_pace'] / datetime.timedelta(minutes=1)
summary['pace_moving_all_mean_float'] = summary['pace_moving_all_mean'] / datetime.timedelta(minutes=1)
# Replace NA values with 0. Assign the result back instead of calling
# fillna(inplace=True) on a column selection, which is unreliable under
# pandas copy-on-write semantics.
summary['day_diff'] = summary['day_diff'].fillna(0)
import matplotlib.pyplot as plt
plt.subplots(figsize=(8, 5))
plt.plot(summary.index, summary.mean_moving_pace_float, color='silver')
plt.plot(summary.pace_moving_all_mean_float, color='purple', linestyle='dashed', label='average')
plt.title("Pace Evolution")
plt.xlabel("Runnings")
plt.ylabel("Pace")
plt.legend()
```
We can see some outliers at the running workouts at september 2020. Let's do some filtering (excluding those with "unreasonable" paces that might be mislabelled)
```
# Keep only runs whose moving pace falls in a plausible 5-10 min/km band;
# everything outside that range is treated as a mislabelled workout
pace_band = summary['mean_moving_pace_float'].between(5, 10)
summary_without_outliers = summary[pace_band]

per_run_pace = summary_without_outliers.mean_moving_pace_float
plt.plot(summary_without_outliers.index, per_run_pace, color='silver')
plt.plot(summary_without_outliers.pace_moving_all_mean_float, color='purple', linestyle='dashed', label='average')
plt.title("Pace Evolution")
plt.xlabel("Runnings")
plt.xticks(rotation=90)
plt.ylabel("Pace")
plt.legend()
```
That looks much better now. Now we can see clearly that her pace kept floating between 6'50 min and 6'00 min over all this year. I also created a new variable at my summary dataframe called ``day_diff``, which means the interval in day between consecutive workouts. This variable helped me to see if her pace grew up as the number of the days without running also increased.
```
# Recompute the day gap on the filtered frame (outlier rows removed).
# .dt.days replaces astype('timedelta64[D]'), which pandas 2.x rejects;
# assigning the filled result back avoids fillna(inplace=True) on a slice.
summary_without_outliers['day_diff'] = summary_without_outliers.index.to_series().diff().dt.days.astype('Int64')
summary_without_outliers['day_diff'] = summary_without_outliers['day_diff'].fillna(0)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(summary_without_outliers.index, summary_without_outliers.mean_moving_pace_float, color='silver')
ax.set_xlabel('Runnings')
ax.set_ylabel('Pace', color='silver')
# second y-axis sharing the same x: rest days between consecutive runs
ax2 = ax.twinx()
ax2.plot(summary_without_outliers.index, summary_without_outliers.day_diff.astype('int'), color='purple')
ax2.set_ylabel('Days Without Running', color='indigo', rotation=270)
plt.title('Pace vs Days without Running')
plt.show()
```
As we can see at the chart above, there isn't a linear correlation between the number of the days without running and her growing pace. In her case it might be the period of rest helped her to a better recovery. Can we have a statistical evidence ?
The following code will create a regression plot of her interval of days vs pace. I will use Seaborn to create plot. As we can see there is negative correlation between the two variables, although, it's not strong.
```
import seaborn as sns
sns.set(style="ticks", context="talk")
# Regression of rest-day gap vs moving pace; the fitted slope visualizes
# the (weak, negative) correlation discussed above
sns.regplot(x=summary_without_outliers.day_diff.astype('int'), y=summary_without_outliers.mean_moving_pace_float).set_title("Day Diff vs Pace")
```
Let's explore now the relationship between the mean pace and the average speed. It is obvious that the bigger the average speed the pace is lower. More fast means less time to cover the distance.
```
# Pace and speed move in opposite directions; overlay them per run
plt.subplots(figsize=(8, 5))
runs = summary_without_outliers.index
plt.plot(runs, summary_without_outliers.mean_moving_pace_float, color='silver', label='Average Pace')
plt.plot(summary_without_outliers.mean_moving_speed, color='purple', label='Average Speed')
plt.xlabel("Runnings")
plt.title("Average Speed x Average Pace")
plt.legend()
plt.show()
```
The chart below illustrates the evolution of the total distance covered at her runnings workouts. There is a higher variance of her run distances. The average distance for most of the runs is close to 7km. The second chart illustrating the histogram of the run distances show that most of her usual weekday runs are around 4-7km and my longer weekend runs (she started to run longer distances above 7kms very recently).
```
# Distance per run (km) against the season average, then a histogram
plt.subplots(figsize=(8, 5))
plt.plot(summary_without_outliers.index, summary_without_outliers.total_distance / 1000, color='silver')
plt.plot(summary_without_outliers.distance_all_mean, color='purple', linestyle='dashed', label='average')
plt.title("Distance Evolution")
plt.xlabel("Runs")
plt.ylabel("distance")
plt.legend()
plt.show()
# distances in km, binned to expose the typical 4-7km weekday runs
(summary_without_outliers['total_distance'] / 1000.0).hist(bins=30)
```
Now let's see how faster she was and if there is a distinction between longer and shorter runs related to the speed (slower vs faster). Let's try plotting distance against speed to get a better idea.
```
# Scatter distance (km) against pace to look for a speed/distance trend
plt.subplots(figsize=(8, 5))
plt.scatter(summary_without_outliers.total_distance/ 1000, summary_without_outliers.mean_moving_pace_float, color='purple', marker='s')
plt.title("Distance vs. pace")
plt.xlabel("Distance")
plt.ylabel("Pace")
# plt.legend() removed: the scatter carries no label, so the call only
# emitted a "no artists with labels" warning and drew nothing
```
The distribution of her pace across the distance runs is very dispersed. It means a no clear trend there. But what if we restrict it just to this year ?
```
# Restrict to 2021 workouts and repeat the distance-vs-pace scatter
summary_without_outliers_2021 = summary_without_outliers[summary_without_outliers.index > '2021-01-01']
summary_without_outliers_2021
plt.subplots(figsize=(8, 5))
plt.scatter(summary_without_outliers_2021.total_distance/ 1000, summary_without_outliers_2021.mean_moving_pace_float, color='purple', marker='s')
plt.title("Distance vs. pace")
plt.xlabel("Distance")
plt.ylabel("Pace")
# plt.legend() removed: no labeled artists, so it only raised a warning
```
It still does not give us a clear trend, but it depicts that she really started to achieve better performance when she started to run longer distances (paces lower than 6'20 when she started to run above 10km).
Finally, let's see her performance evolution in pace over time in 5km, 10km and 15kms. For this analysis, I had to filter the session dataframe based on distances. In this tutorial, I will assume that the distances between 5km - 5.9km , 10km - 10.9km, 15km - 15.9km will be normalized as 5, 10 , 15km.
```
# Normalize distances into 5/10/15 km buckets (x000-x900 meters each)
summary_without_outliers_5km = summary_without_outliers[summary_without_outliers['total_distance'].between(5000,5900)]
summary_without_outliers_10km = summary_without_outliers[summary_without_outliers['total_distance'].between(10000,10900)]
summary_without_outliers_15km = summary_without_outliers[summary_without_outliers['total_distance'].between(15000,15900)]
# one stacked panel per distance bucket, sharing the date axis
fig, axs = plt.subplots(3, sharex=True, figsize=(18, 16))
fig.suptitle('Average Moving Pace over time (5km, 10km, 15km)')
axs[0].plot(summary_without_outliers_5km.index, summary_without_outliers_5km.mean_moving_pace_float, marker='*')
axs[0].set_title('5km')
axs[1].plot(summary_without_outliers_10km.index, summary_without_outliers_10km.mean_moving_pace_float, marker='*')
axs[1].set_title('10km')
axs[2].plot(summary_without_outliers_15km.index, summary_without_outliers_15km.mean_moving_pace_float, marker='*')
axs[2].set_title('15km')
plt.xlabel('Date')
plt.show()
```
Her 5km pace got better until September 2020, but as she started to run greater distances, her 5km performance got slower. Her 10km got better across 2021. For her 15km, since we only have one observation, there's no insight/trend there.
## Conclusions
* So, as expected, no major insight. However, running regularly helped her to improve her performance, maintaining or lowering her pace.
* She started to run greater distances with excelent paces, for instance the distance run of 15km with the moving pace 5'50.
* There is no trend about her run distance vs pace. We believe that we need to collect more data to might see any new insights.
In this tutorial, we showed the possibilities of using `runpandas` python package to perform several type of running analysis assisted by visualization and data handling packages such as Matplotlib and pandas. With the introduction of session feature, we now can analyse a group of activities and investigate new insights over time.
| github_jupyter |
# Import necessary depencencies
```
import pandas as pd
import numpy as np
import text_normalizer as tn
import model_evaluation_utils as meu
np.set_printoptions(precision=2, linewidth=80)
```
# Load and normalize data
```
# Load the IMDB movie reviews dataset (review text + positive/negative label).
dataset = pd.read_csv(r'movie_reviews.csv')
# take a peek at the data
print(dataset.head())
reviews = np.array(dataset['review'])
sentiments = np.array(dataset['sentiment'])
# build train and test datasets
# (simple positional split: first 35k rows for training, the rest for test)
train_reviews = reviews[:35000]
train_sentiments = sentiments[:35000]
test_reviews = reviews[35000:]
test_sentiments = sentiments[35000:]
# normalize datasets
# (text cleanup via the project's text_normalizer module)
norm_train_reviews = tn.normalize_corpus(train_reviews)
norm_test_reviews = tn.normalize_corpus(test_reviews)
```
# Traditional Supervised Machine Learning Models
## Feature Engineering
```
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# build BOW features on train reviews
# (binary=False -> raw term counts; unigrams + bigrams)
cv = CountVectorizer(binary=False, min_df=0.0, max_df=1.0, ngram_range=(1,2))
cv_train_features = cv.fit_transform(norm_train_reviews)
# build TFIDF features on train reviews
tv = TfidfVectorizer(use_idf=True, min_df=0.0, max_df=1.0, ngram_range=(1,2),
                     sublinear_tf=True)
tv_train_features = tv.fit_transform(norm_train_reviews)
# transform test reviews into features
# (vectorizers are fit on the training corpus only, then reused on test)
cv_test_features = cv.transform(norm_test_reviews)
tv_test_features = tv.transform(norm_test_reviews)
print('BOW model:> Train features shape:', cv_train_features.shape, ' Test features shape:', cv_test_features.shape)
print('TFIDF model:> Train features shape:', tv_train_features.shape, ' Test features shape:', tv_test_features.shape)
```
## Model Training, Prediction and Performance Evaluation
```
from sklearn.linear_model import SGDClassifier, LogisticRegression

# Two linear baselines: L2-regularized logistic regression and a linear SVM
# trained with SGD (hinge loss).
lr = LogisticRegression(penalty='l2', max_iter=100, C=1)
# FIX: `n_iter` was deprecated and then removed from SGDClassifier
# (scikit-learn >= 0.21); `max_iter` is the supported replacement.
svm = SGDClassifier(loss='hinge', max_iter=100)
# Logistic Regression model on BOW features
lr_bow_predictions = meu.train_predict_model(classifier=lr,
                                             train_features=cv_train_features, train_labels=train_sentiments,
                                             test_features=cv_test_features, test_labels=test_sentiments)
meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=lr_bow_predictions,
                                      classes=['positive', 'negative'])
# Logistic Regression model on TF-IDF features
lr_tfidf_predictions = meu.train_predict_model(classifier=lr,
                                               train_features=tv_train_features, train_labels=train_sentiments,
                                               test_features=tv_test_features, test_labels=test_sentiments)
meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=lr_tfidf_predictions,
                                      classes=['positive', 'negative'])
# Linear SVM on BOW features
svm_bow_predictions = meu.train_predict_model(classifier=svm,
                                              train_features=cv_train_features, train_labels=train_sentiments,
                                              test_features=cv_test_features, test_labels=test_sentiments)
meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=svm_bow_predictions,
                                      classes=['positive', 'negative'])
# Linear SVM on TF-IDF features
svm_tfidf_predictions = meu.train_predict_model(classifier=svm,
                                                train_features=tv_train_features, train_labels=train_sentiments,
                                                test_features=tv_test_features, test_labels=test_sentiments)
meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=svm_tfidf_predictions,
                                      classes=['positive', 'negative'])
```
# Newer Supervised Deep Learning Models
```
import gensim
import keras
from keras.models import Sequential
from keras.layers import Dropout, Activation, Dense
from sklearn.preprocessing import LabelEncoder
```
## Prediction class label encoding
```
# Encode the string sentiment labels as integers, then one-hot for Keras.
le = LabelEncoder()
num_classes=2
# tokenize train reviews & encode train labels
tokenized_train = [tn.tokenizer.tokenize(text)
                   for text in norm_train_reviews]
y_tr = le.fit_transform(train_sentiments)
y_train = keras.utils.to_categorical(y_tr, num_classes)
# tokenize test reviews & encode test labels
tokenized_test = [tn.tokenizer.tokenize(text)
                  for text in norm_test_reviews]
# FIX: use transform (not fit_transform) on the test labels so the test
# encoding is guaranteed to reuse the mapping learned from the train labels.
y_ts = le.transform(test_sentiments)
y_test = keras.utils.to_categorical(y_ts, num_classes)
# print class label encoding map and encoded labels
print('Sentiment class label map:', dict(zip(le.classes_, le.transform(le.classes_))))
print('Sample test label transformation:\n'+'-'*35,
      '\nActual Labels:', test_sentiments[:3], '\nEncoded Labels:', y_ts[:3],
      '\nOne hot encoded Labels:\n', y_test[:3])
```
## Feature Engineering with word embeddings
```
# build word2vec model
# NOTE(review): `size=` is the gensim 3.x keyword (renamed to `vector_size`
# in gensim 4.x) — confirm the installed gensim version.
w2v_num_features = 500
w2v_model = gensim.models.Word2Vec(tokenized_train, size=w2v_num_features, window=150,
                                   min_count=10, sample=1e-3)
def averaged_word2vec_vectorizer(corpus, model, num_features):
    """Represent each tokenized document as the average of its word vectors.

    Parameters
    ----------
    corpus : iterable of list[str]
        Tokenized documents.
    model : gensim.models.Word2Vec
        Trained word2vec model (gensim 3.x API, uses ``wv.index2word``).
    num_features : int
        Dimensionality of the word vectors.

    Returns
    -------
    numpy.ndarray of shape (len(corpus), num_features); documents with no
    in-vocabulary word map to the zero vector.
    """
    vocabulary = set(model.wv.index2word)

    def average_word_vectors(words, model, vocabulary, num_features):
        # Sum the vectors of in-vocabulary words, then divide by their count.
        feature_vector = np.zeros((num_features,), dtype="float64")
        nwords = 0.
        for word in words:
            if word in vocabulary:
                nwords = nwords + 1.
                # FIX: access vectors through `model.wv`; direct `model[word]`
                # lookups are deprecated in gensim 3.x and removed in 4.x.
                feature_vector = np.add(feature_vector, model.wv[word])
        if nwords:
            feature_vector = np.divide(feature_vector, nwords)
        return feature_vector

    features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
                for tokenized_sentence in corpus]
    return np.array(features)
# generate averaged word vector features from word2vec model
avg_wv_train_features = averaged_word2vec_vectorizer(corpus=tokenized_train, model=w2v_model,
                                                     num_features=500)
avg_wv_test_features = averaged_word2vec_vectorizer(corpus=tokenized_test, model=w2v_model,
                                                    num_features=500)
# feature engineering with GloVe model
# (tn.nlp is presumably a spaCy pipeline whose document `.vector` attribute
# yields 300-d GloVe embeddings — confirm against text_normalizer)
train_nlp = [tn.nlp(item) for item in norm_train_reviews]
train_glove_features = np.array([item.vector for item in train_nlp])
test_nlp = [tn.nlp(item) for item in norm_test_reviews]
test_glove_features = np.array([item.vector for item in test_nlp])
print('Word2Vec model:> Train features shape:', avg_wv_train_features.shape, ' Test features shape:', avg_wv_test_features.shape)
print('GloVe model:> Train features shape:', train_glove_features.shape, ' Test features shape:', test_glove_features.shape)
```
## Modeling with deep neural networks
### Building Deep neural network architecture
```
def construct_deepnn_architecture(num_input_features):
    """Build a fully connected 2-class classifier.

    Three hidden blocks of Dense(512, relu) + Dropout(0.2), followed by a
    2-unit softmax head; compiled with categorical cross-entropy and Adam.
    """
    dnn_model = Sequential()
    # Three identical hidden blocks; the first layer also fixes the input size.
    for layer_index in range(3):
        if layer_index == 0:
            dnn_model.add(Dense(512, activation='relu',
                                input_shape=(num_input_features,)))
        else:
            dnn_model.add(Dense(512, activation='relu'))
        dnn_model.add(Dropout(0.2))
    # Two-class softmax output head.
    dnn_model.add(Dense(2))
    dnn_model.add(Activation('softmax'))
    dnn_model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
    return dnn_model

w2v_dnn = construct_deepnn_architecture(num_input_features=500)
```
### Visualize sample deep architecture
```
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
# NOTE(review): `keras.utils.vis_utils` requires pydot/graphviz and was
# relocated in newer TF/Keras releases — confirm the pinned Keras version.
SVG(model_to_dot(w2v_dnn, show_shapes=True, show_layer_names=False,
                 rankdir='TB').create(prog='dot', format='svg'))
```
### Model Training, Prediction and Performance Evaluation
```
# Train/evaluate the DNN on the averaged word2vec features.
batch_size = 100
w2v_dnn.fit(avg_wv_train_features, y_train, epochs=5, batch_size=batch_size,
            shuffle=True, validation_split=0.1, verbose=1)
# NOTE(review): Sequential.predict_classes was removed in TF >= 2.6; on a
# modern stack use np.argmax(model.predict(...), axis=-1) — confirm version.
y_pred = w2v_dnn.predict_classes(avg_wv_test_features)
predictions = le.inverse_transform(y_pred)
meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=predictions,
                                      classes=['positive', 'negative'])
# Train/evaluate an identical DNN on the 300-d GloVe document vectors.
glove_dnn = construct_deepnn_architecture(num_input_features=300)
batch_size = 100
glove_dnn.fit(train_glove_features, y_train, epochs=5, batch_size=batch_size,
              shuffle=True, validation_split=0.1, verbose=1)
y_pred = glove_dnn.predict_classes(test_glove_features)
predictions = le.inverse_transform(y_pred)
meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=predictions,
                                      classes=['positive', 'negative'])
```
| github_jupyter |
[Sascha Spors](https://orcid.org/0000-0001-7225-9992),
Professorship Signal Theory and Digital Signal Processing,
[Institute of Communications Engineering (INT)](https://www.int.uni-rostock.de/),
Faculty of Computer Science and Electrical Engineering (IEF),
[University of Rostock, Germany](https://www.uni-rostock.de/en/)
# Tutorial Signals and Systems (Signal- und Systemtheorie)
Summer Semester 2021 (Bachelor Course #24015)
- lecture: https://github.com/spatialaudio/signals-and-systems-lecture
- tutorial: https://github.com/spatialaudio/signals-and-systems-exercises
WIP...
The project is currently under heavy development while adding new material for the summer semester 2021
Feel free to contact lecturer [frank.schultz@uni-rostock.de](https://orcid.org/0000-0002-3010-0294)
# Exercise 8: Discrete-Time Convolution
```
import matplotlib.pyplot as plt
import numpy as np
#from matplotlib.ticker import MaxNLocator
#from scipy import signal
# we create an undersampled and windowed impulse response of an RC-circuit lowpass
TRC = 1/6 # time constant in s
wRC = 1/TRC # cutoff angular frequency in rad/s
ws = 200/3*wRC # sampling angular frequency in rad/s, this yields aliasing!!
fs = ws/(2*np.pi) # sampling frequency in Hz
Ts = 1/fs # sampling interval in s
w = np.linspace(-10*ws, ws*10, 2**11) # angular frequency in rad/s
s = 1j*w # laplace variable along im-axis in rad/s
H = 1 / (s/wRC + 1) # frequency response
k = np.arange(np.int32(np.ceil(0.5/Ts)+1)) # sample index
h = (1/TRC * np.exp(-k*Ts/TRC)) # sampled impulse response, windowed!!
# normalize to achieve h[k=0] = 1, cf. convolution_ct_example2_AF3B15E0D3.ipynb
h *= TRC
Nh = h.size
kh = 0 # start of impulse response
# Top subplot: periodic spectral images of the analog lowpass, shifted by
# integer multiples of the sampling frequency ws.
plt.figure(figsize=(6, 6))
plt.subplot(2, 1, 1)
for nu in np.arange(-4, 5, 1):
    plt.plot(w+nu*ws, 20*np.log10(np.abs(H)), 'C1')
plt.plot(w, 20*np.log10(np.abs(H)))
plt.plot([ws/2, ws/2], [-40, 0], 'C7')  # marker at half the sampling frequency
plt.xticks(ws*np.arange(-4, 5, 1))
plt.xlim(-4*ws, +4*ws)
plt.ylim(-40, 0)
plt.xlabel(r'$\omega$ / (rad/s)')
plt.ylabel(r'$20 \log_{10} |H(\omega)|$')
plt.grid(True)
# Bottom subplot: the sampled, windowed, normalized impulse response.
plt.subplot(2, 1, 2)
# NOTE(review): `use_line_collection` is deprecated/removed in newer
# Matplotlib (>= 3.8) — confirm the pinned version.
plt.stem(k*Ts, h, use_line_collection=True,
         linefmt='C0:', markerfmt='C0o', basefmt='C0:',
         label=r'$h_d[k] = h[k T_s] \cdot T_{RC} = \mathrm{e}^{-k\cdot\frac{T_s}{T_{RC}}}$')
plt.xlabel(r'$k \cdot T_s$')
plt.legend()
plt.grid(True)
print(Ts, ws)
# signal
x = 2*np.ones(np.int32(np.ceil(2 / Ts))) # non-zero elements
Nx = x.size
kx = np.int32(np.ceil(1/Ts)) # start index for first non-zero entry
# discrete-time convolution
# (output length Nx+Nh-1 and start index kx+kh follow the support rules
# of linear convolution)
Ny = Nx+Nh-1
ky = kx+kh
y = np.convolve(x, h)
# Figure 1: input x[k], impulse response h[k], and the convolution result.
plt.figure(figsize=(12, 4))
k = np.arange(kx, kx+Nx)
ax = plt.subplot(1, 3, 1)
plt.stem(k*Ts, x, use_line_collection=True,
         linefmt='C0:', markerfmt='C0.', basefmt='C0:',
         label=r'$x[k]$')
plt.xlim(1, 3)
plt.xlabel(r'$k \cdot T_s$ / s')
plt.legend(loc='upper right')
k = np.arange(kh, kh+Nh)
ax = plt.subplot(1, 3, 2)
plt.stem(k*Ts, h, use_line_collection=True,
         linefmt='C1:', markerfmt='C1.', basefmt='C1:',
         label=r'$h[k]$')
plt.xlim(0, 0.5)
plt.ylim(0, 1)
plt.yticks(np.arange(0, 1.25, 0.25))
plt.xlabel(r'$k \cdot T_s$ / s')
plt.legend(loc='upper right')
plt.grid(True)
k = np.arange(ky, ky+Ny)
ax = plt.subplot(1, 3, 3)
plt.stem(k*Ts, y*Ts, use_line_collection=True,
         linefmt='C2:', markerfmt='C2.', basefmt='C2:',
         label=r'$y[k]\,/\,T_s = x[k]\ast h[k]$')
# reference level drawn as a horizontal line
tmp = (1-np.exp(-3))/3
plt.plot([1, 3.5], [tmp, tmp], 'C3')
plt.xlim(1, 3.5)
plt.ylim(0, 0.4)
plt.yticks(np.arange(0, 0.5, 0.1))
plt.xlabel(r'$k \cdot T_s$ / s')
plt.legend(loc='upper right')
plt.grid(True)
plt.savefig('convolution_discrete_pt1_xhy.pdf')
# Figure 2: zoomed view of the same result, once over physical time k*Ts
# (left) and once over the bare sample index k (right).
plt.figure(figsize=(8, 4))
k = np.arange(ky, ky+Ny)
ax = plt.subplot(1, 2, 1)
plt.stem(k*Ts, y*Ts, use_line_collection=True,
         linefmt='C2:', markerfmt='C2o', basefmt='C2:',
         label=r'$y[k]\,/\,T_s = x[k]\ast h[k]$')
tmp = (1-np.exp(-3))/3
plt.plot([1, 3.5], [tmp, tmp], 'C3')
plt.xlim(1, 1.5)
plt.ylim(0, 0.4)
plt.yticks(np.arange(0, 0.5, 0.1))
plt.xlabel(r'$k \cdot T_s$ / s')
plt.legend(loc='upper right')
plt.grid(True)
ax = plt.subplot(1, 2, 2)
plt.stem(k, y*Ts, use_line_collection=True,
         linefmt='C2:', markerfmt='C2o', basefmt='C2:',
         label=r'$y[k]\,/\,T_s = x[k]\ast h[k]$')
tmp = (1-np.exp(-3))/3
plt.plot([1/Ts, 3.5/Ts], [tmp, tmp], 'C3')
plt.xlim(1/Ts, 1.5/Ts)
plt.ylim(0, 0.4)
plt.yticks(np.arange(0, 0.5, 0.1))
plt.xlabel(r'$k$')
plt.legend(loc='upper right')
plt.grid(True)
plt.savefig('convolution_discrete_pt1_y_over_kt_zoom.pdf')
```
## Copyright
This tutorial is provided as Open Educational Resource (OER), to be found at
https://github.com/spatialaudio/signals-and-systems-exercises
accompanying the OER lecture
https://github.com/spatialaudio/signals-and-systems-lecture.
Both are licensed under a) the Creative Commons Attribution 4.0 International
License for text and graphics and b) the MIT License for source code.
Please attribute material from the tutorial as *Frank Schultz,
Continuous- and Discrete-Time Signals and Systems - A Tutorial Featuring
Computational Examples, University of Rostock* with
``main file, github URL, commit number and/or version tag, year``.
| github_jupyter |
# PixdosepiX-OpenKBP---2020-AAPM-Grand-Challenge-
## Introduction
The aim of the OpenKBP Challenge is to advance fair and consistent comparisons of dose prediction methods for knowledge-based planning (KBP). Participants of the challenge will use a large dataset to train, test, and compare their prediction methods, using a set of standardized metrics, with those of other participants.
## Get and prepare data
```
# Download the challenge dataset (replace the placeholder with the real
# CodaLab link), mount Google Drive, unpack the training archive and drop
# macOS metadata files that would otherwise appear as bogus "patients".
!wget "###REPLACE WITH LINK TO DATASET IN CODALAB###"
from google.colab import drive
drive.mount('/content/drive')
!mv /content/e25ae3d9-03e1-4d2c-8af2-f9991193f54b train.zip
!unzip train.zip
!rm "/content/train-pats/.DS_Store"
!rm "/content/validation-pats-no-dose/.DS_Store"
```
## Import libraries
```
%tensorflow_version 2.x
import shutil
import json
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from IPython.display import clear_output
```
## Data loader and general functions
```
def create_hist(img):
    """Flatten *img* (dropping singleton axes) and rescale its values from
    the [0, 1] network range to [0, 100] for histogram plotting."""
    flattened = np.ravel(np.squeeze(img))
    return flattened * 100
def create_out_file(in_url, out_url):
    """Mirror the patient sub-directory names of *in_url* under *out_url*.

    Creates one empty directory per entry found in *in_url*; *out_url*
    itself must already exist.
    """
    for patient_name in os.listdir(in_url):
        os.mkdir("/".join([out_url, patient_name]))
def unravel_ct_dose(data_img):
    """Expand a sparse CSV frame (flat C-order voxel index -> value in the
    ``data`` column) into a dense 128x128x128 volume; missing voxels are 0."""
    volume = np.zeros((128, 128, 128))
    flat_index = tuple(data_img.index)
    voxel_coords = np.unravel_index(flat_index, (128, 128, 128), order="C")
    volume[tuple(tuple(axis) for axis in voxel_coords)] = data_img.data.values
    return volume
def unravel_masks(data_img):
    """Expand a sparse mask frame (flat C-order voxel indices) into a dense
    binary 128x128x128 volume: listed voxels become 1, all others stay 0."""
    mask_volume = np.zeros((128, 128, 128))
    voxel_coords = np.unravel_index(tuple(data_img.index), (128, 128, 128), order="C")
    mask_volume[tuple(map(tuple, voxel_coords))] = 1
    return mask_volume
def decode_to_CT_Dose(url_element):
    """Read a sparse CT/dose CSV and return the dense volume with batch and
    channel axes added: shape (1, 128, 128, 128, 1)."""
    sparse_frame = pd.read_csv(url_element, index_col=0)
    dense_volume = unravel_ct_dose(sparse_frame)
    return dense_volume[np.newaxis, ..., np.newaxis]
def decode_unique_mask(url_element):
    """Read a sparse mask CSV and return the dense binary volume with batch
    and channel axes added: shape (1, 128, 128, 128, 1)."""
    sparse_frame = pd.read_csv(url_element, index_col=0)
    return unravel_masks(sparse_frame)[np.newaxis, ..., np.newaxis]
def decode_voxel_dimensions(url_element):
    """Load the per-axis voxel dimensions (plain-text file) as a numpy array."""
    return np.loadtxt(url_element)
def decode_fusion_maks(link, list_name_masks, dict_num_mask):
    """Stack one patient's per-organ binary masks into a single
    (1, 128, 128, 128, 10) tensor; absent organ CSVs leave their channel 0.

    *dict_num_mask* maps each organ name to its channel index.
    """
    fused = np.zeros([1, 128, 128, 128, 10])
    available_files = os.listdir(link)
    for organ_name in list_name_masks:
        csv_name = organ_name + ".csv"
        if csv_name not in available_files:
            continue
        sparse_frame = pd.read_csv(link + "/" + csv_name, index_col=0)
        channel = dict_num_mask[organ_name]
        fused[0, :, :, :, channel] = unravel_masks(sparse_frame)
    return fused
def get_patient_list(url_main):
    """Return the names of the entries (patient folders) under *url_main*."""
    return [entry.name for entry in os.scandir(url_main)]
# The ten regions of interest, in their fixed channel order. Shared by all
# three patient loaders below (the list and its name->channel mapping were
# previously duplicated in each loader, risking silent divergence).
_MASK_NAMES = ['Brainstem',
               'SpinalCord',
               'RightParotid',
               'LeftParotid',
               'Esophagus',
               'Larynx',
               'Mandible',
               'PTV56',
               'PTV63',
               'PTV70']
# Organ name -> mask channel index (its position in _MASK_NAMES).
_MASK_CHANNELS = {name: channel for channel, name in enumerate(_MASK_NAMES)}

def load_patient_train(dir_patient):
    """Load the tensors needed for training a patient from *dir_patient*:
    CT, ground-truth dose, and the fused (1,128,128,128,10) organ masks."""
    return {
        "ct": decode_to_CT_Dose(dir_patient + "/" + "ct.csv"),
        "dose": decode_to_CT_Dose(dir_patient + "/" + "dose.csv"),
        "masks": decode_fusion_maks(dir_patient, _MASK_NAMES, _MASK_CHANNELS),
    }

def load_patient(dir_patient):
    """Load every available tensor for a fully annotated patient: CT, dose,
    possible-dose mask, voxel dimensions and the fused organ masks."""
    return {
        "ct": decode_to_CT_Dose(dir_patient + "/" + "ct.csv"),
        "dose": decode_to_CT_Dose(dir_patient + "/" + "dose.csv"),
        "possible_dose_mask": decode_unique_mask(dir_patient + "/" + "possible_dose_mask.csv"),
        "voxel_dimensions": decode_voxel_dimensions(dir_patient + "/" + "voxel_dimensions.csv"),
        "masks": decode_fusion_maks(dir_patient, _MASK_NAMES, _MASK_CHANNELS),
    }

def load_patient_test(dir_patient):
    """Load the tensors available for a test patient (no ground-truth dose)."""
    return {
        "ct": decode_to_CT_Dose(dir_patient + "/" + "ct.csv"),
        "possible_dose_mask": decode_unique_mask(dir_patient + "/" + "possible_dose_mask.csv"),
        "voxel_dimensions": decode_voxel_dimensions(dir_patient + "/" + "voxel_dimensions.csv"),
        "masks": decode_fusion_maks(dir_patient, _MASK_NAMES, _MASK_CHANNELS),
    }
# Build the absolute path of every training patient directory.
url_train = "/content/train-pats"
patients = get_patient_list(url_train)
for i,patient in enumerate(patients):
    patients[i] = os.path.join(url_train,patient)
def load_images_to_net(patient_url):
    """Load one training patient and rescale its tensors for the GAN:
    CT is clipped at 4500 and mapped to [-1, 1]; dose is clipped at 100
    and mapped to [-1, 1]; masks are only cast to float32."""
    patient = load_patient_train(patient_url)
    # Values above the clip threshold are zeroed (not saturated) before scaling.
    clipped_ct = np.where(patient["ct"] <= 4500, patient["ct"], 0)
    ct = 2 * tf.cast(clipped_ct, dtype=tf.float32) / 4500 - 1
    masks = tf.cast(patient["masks"], dtype=tf.float32)
    clipped_dose = np.where(patient["dose"] <= 100, patient["dose"], 0)
    dose = 2 * tf.cast(clipped_dose, dtype=tf.float32) / 100 - 1
    return ct, masks, dose
def load_images_to_net_test(patient_url):
    """Load one test patient (no ground-truth dose) and rescale its tensors.

    CT is clipped at 4500 and mapped to [-1, 1]; the masks, possible-dose
    mask and voxel dimensions are cast to float32 unchanged.

    Returns
    -------
    (ct, masks, possible_dose_mask, voxel_dimensions) tensors.
    """
    images = load_patient_test(patient_url)
    # FIX: removed the accidental duplicated assignment (`ct = ct = ...`).
    ct = tf.cast(np.where(images["ct"] <= 4500, images["ct"], 0), dtype=tf.float32)
    ct = (2*ct/4500) - 1
    masks = tf.cast(images["masks"], dtype=tf.float32)
    possible_dose_mask = tf.cast(images["possible_dose_mask"], dtype=tf.float32)
    voxel_dimensions = tf.cast(images["voxel_dimensions"], dtype=tf.float32)
    return ct, masks, possible_dose_mask, voxel_dimensions
```
## Architecture

https://blog.paperspace.com/unpaired-image-to-image-translation-with-cyclegan/
### Create downsample and upsample functions
```
def downsample(filters, apply_batchnorm=True):
    """One encoder stage: a strided 4x4x4 Conv3D that halves every spatial
    dimension, optionally batch-normalized, followed by ReLU."""
    init = tf.random_normal_initializer(0, 0.02)
    block = Sequential()
    block.add(Conv3D(filters,
                     kernel_size=4,
                     strides=2,
                     padding="same",
                     kernel_initializer=init,
                     # The bias is redundant when batch norm follows.
                     use_bias=not apply_batchnorm))
    if apply_batchnorm:
        block.add(BatchNormalization())
    block.add(ReLU())
    return block
def upsample(filters, apply_dropout=False):
    """One decoder stage: a strided 4x4x4 transposed Conv3D that doubles
    every spatial dimension, batch-normalized, optionally with dropout,
    followed by ReLU."""
    init = tf.random_normal_initializer(0, 0.02)
    block = Sequential()
    block.add(Conv3DTranspose(filters,
                              kernel_size=4,
                              strides=2,
                              padding="same",
                              kernel_initializer=init,
                              use_bias=False))
    block.add(BatchNormalization())
    if apply_dropout:
        block.add(Dropout(0.5))
    block.add(ReLU())
    return block
```
### Create generator-net
```
def Generator():
    """Build the U-Net style 3-D generator.

    Inputs: the CT volume (1 channel) and the 10-channel organ masks,
    concatenated along the channel axis. Seven downsampling stages encode
    to 1x1x1, six upsampling stages (with skip connections to the matching
    encoder stage) decode back, and a final tanh transposed convolution
    produces the 1-channel dose volume in [-1, 1].
    """
    ct_image = Input(shape=[128,128,128,1])
    roi_masks = Input(shape=[128,128,128,10])
    inputs = concatenate([ct_image, roi_masks])
    down_stack = [
        downsample(64, apply_batchnorm=False), # (64x64x64x64)
        downsample(128), #32 (32x32x32x128)
        downsample(256), #16 (16x16x16x16x256)
        downsample(512), #8 (8x8x8x512)
        downsample(512), #4 (4x4x4x512)
        downsample(512), #2 (2x2x2x512)
        downsample(512), #1 (1x1x1x512)
    ]
    up_stack = [
        upsample(512,apply_dropout=True), #2 (2x2x2x512)
        upsample(512,apply_dropout=True), #4 (4x4x4x512)
        upsample(512), #8 (8x8x8x512)
        upsample(256), #16 (16x16x16x256)
        upsample(128), #32 (32x32x32x128)
        upsample(64), #64 (64x64x64x64)
    ]
    initializer = tf.random_normal_initializer(0,0.02)
    last = Conv3DTranspose(filters=1,
                           kernel_size = 4,
                           strides = 2,
                           padding = "same",
                           kernel_initializer = initializer,
                           activation = "tanh") #(128x128x128x3)
    x = inputs
    s = []  # encoder activations, kept for the skip connections
    concat = Concatenate()
    for down in down_stack:
        x = down(x)
        s.append(x)
    # Skip the bottleneck activation and pair decoder stages with encoder
    # stages in reverse (deepest first).
    s = reversed(s[:-1])
    for up,sk in zip(up_stack,s):
        x = up(x)
        x = concat([x,sk])
    # NOTE: `last` is rebound here from the layer object to its output tensor.
    last = last(x)
    return Model(inputs = [ct_image,roi_masks], outputs = last)

generator = Generator()
```
### Run generator-net
```
# Run the untrained generator on one patient and compare CT, ground-truth
# dose and prediction, both as mid-volume slices and as value histograms.
ct,masks,dose = load_images_to_net("/content/train-pats/pt_150")
gen_output = generator([ct,masks],training=True)
# Map from the network's [-1, 1] range back to [0, 1] for display.
c = (ct[0,:,:,88,0]+1)/2
d = (dose[0,:,:,88,0]+1)/2
p = (gen_output[0,:,:,88,0]+1)/2
fig=plt.figure(figsize=(16, 16))
fig.add_subplot(3,3,1)
plt.title("ct")
plt.imshow(c)
fig.add_subplot(3,3,2)
plt.title("dose")
plt.imshow(d)
fig.add_subplot(3,3,3)
plt.title("predict")
plt.imshow(p)
# FIX: `normed=` was removed from plt.hist in Matplotlib 3.x;
# `density=True` is the supported equivalent.
fig.add_subplot(3,3,4)
plt.hist(create_hist(c), density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
fig.add_subplot(3,3,5)
plt.hist(create_hist(d), density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
fig.add_subplot(3,3,6)
plt.hist(create_hist(p), density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
fig.add_subplot(3,3,7)
plt.hist(create_hist((ct+1)/2),density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
fig.add_subplot(3,3,8)
plt.hist(create_hist((dose+1)/2), density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
fig.add_subplot(3,3,9)
plt.hist(create_hist((gen_output+1)/2), density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
plt.show()
```
### Create discriminator-net
```
def Discriminator():
    """Build the 3-D discriminator.

    Scores (CT, organ masks, dose) triples: the three inputs are
    concatenated channel-wise, passed through three downsampling stages,
    and mapped to a volume of per-region logits (patch-style output, no
    global pooling or sigmoid — losses use from_logits=True).
    """
    ct_dis = Input(shape=[128,128,128,1], name = "ct_dis")
    ct_masks = Input(shape=[128,128,128,10], name = "ct_masks")
    dose_gen = Input(shape=[128,128,128,1], name = "dose_gen")
    con = concatenate([ct_dis,ct_masks,dose_gen])
    initializer = tf.random_normal_initializer(0,0.02)
    down1 = downsample(64, apply_batchnorm = False)(con)
    down2 = downsample(128)(down1)
    down3 = downsample(256)(down2)
    last = tf.keras.layers.Conv3D(filters = 1,
                                  kernel_size = 4,
                                  strides = 1,
                                  kernel_initializer = initializer,
                                  padding = "same")(down3)
    return tf.keras.Model(inputs = [ct_dis,ct_masks,dose_gen],outputs = last)

discriminator = Discriminator()
```
### Run discriminator-net
```
# Score the demo generator output and visualize the discriminator's logit
# volume along each of the three anatomical axes.
disc_out = discriminator([ct,masks,gen_output],training = True)
# NOTE(review): `w` and `h` are never used below — candidates for removal.
w=16
h=16
fig=plt.figure(figsize=(8, 8))
columns = 4
rows = 4
for i in range(0, columns*rows):
    fig.add_subplot(rows, columns, i +1)
    plt.imshow(disc_out[0,:,:,i,0],vmin=-1,vmax=1,cmap = "RdBu_r")
    plt.colorbar()
plt.show()
fig=plt.figure(figsize=(8, 8))
columns = 4
rows = 4
for i in range(0, columns*rows):
    fig.add_subplot(rows, columns, i +1)
    plt.imshow(disc_out[0,:,i,:,0],vmin=-1,vmax=1,cmap = "RdBu_r")
    plt.colorbar()
plt.show()
fig=plt.figure(figsize=(8, 8))
columns = 4
rows = 4
for i in range(0, columns*rows):
    fig.add_subplot(rows, columns, i +1)
    plt.imshow(disc_out[0,i,:,:,0],vmin=-1,vmax=1,cmap = "RdBu_r")
    plt.colorbar()
plt.show()
```
### Loss functions
```
# Adversarial loss functions (binary cross-entropy on raw discriminator logits).
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(disc_real_output, disc_generated_output):
    """Total discriminator loss: real samples are scored against ones,
    generated samples against zeros."""
    real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)
    fake_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
    return real_loss + fake_loss

# Weight of the L1 (mean absolute error) term in the generator objective.
LAMBDA = 100

def generator_loss(disc_generated_output, gen_output, target):
    """Generator loss: a fool-the-discriminator term plus a LAMBDA-weighted
    L1 distance between the generated and the reference dose."""
    adversarial_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    return adversarial_loss + LAMBDA * l1_loss
```
### Configure checkpoint
```
import os
# Adam with beta_1=0.5 for both networks.
generator_optimizer = tf.keras.optimizers.Adam(2e-4,beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(2e-4,beta_1=0.5)
cpath = "/content/drive/My Drive/PAE_PYTHONQUANTIC/IA/OpenKBP/Auxiliary/checkpoint" #dir to checkpoints
# Bundle both models and their optimizers so training can resume mid-run.
checkpoint = tf.train.Checkpoint(generator_optimizer = generator_optimizer,
                                 discriminator_optimizer = discriminator_optimizer,
                                 generator = generator,
                                 discriminator = discriminator)
```
## Train
### Train step
```
@tf.function
def train_step(ct, masks, dose):
    """Run one optimization step of the conditional GAN on a single patient.

    Updates generator and discriminator weights in place; *dose* is the
    ground-truth target in the same [-1, 1] scale as the generator output.
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as discr_tape:
        output_image = generator([ct, masks], training=True)
        # BUG FIX: score the image generated in THIS step (`output_image`),
        # not the stale notebook-level `gen_output` from the demo cells.
        output_gen_discr = discriminator([ct, masks, output_image], training=True)
        output_trg_discr = discriminator([ct, masks, dose], training=True)
        discr_loss = discriminator_loss(output_trg_discr, output_gen_discr)
        gen_loss = generator_loss(output_gen_discr, output_image, dose)
    generator_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
    discriminator_grads = discr_tape.gradient(discr_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_grads, discriminator.trainable_variables))
```
### Restore metrics
```
def update_metrics():
    """Reload the persisted GAN metrics dict from the checkpoint folder.

    Returns the dict saved by the training loop as `metrics_GAN.json`.
    """
    # BUG FIX: the path was missing the "/" separator
    # (`cpath + 'metrics_GAN.json'` pointed at ".../checkpointmetrics_GAN.json",
    # while the training loop saves to `cpath + '/metrics_GAN.json'`).
    with open(cpath + '/metrics_GAN.json') as f:
        metrics_GAN = json.load(f)
    return metrics_GAN
# Fresh metrics dict; replaced with the persisted one if a previous run
# already saved metrics into the checkpoint folder.
metrics_GAN = {"gen_loss":[],"discr_loss":[]}
check = os.listdir(cpath)
if "metrics_GAN.json" in check:
    print("upadte metrics")
    metrics_GAN = update_metrics()
```
### Define train-loop
```
def train(epochs):
    """Train the GAN up to *epochs*, resuming from the latest checkpoint.

    Every 10 patients the current generator/discriminator losses are appended
    to the global `metrics_GAN` dict; every 20 epochs the model/optimizer
    state and the metrics are persisted under `cpath`.
    """
    check = os.listdir(cpath)
    start = 0
    # BUG FIX: the original tested `len(cpath) > 0` (the length of the path
    # *string*, always true) instead of the directory listing `check`.
    if len(check) > 0 and "state.txt" in check:
        start = int(np.loadtxt(cpath + "/state.txt"))
        print("upload checkpoint model")
        checkpoint.restore(tf.train.latest_checkpoint(cpath + "/" + str(start)))
    metrics_GAN["gen_loss"].append("epoch" + str(start))
    metrics_GAN["discr_loss"].append("epoch" + str(start))
    print("Start training in epoch", start)
    for epoch in range(start, epochs):
        np.random.shuffle(patients)
        imgi = 0
        for patient in patients:
            ct, masks, dose = load_images_to_net(patient)
            print("epoch " + str(epoch) + " - train: " + str(imgi) + "/" + str(len(patients)))
            train_step(ct, masks, dose)
            if imgi % 10 == 0:
                # Periodically recompute both losses for logging.
                output_image = generator([ct, masks], training=True)
                # BUG FIX: score the freshly generated `output_image`, not the
                # stale notebook-level `gen_output`.
                output_gen_discr = discriminator([ct, masks, output_image], training=True)
                output_trg_discr = discriminator([ct, masks, dose], training=True)
                discr_loss = discriminator_loss(output_trg_discr, output_gen_discr)
                gen_loss = generator_loss(output_gen_discr, output_image, dose)
                metrics_GAN["gen_loss"].append(str(np.mean(gen_loss)))
                metrics_GAN["discr_loss"].append(str(np.mean(discr_loss)))
            imgi += 1
            clear_output(wait=True)
        imgi = 0
        metrics_GAN["gen_loss"].append("epoch" + str(epoch))
        metrics_GAN["discr_loss"].append("epoch" + str(epoch))
        # saving (checkpoint) the model every 20 epochs
        if (epoch + 1) % 20 == 0:
            with open(cpath + '/metrics_GAN.json', 'w') as fp:
                json.dump(metrics_GAN, fp)
            state = np.array([epoch + 1])
            np.savetxt(cpath + "/state.txt", state)
            os.mkdir(cpath + "/" + str(epoch + 1))
            checkpoint_prefix = os.path.join(cpath + "/" + str(epoch + 1), "ckpt")
            checkpoint.save(file_prefix=checkpoint_prefix)
```
### Initialize train for epochs
```
# Train (or resume training) the GAN for up to 230 epochs.
train(230)
```
## Evaluate
### Restore model
```
def upload_model():
    """Restore generator/discriminator/optimizer weights from the latest
    checkpoint recorded in `state.txt`, if one exists under `cpath`."""
    check = os.listdir(cpath)
    # BUG FIX: the original tested `len(cpath) > 0` (the path string length,
    # always true) rather than the actual directory listing `check`.
    if len(check) > 0 and "state.txt" in check:
        start = int(np.loadtxt(cpath + "/state.txt"))
        print("upload checkpoint model")
        checkpoint.restore(tf.train.latest_checkpoint(cpath + "/" + str(start)))

upload_model()
```
### Loader patient to evaluate
```
# Predict the dose for one validation patient and inspect slices/histograms.
ct,masks,possible_dose_mask,voxel_dimensions = load_images_to_net_test("/content/validation-pats-no-dose/pt_201")
gen_output = generator([ct,masks],training=True)
# Map the generator output back to [0, 1] and zero out voxels that can
# never receive dose.
gen_mask = ((gen_output+1)/2)*possible_dose_mask
plt.imshow(gen_mask[0,:,:,75,0])
plt.imshow((ct[0,:,:,75,0]+1)/2)
plt.imshow((gen_output[0,:,:,75,0]+1)/2)
w = np.squeeze(gen_mask).flatten()*100
# FIX: `normed=` was removed from plt.hist in Matplotlib 3.x;
# `density=True` is the supported equivalent.
plt.hist(w, density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
# Dose histogram restricted to mask channel 8 (the PTV63 channel per the
# loader's channel order).
x = (gen_output[0,:,:,:,0]+1)/2
y = masks[0,:,:,:,8]
z = x * y
w = np.squeeze(z).flatten()*100
plt.hist(w, density=True, bins=100,range=[1,100])
plt.ylabel('Probability')
```
## Export doses predictions
```
@tf.function
def predict_step(ct,masks):
    """Graph-compiled generator forward pass for inference.

    NOTE(review): `training=True` keeps batch-norm/dropout in training mode,
    matching the commented-out call in validation() — confirm this is the
    intended inference recipe.
    """
    output_image = generator([ct,masks], training=True)
    return output_image

# Output folder on Drive for the predicted dose CSVs, and the local folder
# holding the validation patients (no ground-truth dose).
url_main_results = "/content/drive/My Drive/PAE_PYTHONQUANTIC/IA/OpenKBP/Auxiliary/results_v1"
url_main_validation = "/content/validation-pats-no-dose"

def export_csv(predict_dose,patient):
    """Save one predicted dose volume in the sparse challenge CSV format:
    flat C-order voxel index -> dose value, zero-dose voxels omitted."""
    dictionary = {"data":np.ravel(predict_dose,order="C")}
    array = pd.DataFrame(dictionary)
    array = array[array["data"] != 0]
    array.to_csv(url_main_results + "/" + patient + ".csv")
def validation():
    """Predict the dose for every validation patient and export the CSVs."""
    patients = get_patient_list(url_main_validation)
    os.mkdir(url_main_results)
    inimg = 0
    for patient in patients:
        ct,masks,possible_dose_mask,voxel_dimensions = load_images_to_net_test(url_main_validation + "/" + patient)
        output_image = predict_step(ct,masks)
        #output_image = generator([ct,masks], training=True)
        # Rescale from [-1, 1] to [0, 100] and zero out impossible voxels.
        predict_dose = np.squeeze((((output_image+1)/2)*possible_dose_mask)*100)
        export_csv(predict_dose,patient)
        print(" - predict: " + str(inimg) + "/" + str(len(patients)))
        inimg = inimg + 1

validation()
# Bundle the per-patient CSVs into a single zip for submission.
shutil.make_archive('/content/drive/My Drive/PAE_PYTHONQUANTIC/IA/OpenKBP/Auxiliary/submisions_v1/baseline', 'zip','/content/drive/My Drive/PAE_PYTHONQUANTIC/IA/OpenKBP/Auxiliary/results_v1')
```
| github_jupyter |
# <b>Document AI features demo 1</b>
The AIServiceVisionClient offers the document <b>text detection</b> feature. This notebook aims to provide overall clarity about the feature to the user in terms of requirements, usage and the output of the API.<br>
<ul>
<li>The raw output is saved as <code>response_document_demo1.json</code> file. </li>
<li>Detected text is displayed under <b>Display the lines of text detected</b> section.</li>
<li>The user can visualize the bounding boxes for the detected text under <b>View output document with bounding boxes</b> section. </li>
</ul>
### Steps to run the notebook:
<details>
<summary>Notebook session setup</summary>
<ol>
<li><font size="2">Installing the OCI Vision SDK</font></li>
<li><font size="2">Installing other dependencies</font></li>
<li><font size="2">Setup sample input documents</font></li>
<li><font size="2">Setup helper .py files</font></li>
<li><font size="2">Create output folder</font></li>
</ol>
</details>
<details>
<summary>Importing the required modules</summary>
</details>
<details>
<summary>Setting the input variables</summary>
<font size="2">The user can change the input variables, if necessary. They have been assigned default values.</font>
</details>
<details>
<summary>Running the main pipeline</summary>
<font size="2">Run all cells to get the output in the <code>output</code> directory. </font><br>
</details>
### Notebook session setup
<details>
<summary>Instructions</summary>
<ul>
<li><font size="2">The user needs to run this setup only once.</font></li>
<li><font size="2">Uncomment the commented cells and run once to setup.</font></li>
<li><font size="2">Comment back the same cells to avoid running again.</font></li>
</ul>
</details>
#### Installing the OCI Vision SDK
```
# !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/vision_service_python_client-0.3.45-py2.py3-none-any.whl"
# !pip install vision_service_python_client-0.3.45-py2.py3-none-any.whl
# !rm vision_service_python_client-0.3.45-py2.py3-none-any.whl
```
#### Installing other dependencies
```
# !pip install matplotlib==3.3.4
# !pip install pandas==1.1.5
```
#### Setup sample input documents
```
# !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/TextDetectionOnePage.pdf"
# !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/table.pdf"
# !mkdir data
# !mv TextDetectionOnePage.pdf data
# !mv table.pdf data
```
#### Setup helper .py files
```
# !wget "https://objectstorage.us-ashburn-1.oraclecloud.com/n/axhheqi2ofpb/b/vision-demo-notebooks/o/analyze_document_utils.py"
# !mkdir helper
# !mv analyze_document_utils.py helper
```
#### Create output folder
```
# !mkdir output
```
### Imports
```
import base64
import os
import io
import oci
import json
from IPython.display import IFrame
import requests
from vision_service_python_client.ai_service_vision_client import AIServiceVisionClient
from vision_service_python_client.models.analyze_document_details import AnalyzeDocumentDetails
from vision_service_python_client.models.inline_document_details import InlineDocumentDetails
from vision_service_python_client.models.document_text_detection_feature import DocumentTextDetectionFeature
from helper.analyze_document_utils import is_url, clean_output
```
### Set input variables
<details>
<summary><font size="3">input_path</font></summary>
<font size="2">The user can provide the document URL or filepath from the notebook session.</font><br>
</details>
```
input_path = "data/TextDetectionOnePage.pdf"
```
### Authorize user config
```
config = oci.config.from_file('~/.oci/config')
```
### Get input document
```
if is_url(input_path):
file_content = requests.get(input_path).content
encoded_string = base64.b64encode(file_content)
input_path = 'data/' + os.path.basename(input_path)
open(input_path, 'wb').write(file_content)
else:
with open(input_path, "rb") as document_file:
encoded_string = base64.b64encode(document_file.read())
```
### View input document
```
if is_url(input_path):
display(IFrame('data/' + os.path.basename(input_path), width=600, height=500))
else:
display(IFrame(input_path, width=600, height=500))
```
### Create AI service vision client and get response object
```
ai_service_vision_client = AIServiceVisionClient(config=config)
analyze_document_details = AnalyzeDocumentDetails()
inline_document_details = InlineDocumentDetails()
text_detection_feature = DocumentTextDetectionFeature()
text_detection_feature.generate_searchable_pdf = False
features = [text_detection_feature]
inline_document_details.data = encoded_string.decode('utf-8')
analyze_document_details.document = inline_document_details
analyze_document_details.features = features
res = ai_service_vision_client.analyze_document(analyze_document_details=analyze_document_details)
```
### Clean and save the API response as json
```
res_json = json.loads(repr(res.data))
clean_res = clean_output(res_json)
with open('output/response_document_demo1.json', 'w') as fp:
json.dump(clean_res, fp)
```
### Display the lines of text detected
```
for i, page in enumerate(clean_res['pages']):
print('**************** PAGE NO.', i+1, '****************\n')
for line in page['lines']:
print(line['text'])
print('\n')
```
### View output document with bounding boxes
The user can uncomment and run the cells below to visualize the bounding boxes over the document. This visualization feature is currently supported for <b>PDF format only.</b>
#### Install dependencies
```
# !pip install fitz==0.0.1.dev2
# !pip install pymupdf==1.18.19
```
#### Imports
```
from helper.analyze_document_utils import add_text_bounding_boxes_to_pdf
import fitz
```
#### Add bounding boxes
```
doc = fitz.open(input_path)
doc = add_text_bounding_boxes_to_pdf(doc, clean_res)
output_path = 'output/' + 'output_' + os.path.basename(input_path)
doc.save(output_path)
```
#### Display output document
```
display(IFrame(output_path, width=600, height=500))
```
| github_jupyter |
# Convolutional Networks
So far we have worked with deep fully-connected networks, using them to explore different optimization strategies and network architectures. Fully-connected networks are a good testbed for experimentation because they are very computationally efficient, but in practice all state-of-the-art results use convolutional networks instead.
First you will implement several layer types that are used in convolutional networks. You will then use these layers to train a convolutional network on the CIFAR-10 dataset.
```
import os
os.chdir(os.getcwd() + '/..')
# Run some setup code for this notebook
import random
import numpy as np
import matplotlib.pyplot as plt
from utils.data_utils import get_CIFAR10_data
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data('datasets/cifar-10-batches-py', subtract_mean=True)
for k, v in data.iteritems():
print('%s: ' % k, v.shape)
from utils.metrics_utils import rel_error
```
# Convolution: Naive forward pass
The core of a convolutional network is the convolution operation. In the file `layers/layers.py`, implement the forward pass for the convolution layer in the function `conv_forward_naive`.
You don't have to worry too much about efficiency at this point; just write the code in whatever way you find most clear.
You can test your implementation by running the following:
```
from layers.layers import conv_forward_naive
x_shape = (2, 3, 4, 4)
w_shape = (3, 3, 4, 4)
x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)
w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)
b = np.linspace(-0.1, 0.2, num=3)
conv_param = {'stride': 2, 'pad': 1}
out, _ = conv_forward_naive(x, w, b, conv_param)
correct_out = np.array([[[[-0.08759809, -0.10987781],
[-0.18387192, -0.2109216 ]],
[[ 0.21027089, 0.21661097],
[ 0.22847626, 0.23004637]],
[[ 0.50813986, 0.54309974],
[ 0.64082444, 0.67101435]]],
[[[-0.98053589, -1.03143541],
[-1.19128892, -1.24695841]],
[[ 0.69108355, 0.66880383],
[ 0.59480972, 0.56776003]],
[[ 2.36270298, 2.36904306],
[ 2.38090835, 2.38247847]]]])
# Compare your output to ours; difference should be around 2e-8
print('Testing conv_forward_naive')
print('difference: ', rel_error(out, correct_out))
```
# Aside: Image processing via convolutions
As a fun way to both check your implementation and gain a better understanding of the type of operation that convolutional layers can perform, we will set up an input containing two images and manually set up filters that perform common image processing operations (grayscale conversion and edge detection). The convolution forward pass will apply these operations to each of the input images. We can then visualize the results as a sanity check.
```
from scipy.misc import imread, imresize
kitten, puppy = imread('test/kitten.jpg'), imread('test/puppy.jpg')
# kitten is wide, and puppy is already square
print kitten.shape, puppy.shape
d = kitten.shape[1] - kitten.shape[0]
kitten_cropped = kitten[:, d//2:-d//2, :]
print kitten_cropped.shape, puppy.shape
img_size = 200 # Make this smaller if it runs too slow
x = np.zeros((2, 3, img_size, img_size))
x[0, :, :, :] = imresize(puppy, (img_size, img_size)).transpose((2, 0, 1))
x[1, :, :, :] = imresize(kitten_cropped, (img_size, img_size)).transpose((2, 0, 1))
# Set up a convolutional weights holding 2 filters, each 3x3
w = np.zeros((2, 3, 3, 3))
# The first filter converts the image to grayscale.
# Set up the red, green, and blue channels of the filter.
w[0, 0, :, :] = [[0, 0, 0], [0, 0.3, 0], [0, 0, 0]]
w[0, 1, :, :] = [[0, 0, 0], [0, 0.6, 0], [0, 0, 0]]
w[0, 2, :, :] = [[0, 0, 0], [0, 0.1, 0], [0, 0, 0]]
# Second filter detects horizontal edges in the blue channel.
w[1, 2, :, :] = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
# Vector of biases. We don't need any bias for the grayscale
# filter, but for the edge detection filter we want to add 128
# to each output so that nothing is negative.
b = np.array([0, 128])
# Compute the result of convolving each input in x with each filter in w,
# offsetting by b, and storing the results in out.
out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
def imshow_noax(img, normalize=True):
    """Tiny helper to show images as uint8 and remove axis labels.

    Args:
        img: array-like image, (H, W) or (H, W, C).
        normalize: if True, linearly rescale pixel values to [0, 255]
            before display. A constant image maps to all zeros instead
            of producing NaNs.
    """
    if normalize:
        img_max, img_min = np.max(img), np.min(img)
        # Guard against division by zero for constant images; the
        # original formula produced NaNs (and a runtime warning) when
        # img_max == img_min.
        span = img_max - img_min
        if span == 0:
            span = 1.0
        img = 255.0 * (img - img_min) / span
    plt.imshow(img.astype('uint8'))
    plt.gca().axis('off')
# Show the original images and the results of the conv operation
plt.subplot(2, 3, 1)
imshow_noax(puppy, normalize=False)
plt.title('Original image')
plt.subplot(2, 3, 2)
imshow_noax(out[0, 0])
plt.title('Grayscale')
plt.subplot(2, 3, 3)
imshow_noax(out[0, 1])
plt.title('Edges')
plt.subplot(2, 3, 4)
imshow_noax(kitten_cropped, normalize=False)
plt.subplot(2, 3, 5)
imshow_noax(out[1, 0])
plt.subplot(2, 3, 6)
imshow_noax(out[1, 1])
plt.show()
```
# Convolution: Naive backward pass
Implement the backward pass for the convolution operation in the function `conv_backward_naive` in the file `layers/layers.py`. Again, you don't need to worry too much about computational efficiency.
When you are done, run the following to check your backward pass with a numeric gradient check.
```
from layers.layers import conv_backward_naive
from utils.gradient_check import eval_numerical_gradient_array
np.random.seed(231)
x = np.random.randn(4, 3, 5, 5)
w = np.random.randn(2, 3, 3, 3)
b = np.random.randn(2,)
dout = np.random.randn(4, 2, 5, 5)
conv_param = {'stride': 1, 'pad': 1}
dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout)
out, cache = conv_forward_naive(x, w, b, conv_param)
dx, dw, db = conv_backward_naive(dout, cache)
# Your errors should be around 1e-8'
print('Testing conv_backward_naive function')
print('dx error: ', rel_error(dx, dx_num))
print('dw error: ', rel_error(dw, dw_num))
print('db error: ', rel_error(db, db_num))
```
# Max pooling: Naive forward
Implement the forward pass for the max-pooling operation in the function `max_pool_forward_naive` in the file `layers/layers.py`. Again, don't worry too much about computational efficiency.
Check your implementation by running the following:
```
from layers.layers import max_pool_forward_naive
x_shape = (2, 3, 4, 4)
x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)
pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2}
out, _ = max_pool_forward_naive(x, pool_param)
correct_out = np.array([[[[-0.26315789, -0.24842105],
[-0.20421053, -0.18947368]],
[[-0.14526316, -0.13052632],
[-0.08631579, -0.07157895]],
[[-0.02736842, -0.01263158],
[ 0.03157895, 0.04631579]]],
[[[ 0.09052632, 0.10526316],
[ 0.14947368, 0.16421053]],
[[ 0.20842105, 0.22315789],
[ 0.26736842, 0.28210526]],
[[ 0.32631579, 0.34105263],
[ 0.38526316, 0.4 ]]]])
# Compare your output with ours. Difference should be around 1e-8.
print('Testing max_pool_forward_naive function:')
print('difference: ', rel_error(out, correct_out))
```
# Max pooling: Naive backward
Implement the backward pass for the max-pooling operation in the function `max_pool_backward_naive` in the file `layers/layers.py`. You don't need to worry about computational efficiency.
Check your implementation with numeric gradient checking by running the following:
```
from layers.layers import max_pool_backward_naive
np.random.seed(231)
x = np.random.randn(3, 2, 8, 8)
dout = np.random.randn(3, 2, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout)
out, cache = max_pool_forward_naive(x, pool_param)
dx = max_pool_backward_naive(dout, cache)
# Your error should be around 1e-12
print('Testing max_pool_backward_naive function:')
print('dx error: ', rel_error(dx, dx_num))
```
# Fast layers
Making convolution and pooling layers fast can be challenging. To spare you the pain, we've provided fast implementations of the forward and backward passes for convolution and pooling layers in the file `layers/fast_conv_layers.py`.
The fast convolution implementation depends on a Cython extension; to compile it you need to run the following from the `layers` directory:
```bash
python setup.py build_ext --inplace
```
The API for the fast versions of the convolution and pooling layers is exactly the same as the naive versions that you implemented above: the forward pass receives data, weights, and parameters and produces outputs and a cache object; the backward pass receives upstream derivatives and the cache object and produces gradients with respect to the data and weights.
**NOTE:** The fast implementation for pooling will only perform optimally if the pooling regions are non-overlapping and tile the input. If these conditions are not met then the fast pooling implementation will not be much faster than the naive implementation.
You can compare the performance of the naive and fast versions of these layers by running the following:
```
from layers.fast_conv_layers import conv_forward_fast, conv_backward_fast
from time import time
np.random.seed(231)
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25,)
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}
t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()
print('Testing conv_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('Difference: ', rel_error(out_naive, out_fast))
print
t0 = time()
dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
t1 = time()
dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting conv_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
print('dw difference: ', rel_error(dw_naive, dw_fast))
print('db difference: ', rel_error(db_naive, db_fast))
from layers.fast_conv_layers import max_pool_forward_fast, max_pool_backward_fast
np.random.seed(231)
x = np.random.randn(100, 3, 32, 32)
dout = np.random.randn(100, 3, 16, 16)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
t0 = time()
out_naive, cache_naive = max_pool_forward_naive(x, pool_param)
t1 = time()
out_fast, cache_fast = max_pool_forward_fast(x, pool_param)
t2 = time()
print('Testing pool_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('fast: %fs' % (t2 - t1))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('difference: ', rel_error(out_naive, out_fast))
t0 = time()
dx_naive = max_pool_backward_naive(dout, cache_naive)
t1 = time()
dx_fast = max_pool_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting pool_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
```
# Convolutional "sandwich" layers
Previously we introduced the concept of "sandwich" layers that combine multiple operations into commonly used patterns. In the file `layers/layer_utils.py`, you will find sandwich layers that implement a few commonly used patterns for convolutional networks.
```
from layers.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward
np.random.seed(231)
x = np.random.randn(2, 3, 16, 16)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param)
dx, dw, db = conv_relu_pool_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout)
print('Testing conv_relu_pool')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
from layers.layer_utils import conv_relu_forward, conv_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
out, cache = conv_relu_forward(x, w, b, conv_param)
dx, dw, db = conv_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout)
print('Testing conv_relu:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
```
# Three-layer ConvNet
Now that you have implemented all the necessary layers, we can put them together into a simple convolutional network.
Open the file `classifiers/cnn.py` and complete the implementation of the `ThreeLayerConvNet` class. Run the following cells to help you debug:
## Sanity check loss
After you build a new network, one of the first things you should do is sanity check the loss. When we use the softmax loss, we expect the loss for random weights (and no regularization) to be about `log(C)` for `C` classes. When we add regularization this should go up.
```
from classifiers.cnn import ThreeLayerConvNet
model = ThreeLayerConvNet()
N = 50
X = np.random.randn(N, 3, 32, 32)
y = np.random.randint(10, size=N)
loss, grads = model.loss(X, y)
print('Initial loss (no regularization): ', loss)
model.reg = 0.5
loss, grads = model.loss(X, y)
print('Initial loss (with regularization): ', loss)
print(np.log(10))
```
## Gradient check
After the loss looks reasonable, use numeric gradient checking to make sure that your backward pass is correct. When you use numeric gradient checking you should use a small amount of artifical data and a small number of neurons at each layer. Note: correct implementations may still have relative errors up to 1e-2.
```
from utils.gradient_check import eval_numerical_gradient
num_inputs = 2
input_dim = (3, 16, 16)
reg = 0.0
num_classes = 10
np.random.seed(231)
X = np.random.randn(num_inputs, *input_dim)
y = np.random.randint(num_classes, size=num_inputs)
model = ThreeLayerConvNet(num_filters=3, filter_size=3,
input_dim=input_dim, hidden_dim=7,
dtype=np.float64)
loss, grads = model.loss(X, y)
for param_name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
```
## Overfit small data
A nice trick is to train your model with just a few training samples. You should be able to overfit small datasets, which will result in very high training accuracy and comparatively low validation accuracy.
```
from base.solver import Solver
np.random.seed(231)
num_train = 100
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
model = ThreeLayerConvNet(weight_scale=1e-2)
solver = Solver(model, small_data,
num_epochs=15, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=1)
solver.train()
```
Plotting the loss, training accuracy, and validation accuracy should show clear overfitting:
```
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
```
## Train the net
By training the three-layer convolutional network for one epoch, you should achieve greater than 40% accuracy on the training set:
```
model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001)
solver = Solver(model, data,
num_epochs=1, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=20)
solver.train()
```
## Visualize Filters
You can visualize the first-layer convolutional filters from the trained network by running the following:
```
from utils.vis_utils import visualize_grid
grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
plt.imshow(grid.astype('uint8'))
plt.axis('off')
plt.gcf().set_size_inches(5, 5)
plt.show()
```
# Spatial Batch Normalization
We already saw that batch normalization is a very useful technique for training deep fully-connected networks. Batch normalization can also be used for convolutional networks, but we need to tweak it a bit; the modification will be called "spatial batch normalization."
Normally batch-normalization accepts inputs of shape `(N, D)` and produces outputs of shape `(N, D)`, where we normalize across the minibatch dimension `N`. For data coming from convolutional layers, batch normalization needs to accept inputs of shape `(N, C, H, W)` and produce outputs of shape `(N, C, H, W)` where the `N` dimension gives the minibatch size and the `(H, W)` dimensions give the spatial size of the feature map.
If the feature map was produced using convolutions, then we expect the statistics of each feature channel to be relatively consistent both between different images and different locations within the same image. Therefore spatial batch normalization computes a mean and variance for each of the `C` feature channels by computing statistics over both the minibatch dimension `N` and the spatial dimensions `H` and `W`.
## Spatial batch normalization: forward
In the file `layers/layers.py`, implement the forward pass for spatial batch normalization in the function `spatial_batchnorm_forward`. Check your implementation by running the following:
```
from layers.layers import spatial_batchnorm_forward, spatial_batchnorm_backward
np.random.seed(231)
# Check the training-time forward pass by checking means and variances
# of features both before and after spatial batch normalization
N, C, H, W = 2, 3, 4, 5
x = 4 * np.random.randn(N, C, H, W) + 10
print('Before spatial batch normalization:')
print(' Shape: ', x.shape)
print(' Means: ', x.mean(axis=(0, 2, 3)))
print(' Stds: ', x.std(axis=(0, 2, 3)))
# Means should be close to zero and stds close to one
gamma, beta = np.ones(C), np.zeros(C)
bn_param = {'mode': 'train'}
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization:')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
# Means should be close to beta and stds close to gamma
gamma, beta = np.asarray([3, 4, 5]), np.asarray([6, 7, 8])
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization (nontrivial gamma, beta):')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
np.random.seed(231)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, C, H, W = 10, 4, 11, 12
bn_param = {'mode': 'train'}
gamma = np.ones(C)
beta = np.zeros(C)
for t in range(50):
x = 2.3 * np.random.randn(N, C, H, W) + 13
spatial_batchnorm_forward(x, gamma, beta, bn_param)
bn_param['mode'] = 'test'
x = 2.3 * np.random.randn(N, C, H, W) + 13
a_norm, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After spatial batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=(0, 2, 3)))
print(' stds: ', a_norm.std(axis=(0, 2, 3)))
```
## Spatial batch normalization: backward
In the file `layers/layers.py`, implement the backward pass for spatial batch normalization in the function `spatial_batchnorm_backward`. Run the following to check your implementation using a numeric gradient check:
```
np.random.seed(231)
N, C, H, W = 2, 3, 4, 5
x = 5 * np.random.randn(N, C, H, W) + 12
gamma = np.random.randn(C)
beta = np.random.randn(C)
dout = np.random.randn(N, C, H, W)
bn_param = {'mode': 'train'}
fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
```
| github_jupyter |
Wayne Nixalo - 2017-Jun-12 17:27
Code-Along of Lesson 5 JNB.
Lesson 5 NB: https://github.com/fastai/courses/blob/master/deeplearning1/nbs/lesson5.ipynb
[Lecture](https://www.youtube.com/watch?v=qvRL74L81lg)
```
import theano
%matplotlib inline
import sys, os
sys.path.insert(1, os.path.join('utils'))
import utils; reload(utils)
from utils import *
from __future__ import division, print_function
model_path = 'data/imdb/models/'
%mkdir -p $model_path # -p : make intermediate directories as needed
```
## Setup data
We're going to look at the IMDB dataset, which contains movie reviews from IMDB, along with their sentiment. Keras comes with some helpers for this dataset.
```
from keras.datasets import imdb
idx = imdb.get_word_index()
```
This is the word list:
```
idx_arr = sorted(idx, key=idx.get)
idx_arr[:10]
```
...and this is the mapping from id to word:
```
idx2word = {v: k for k, v in idx.iteritems()}
```
We download the reviews using code copied from keras.datasets:
```
# getting the dataset directly bc keras's versn makes some changes
path = get_file('imdb_full.pkl',
origin='https://s3.amazonaws.com/text-datasets/imdb_full.pkl',
md5_hash='d091312047c43cf9e4e38fef92437263')
f = open(path, 'rb')
(x_train, labels_train), (x_test, labels_test) = pickle.load(f)
# apparently cpickle can be x1000 faster than pickle? hmm
len(x_train)
```
Here's the 1st review. As you see, the words have been replaced by ids. The ids can be looked up in idx2word.
```
', '.join(map(str, x_train[0]))
```
The first word of the first review is 23022. Let's see what that is.
```
idx2word[23022]
x_train[0]
```
Here's the whole review, mapped from ids to words.
```
' '.join([idx2word[o] for o in x_train[0]])
```
The labels are 1 for positive, 0 for negative
```
labels_train[:10]
```
Reduce vocabulary size by setting rare words to max index.
```
vocab_size = 5000
trn = [np.array([i if i < vocab_size-1 else vocab_size-1 for i in s]) for s in x_train]
test = [np.array([i if i < vocab_size-1 else vocab_size-1 for i in s]) for s in x_test]
```
Look at distribution of lengths of sentences
```
lens = np.array(map(len, trn))
(lens.max(), lens.min(), lens.mean())
```
Pad (with zero) or truncate each sentence to make consistent length.
```
seq_len = 500
# keras.preprocessing.sequence
trn = sequence.pad_sequences(trn, maxlen=seq_len, value=0)
test = sequence.pad_sequences(test, maxlen=seq_len, value=0)
```
This results in nice rectangular matrices that can be passed to ML algorithms. Reviews shorter than 500 words are prepadded with zeros, those greater are truncated.
```
trn.shape
trn[0]
```
## Create simple models
### Single hidden layer NN
This simplest model that tends to give reasonable results is a single hidden layer net. So let's try that. Note that we can't expect to get any useful results by feeding word ids directly into a neural net - so instead we use an embedding to replace them with a vector of 32 (initially random) floats for each word in the vocab.
```
model = Sequential([
Embedding(vocab_size, 32, input_length=seq_len),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()
# model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# redoing on Linux
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
```
The [Stanford paper](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf) that this dataset is from cites a state of the art accuracy (without unlabelled data) of 0.883. So we're short of that, but on the right track.
### Single Conv layer with Max Pooling
A CNN is likely to work better, since it's designed to take advantage of ordered data. We'll need to use a 1D CNN, since a sequence of words is 1D.
```
# the embedding layer is always the first step in every NLP model
# --> after that layer, you don't have words anymore: vectors
conv1 = Sequential([
Embedding(vocab_size, 32, input_length=seq_len, dropout=0.2),
Dropout(0.2),
Convolution1D(64, 5, border_mode='same', activation='relu'),
Dropout(0.2),
MaxPooling1D(),
Flatten(),
Dense(100, activation='relu'),
Dropout(0.7),
Dense(1, activation='sigmoid')])
conv1.summary()
conv1.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
# conv1.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
# redoing on Linux w/ GPU
conv1.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
```
That's well past the Stanford paper's accuracy - another win for CNNs!
*Heh, the above take a lot longer than 4s on my Mac*
```
conv1.save_weights(model_path + 'conv1.h5')
# conv1.load_weights(model_path + 'conv1.h5')
```
## Pre-trained Vectors
You may want to look at wordvectors.ipynb before moving on.
In this section, we replicate the previous CNN, but using pre-trained embeddings.
```
def get_glove_dataset(dataset):
    """Download the requested glove dataset from files.fast.ai
    and return a location that can be passed to load_vectors.
    """
    # see wordvectors.ipynb for info on how these files were
    # generated from the original glove data.
    # Known-good checksums so get_file can validate each download.
    md5sums = {'6B.50d' : '8e1557d1228decbda7db6dfd81cd9909',
               '6B.100d': 'c92dbbeacde2b0384a43014885a60b2c',
               '6B.200d': 'af271b46c04b0b2e41a84d8cd806178d',
               '6B.300d': '30290210376887dcc6d0a5a6374d8255'}
    glove_path = os.path.abspath('data/glove.6B/results')
    # IPython magic: create the cache directory (and any parents) if missing.
    %mkdir -p $glove_path
    # get_file (Keras data utility) downloads the archive, verifies the md5
    # and untars it, returning the local path to the extracted dataset.
    return get_file(dataset,
                    'https://files.fast.ai/models/glove/' + dataset + '.tgz',
                    cache_subdir=glove_path,
                    md5_hash=md5sums.get(dataset, None),
                    untar=True)
# not able to download from above, so using code from wordvectors_CodeAlong.ipynb to load
def get_glove(name):
    """Parse a raw GloVe text file and cache its vectors/words/index to disk.

    Reads ``path + 'glove.' + name + '.txt'`` (one word followed by its
    float components per line) and writes three artifacts under
    ``res_path``: ``name + '.dat'`` (the vector matrix, via ``save_array``),
    ``name + '_words.pkl'`` (the word list) and ``name + '_idx.pkl'``
    (the word -> row-index mapping).

    NOTE(review): relies on the module-level globals ``path`` and
    ``res_path`` and on the ``save_array`` helper from utils.
    """
    with open(path + 'glove.' + name + '.txt', 'r') as f:
        lines = [line.split() for line in f]
    words = [d[0] for d in lines]
    # np.stack requires a sequence, not a generator, in modern NumPy
    # (passing a generator was deprecated and now raises TypeError).
    vecs = np.stack([np.array(d[1:], dtype=np.float32) for d in lines])
    wordidx = {o: i for i, o in enumerate(words)}
    save_array(res_path + name + '.dat', vecs)
    # Context managers ensure the pickle files are flushed and closed
    # promptly (the original left the handles open).
    with open(res_path + name + '_words.pkl', 'wb') as f:
        pickle.dump(words, f)
    with open(res_path + name + '_idx.pkl', 'wb') as f:
        pickle.dump(wordidx, f)
def load_glove(loc):
    """Load the cached GloVe artifacts written by ``get_glove``.

    Args:
        loc: path prefix of the cached files, e.g. ``res_path + '6B.50d'``.

    Returns:
        Tuple ``(vecs, words, wordidx)``: the vector matrix (via
        ``load_array``), the word list, and the word -> row-index mapping.
    """
    vecs = load_array(loc + '.dat')
    # Use context managers so the pickle file handles are closed promptly
    # (the original opened them inline and never closed them).
    with open(loc + '_words.pkl', 'rb') as f:
        words = pickle.load(f)
    with open(loc + '_idx.pkl', 'rb') as f:
        wordidx = pickle.load(f)
    return (vecs, words, wordidx)
def load_vectors(loc):
    """Load cached GloVe data: (vectors, words, word->index) from `loc`.

    This was a byte-for-byte duplicate of load_glove; delegate to it so the
    loading logic lives in one place.
    """
    return load_glove(loc)
# apparently pickle is a `bit-serializer` or smth like that?
# this isn't working, so instead..
# NOTE(review): the next two load attempts fail (dead download URL / wrong
# file extension); the working path is get_glove + load_glove at the bottom.
vecs, words, wordidx = load_vectors(get_glove_dataset('6B.50d'))
# trying to load the glove data I downloaded directly, before:
# NOTE(review): `wordix` (not `wordidx`) is assigned here - likely a typo,
# but harmless since the value is overwritten below anyway.
vecs, words, wordix = load_vectors('data/glove.6B/' + 'glove.' + '6B.50d' + '.txt')
# vecs, words, wordix = load_vectors('data/glove.6B/' + 'glove.' + '6B.50d' + '.tgz')
# not successful. get_file(..) returns filepath as '.tar' ? as .tgz doesn't work.
# ??get_file # keras.utils.data_utils.get_file(..)
# that doesn't work either, but method from wordvectors JNB worked so:
path = 'data/glove.6B/'
# res_path = path + 'results/'
res_path = 'data/imdb/results/'
%mkdir -p $res_path
# this way not working; so will pull vecs,words,wordidx manually:
# vecs, words, wordidx = load_vectors(get_glove('6B.50d'))
get_glove('6B.50d')
vecs, words, wordidx = load_glove(res_path + '6B.50d')
# NOTE: yay it worked..!..
# NOTE: yay it worked..!..
def create_emb():
    """Build an embedding matrix for the imdb vocab from the loaded GloVe data.

    Row i holds the GloVe vector for idx2word[i]; words missing from GloVe
    (and the rare-word id in the last row) get random-normal vectors.
    Relies on globals: vecs, vocab_size, idx2word, wordidx, normal.
    """
    n_fact = vecs.shape[1]
    emb = np.zeros((vocab_size, n_fact))
    # range (not Python-2-only xrange) keeps this runnable on Python 3 too.
    for i in range(1, len(emb)):
        word = idx2word[i]
        # Only simple alphanumeric tokens are looked up, and only when GloVe
        # actually has them (the original indexed wordidx unconditionally,
        # which could raise KeyError for a matching-but-absent word).
        if word and re.match(r"^[a-zA-Z0-9\-]*$", word) and word in wordidx:
            emb[i] = vecs[wordidx[word]]
        else:
            # If we can't find the word in glove, randomly initialize
            emb[i] = normal(scale=0.6, size=(n_fact,))
    # This is our "rare word" id - we want to randomly initialize
    emb[-1] = normal(scale=0.6, size=(n_fact,))
    emb /= 3
    return emb
emb = create_emb()
# this embedding matrix is now the glove word vectors, indexed according to
# the imdb dataset.
```
We pass our embedding matrix to the Embedding constructor, and set it to non-trainable.
```
# Same CNN as before, but the embedding layer is initialized with the
# pre-trained GloVe matrix `emb` and frozen (trainable=False).
model = Sequential([
    Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2,
              weights=[emb], trainable=False),
    Dropout(0.25),
    Convolution1D(64, 5, border_mode='same', activation='relu'),
    Dropout(0.25),
    MaxPooling1D(),
    Flatten(),
    Dense(100, activation='relu'),
    Dropout(0.7),
    Dense(1, activation='sigmoid')])
# this is copy-pasted of the previous code, with the addition of the
# weights being the pre-trained embeddings.
# We figure the weights are pretty good, so we'll initially set
# trainable to False. Will finetune due to some words missing or etc..
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# running on GPU
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
```
We've already beaten our previous model! But let's fine-tune the embedding weights - especially since the words we couldn't find in glove just have random embeddings.
```
# Unfreeze the embedding layer to fine-tune the GloVe vectors.
model.layers[0].trainable=True
# NOTE(review): plain attribute assignment may not update the optimizer's
# compiled learning-rate variable in Keras; K.set_value(model.optimizer.lr, 1e-4)
# is the usual way - confirm this actually took effect.
model.optimizer.lr=1e-4
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# running on GPU
model.optimizer.lr=1e-4
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# the above was supposed to be 3 total epochs but I did 4 by mistake
model.save_weights(model_path+'glove50.h5')
```
## Multi-size CNN
This is an implementation of a multi-size CNN as shown in Ben Bowles' [blog post.](https://quid.com/feed/how-quid-uses-deep-learning-with-small-data)
```
from keras.layers import Merge
```
We use the functional API to create multiple conv layers of different sizes, and then concatenate them.
```
# Functional-API sub-model: conv layers of filter sizes 3, 4 and 5 applied
# in parallel to the same input, then concatenated.
# NOTE(review): the Input shape is (vocab_size, 50); since this sub-model is
# fed the embedding output it arguably should be (seq_len, 50) - confirm.
graph_in = Input((vocab_size, 50))
convs = [ ]
for fsz in xrange(3, 6):
    x = Convolution1D(64, fsz, border_mode='same', activation='relu')(graph_in)
    x = MaxPooling1D()(x)
    x = Flatten()(x)
    convs.append(x)
out = Merge(mode='concat')(convs)
graph = Model(graph_in, out)
emb = create_emb()
```
We then replace the conv/max-pool layer in our original CNN with the concatenated conv layers.
```
# Same architecture as the earlier CNN, but the single conv/max-pool block
# is replaced by the multi-size conv sub-model `graph`.
model = Sequential ([
    Embedding(vocab_size, 50, input_length=seq_len, dropout=0.2, weights=[emb]),
    Dropout(0.2),
    graph,
    Dropout(0.5),
    Dense(100, activation='relu'),
    Dropout(0.7),
    Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# on GPU
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
```
Interestingly, I found that in this case I got best results when I started the embedding layer as being trainable, and then set it to non-trainable after a couple of epochs. I have no idea why! *hmmm*
```
# Freeze the embedding after a couple of trainable epochs, then keep training.
model.layers[0].trainable=False
# NOTE(review): as earlier, attribute assignment may not reach the compiled lr.
model.optimizer.lr=1e-5
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# on gpu
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
# NOTE(review): this saves the weights of `conv1` (the earlier model), not the
# multi-size CNN `model` just trained - confirm this is intentional.
conv1.save_weights(model_path + 'conv1_1.h5')
# conv1.load_weights(model_path + 'conv1.h5')
```
This more complex architecture has given us another boost in accuracy.
## LSTM
We haven't covered this bit yet!
```
# Single-layer LSTM classifier over the (masked, L2-regularized) embedding.
model = Sequential([
    Embedding(vocab_size, 32, input_length=seq_len, mask_zero=True,
              W_regularizer=l2(1e-6), dropout=0.2),
    LSTM(100, consume_less='gpu'),
    Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=5, batch_size=64)
# NOTE: if this took 100s/epoch using TitanX's or Tesla K80s ... use the Linux machine for this
# Save the LSTM model that was just trained. (Bug fix: the original called
# conv1.save_weights here, which would have written the earlier CNN's
# weights to LSTM_1.h5 instead of this model's.)
model.save_weights(model_path + 'LSTM_1.h5')
```
| github_jupyter |
# Barycenters of persistence diagrams
Theo Lacombe
https://tlacombe.github.io/
## A statistical descriptor in the persistence diagram space
This tutorial presents the concept of barycenter, or __Fréchet mean__, of a family of persistence diagrams. Fréchet means, in the context of persistence diagrams, were initially introduced in the seminal papers:
- Probability measures on the space of persistence diagrams, by Mileyko, Mukherjee, and Harer. https://math.hawaii.edu/~yury/papers/probpers.pdf ,
- Fréchet means for distributions of persistence diagrams, by Turner, Mileyko, Mukherjee and Harer, https://arxiv.org/pdf/1206.2790.pdf
and later studied in https://arxiv.org/pdf/1901.03048.pdf (theoretical viewpoint) and https://arxiv.org/pdf/1805.08331.pdf (computational viewpoint).
## Motivation and mathematical formulation
Recall that given an object $X$, say a point cloud embedded in the Euclidean space $\mathbb{R}^d$, one can compute its persistence diagram $\mathrm{Dgm}(X)$ which is a point cloud supported on a half-plane $\Omega \subset \mathbb{R}^2$ (see this tutorial https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-persistence-diagrams.ipynb for an introduction to persistence diagrams).
Now, consider that instead of building one diagram $\mathrm{Dgm}(X)$ from one object $X$, you observe a collection of objects $X_1 \dots X_n$ and compute their respective diagrams, let's call them $\mu_1 \dots \mu_n$. How can you build a statistical summary of this information?
Fréchet means is one way to do so. It mimics the notion of arithmetic mean in metric spaces. First, recall that the space of persistence diagrams, equipped with either the bottleneck (https://gudhi.inria.fr/python/latest/bottleneck_distance_user.html) or the Wasserstein (https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html) metrics is **not** a linear space. Therefore, the notion of arithmetic mean cannot be faithfully transposed to the context of persistence diagrams.
To overcome this limitation, one relies on _Fréchet means_. In Euclidean spaces, one of the characterization of the arithmetic mean
$$ \overline{x} = \frac{1}{n} \sum_{i=1}^n x_i $$
of a sample $x_1 \dots x_n \in \mathbb{R}^d$ is that it minimizes the _variance_ of the sample, that is the map
$$\mathcal{E} : x \mapsto \sum_{i=1}^n \|x - x_i \|_2^2 $$
has a unique minimizer, that turns out to be $\overline{x}$.
Although the former formula does not make sense in general metric spaces, the map $\mathcal{E}$ can still be defined, in particular in the context of persistence diagrams. Therefore, a _Fréchet mean_ of $\mu_1 \dots \mu_n$ is any minimizer, should it exist, of the map
$$ \mathcal{E} : \mu \mapsto \sum_{i=1}^n d_2(\mu, \mu_i)^2, $$
where $d_2$ denotes the so-called Wasserstein-2 distance between persistence diagrams.
It has been proved that Fréchet means of persistence diagrams always exist in the context of averaging finitely many diagrams. Their computation remains however challenging.
## A Lagrangian algorithm
We showcase here one of the algorithm used to _estimate_ barycenters of a (finite) family of persistence diagrams (note that their exact computation is intractable in general). This algorithm was introduced by Turner et al. (https://arxiv.org/pdf/1206.2790.pdf) and adopts a _lagrangian_ perspective. Roughly speaking (see details in their paper), this algorithm consists in iterating the following:
- Let $\mu$ be a current estimation of the barycenter of $\mu_1 \dots \mu_n$.
- (1) Compute $\sigma_i$ ($1 \leq i \leq n$) the optimal (partial) matching between $\mu$ and $\mu_i$.
- (2) For each point $x$ of the diagram $\mu$, apply $x \mapsto \mathrm{mean}((\sigma_i(x))_i)$, where $\mathrm{mean}$ is the arithmetic mean in $\mathbb{R}^2$.
- (3) If $\mu$ didn't change, return $\mu$. Otherwise, go back to (1).
This algorithm is proved to converge ($\mathcal{E}$ decreases at each iteration) to a _local_ minimum of the map $\mathcal{E}$. Indeed, the map $\mathcal{E}$ is **not convex**, which can unfortunately lead to arbitrarily bad local minima. Furthermore, its combinatorial aspect (one must compute $n$ optimal partial matchings at each iteration step) makes it too computationally expensive when dealing with a large number of large diagrams. It is however a fairly decent attempt when dealing with few diagrams with few points.
The solution $\mu^*$ returned by the algorithm is a persistence diagram with the following property:
each point $x \in \mu^*$ is the mean of one point (or the diagonal) $\sigma_i(x)$ in each of the $\mu_i$s. These are called _groupings_.
**Note:** This algorithm is said to be based on a _Lagrangian_ approach by opposition to _Eulerian_ , from fluid dynamics formalism (https://en.wikipedia.org/wiki/Lagrangian_and_Eulerian_specification_of_the_flow_field). Roughly speaking, Lagrangian models track the position of each particule individually (here, the points in the barycenter estimate), while Eulerian models instead measure the quantity of mass that is present in each location of the space. We will present in a next version of this tutorial an Eulerian approach to solve (approximately) this problem.
## Illustration
### Imports and preliminary tests
```
import gudhi
print("Current gudhi version:", gudhi.__version__)
print("Version >= 3.2.0 is required for this tutorial")
# Note: %matplotlib notebook allows for interactive 3D plot.
#%matplotlib notebook
%matplotlib inline
from gudhi.wasserstein.barycenter import lagrangian_barycenter as bary
from gudhi.persistence_graphical_tools import plot_persistence_diagram
import numpy as np
import matplotlib.pyplot as plt
```
### Example
Let us consider three persistence diagrams.
```
# Three small hand-made persistence diagrams; each row is a (birth, death) pair.
diag1 = np.array([[0., 1.], [0, 2], [1, 2], [1.32, 1.87], [0.7, 1.2]])
diag2 = np.array([[0, 1.5], [0.5, 2], [1.2, 2], [1.3, 1.8], [0.4, 0.8]])
diag3 = np.array([[0.2, 1.1], [0.1, 2.2], [1.3, 2.1], [0.5, 0.9], [0.6, 1.1]])
diags = [diag1, diag2, diag3]
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
colors=['r', 'b', 'g']
# Overlay the three diagrams on one axes, one color per diagram.
for diag, c in zip(diags, colors):
    plot_persistence_diagram(diag, axes=ax, colormap=c)
ax.set_title("Set of 3 persistence diagrams", fontsize=22)
```
Now, let us compute (more precisely, estimate) a barycenter of `diags`.
Using the verbose option, we can get access to a `log` (dictionary) that contains complementary information.
```
# Estimate the barycenter; verbose=True also returns a log dict holding the
# final energy, the number of iterations and the point groupings.
b, log = bary(diags,
              init=0,
              verbose=True) # we initialize our estimation on the first diagram (the red one.)
print("Energy reached by this estimation of the barycenter: E=%.2f." %log['energy'])
# Fixed the status message grammar ("Convergenced made" -> "Convergence reached").
print("Convergence reached after %s steps." %log['nb_iter'])
```
Using the `groupings` provided in logs, we can have a better visibility on what is happening.
```
# Groupings: G[i] lists (y_j, x_i_j) pairs matching point y_j of the barycenter
# estimate to point x_i_j of diagram i (a negative index means the diagonal,
# per the x_i_j >= 0 check in plot_bary below).
G = log["groupings"]
def proj_on_diag(x):
    """Orthogonal projection of a diagram point (birth, death) onto the diagonal y = x."""
    midpoint = (x[0] + x[1]) / 2
    return (midpoint, midpoint)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
colors = ['r', 'b', 'g']
# Re-plot the three input diagrams as background for the grouping plot.
for diag, c in zip(diags, colors):
    plot_persistence_diagram(diag, axes=ax, colormap=c)
def plot_bary(b, diags, groupings, axes):
    """Plot the matching segments between a barycenter estimate and each diagram.

    b         : (k, 2) array, the barycenter estimate.
    diags     : list of (n_i, 2) arrays, the input diagrams.
    groupings : groupings[i] is a list of (y_j, x_i_j) pairs matching point
                y_j of b to point x_i_j of diags[i]; a negative x_i_j means
                y_j is matched to the diagonal.
    axes      : matplotlib axes to draw on.
    """
    for i in range(len(diags)):
        # Bug fix: use the `groupings` parameter (the original read the
        # global G, silently ignoring the argument).
        indices = groupings[i]
        for (y_j, x_i_j) in indices:
            y = b[y_j]
            if y[0] != y[1]:  # skip estimate points lying on the diagonal
                if x_i_j >= 0:  # matched to an actual point of diags[i]
                    x = diags[i][x_i_j]
                else:  # y_j is matched to the diagonal
                    x = proj_on_diag(y)
                # Bug fix: draw on the `axes` parameter (original used global ax).
                axes.plot([y[0], x[0]], [y[1], x[1]], c='black',
                          linestyle="dashed")
    axes.scatter(b[:, 0], b[:, 1], color='purple', marker='d', label="barycenter (estim)")
    axes.legend()
    axes.set_title("Set of diagrams and their barycenter", fontsize=22)
# Overlay the matching segments and the barycenter estimate on the axes above.
plot_bary(b, diags, G, axes=ax)
```
Note that, as the problem is not convex, the output (and its quality, i.e. energy) might depend on the initialization of the optimization.
Energy: lower is better.
```
# Re-run the estimation with each of the three diagrams as initialization:
# different local minima (energies) may be reached since E is not convex.
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
colors = ['r', 'b', 'g']
for i, ax in enumerate(axs):
    for diag, c in zip(diags, colors):
        plot_persistence_diagram(diag, axes=ax, colormap=c)
    b, log = bary(diags, init=i, verbose=True)
    e = log["energy"]
    G = log["groupings"]
    # print(G)
    plot_bary(b, diags, groupings=G, axes=ax)
    ax.set_title("Barycenter estim with init=%s. Energy: %.2f" %(i, e), fontsize=14)
```
| github_jupyter |
# Dataframe basics
### Dr. Tirthajyoti Sarkar, Fremont, CA 94536
### Apache Spark
Apache Spark is one of the hottest new trends in the technology domain. It is the framework with probably the highest potential to realize the fruit of the marriage between Big Data and Machine Learning. It runs fast (up to 100x faster than traditional Hadoop MapReduce) due to in-memory operation, offers robust, distributed, fault-tolerant data objects (called RDD), and inte-grates beautifully with the world of machine learning and graph analytics through supplementary
packages like Mlib and GraphX.
Spark is implemented on Hadoop/HDFS and written mostly in Scala, a functional programming language, similar to Java. In fact, Scala needs the latest Java installation on your system and runs on JVM. However, for most of the beginners, Scala is not a language that they learn first to venture into the world of data science. Fortunately, Spark provides a wonderful Python integration, called PySpark, which lets Python programmers to interface with the Spark framework and
learn how to manipulate data at scale and work with objects and algorithms over a distributed file system.
### Dataframe
In Apache Spark, a DataFrame is a distributed collection of rows under named columns. It is conceptually equivalent to a table in a relational database, an Excel sheet with Column headers, or a data frame in R/Python, but with richer optimizations under the hood. DataFrames can be constructed from a wide array of sources such as: structured data files, tables in Hive, external databases, or existing RDDs. It also shares some common characteristics with RDD:
* Immutable in nature : We can create DataFrame / RDD once but can’t change it. And we can transform a DataFrame / RDD after applying transformations.
* Lazy Evaluations: Which means that a task is not executed until an action is performed.
* Distributed: RDD and DataFrame both are distributed in nature.
### Advantages of the DataFrame
* DataFrames are designed for processing large collection of structured or semi-structured data.
* Observations in Spark DataFrame are organised under named columns, which helps Apache Spark to understand the schema of a DataFrame. This helps Spark optimize execution plan on these queries.
* DataFrame in Apache Spark has the ability to handle petabytes of data.
* DataFrame has a support for wide range of data format and sources.
* It has API support for different languages like Python, R, Scala, Java.
```
import pyspark
from pyspark import SparkContext as sc
```
### Create a `SparkSession` app object
```
from pyspark.sql import SparkSession
# Create (or reuse) a SparkSession named 'Basics' - the unified entry point
# for DataFrame functionality.
spark1 = SparkSession.builder.appName('Basics').getOrCreate()
```
### Read in a JSON file and examine the data
```
# Read the JSON file into a Spark DataFrame (schema is inferred).
df = spark1.read.json('Data/people.json')
```
#### Unlike Pandas DataFrame, it does not show itself when called
Instead, it just shows the Data types of the columns
```
# Evaluating the name alone only shows the DataFrame's column types,
# not its contents (lazy evaluation).
df
```
#### You have to call show() method to evaluate it i.e. show it
```
# show() is an action - it triggers evaluation and prints the rows.
df.show()
```
### The data schema
Use `printSchema()` to show the schema of the data. Note how tightly it is integrated to the SQL-like framework. You can even see that the schema accepts `null` values because the nullable property is set `True`.
```
# Print the inferred schema: column names, types, and nullability.
df.printSchema()
```
#### Fortunately a simple `columns` method exists to get column names back as a Python list
```
# Column names returned as a plain Python list.
df.columns
```
### The `describe` and `summary` methods
Similar to Pandas, the `describe` method is used for the statistical summary. But unlike Pandas, calling only `describe()` returns a DataFrame! This is due to the **[lazy evaluation](https://data-flair.training/blogs/apache-spark-lazy-evaluation/)** - the actual computation is delayed as much as possible.
```
# describe() is lazy - it returns a new DataFrame of summary statistics
# without computing them yet.
df.describe()
```
#### We have to call `show` again
```
# Calling show() on the describe() result materializes the statistics.
df.describe().show()
```
#### There is `summary` method for more stats
```
# summary() adds percentiles on top of describe()'s statistics.
df.summary().show()
```
### The `take` and `collect` methods to read/collect rows
These methods return some or all rows as a Python list.
```
# take(n) returns the first n rows; collect() pulls ALL rows to the driver
# (use with care on large data).
df.take(2)
df.collect()
```
### Defining your own Data Schema
Import data types and structure types to build the data schema yourself
```
from pyspark.sql.types import StructField, IntegerType, StringType, StructType
```
Define your data schema by supplying name and data types to the structure fields you will be importing. It will be a simple Python list of `StructField` objects. You have to use Spark data types like `IntegerType` and `StringType`.
```
# Explicit schema: 'age' as a nullable integer, 'name' as a nullable string.
data_schema = [StructField('age',IntegerType(),True),
               StructField('name',StringType(),True)]
```
Now create a `StructType` object called `final_struc` with this schema as its fields
```
# Wrap the field list in a StructType to form the complete schema.
final_struc = StructType(fields=data_schema)
```
Now read in the same old JSON with this new schema `final_struc`
```
# Re-read the JSON with the user-defined schema instead of inference.
df = spark1.read.json('Data/people.json',schema=final_struc)
df.show()
```
Now when you print the schema, **you will see that the `age` is read as `int` and not `long`**. By default Spark could not figure out for this column the exact data type that you wanted, so it went with `long`. But this is how you can build your own schema and instruct Spark to read the data accordingly.
| github_jupyter |
## Driver code for training models to learn pipeline 1 x pipeline 2 transform maps
- Note: currently using output after atlas-based grouping
- Atlas used: aparc (Freesurfer) DKT-31 Mindboggle (ANTs: https://mindboggle.readthedocs.io/en/latest/labels.html)
```
import sys
import numpy as np
import pandas as pd
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.manifold import TSNE
sys.path.append('../lib')
from data_handling import *
from data_stats import *
from deeplearning import *
```
### Data paths
```
# Project / data locations (adjust per machine).
proj_dir = '/home/nikhil/projects/CT_reproduce/code/compare-surf-tools/'
#proj_dir = '/Users/nikhil/projects/compare-surf-tools/'
data_dir = proj_dir + 'data/'
fs60_dir = data_dir + 'fs60_group_stats/'
qc_dir = '/home/nikhil/projects/CT_reproduce/data/QC/'
results_dir = data_dir + 'results/'
# Input files: demographics, DKT ROI name map, and per-pipeline thickness tables.
demograph_file = 'ABIDE_Phenotype.csv'
dkt_roi_names = 'DKT_parcel_map_FS_CIVET.csv'
ants_file = 'ABIDE_ants_thickness_data.csv' #uses modified (mindboggle) dkt atlas with 31 ROIs
civet_file = 'ABIDE_civet2.1_thickness_test1.csv'
fs53_file = 'ABIDE_fs5.3_thickness.csv'
fs51_file = 'cortical_fs5.1_measuresenigma_thickavg.csv'
fs60_lh_file = 'lh.aparc.thickness.table.test1' #'aparc_lh_thickness_table.txt' #'lh.aparc.thickness.table.test1'
fs60_rh_file = 'rh.aparc.thickness.table.test1' #'aparc_rh_thickness_table.txt' #'rh.aparc.thickness.table.test1'
```
### Global Vars
```
# Column name used as the subject identifier across all dataframes.
subject_ID_col = 'SubjID'
```
### Load data
```
# Demographics and Dx
demograph = pd.read_csv(data_dir + demograph_file)
demograph = demograph.rename(columns={'Subject_ID':subject_ID_col})
# ROI names
dkt_roi_map = pd.read_csv(data_dir + dkt_roi_names)
# CIVET 2.1
civet_data = pd.read_csv(data_dir + civet_file, dtype={subject_ID_col: str})
print('shape of civet data {}'.format(civet_data.shape))
civet_data_std = standardize_civet_data(civet_data, subject_ID_col, dkt_roi_map)
print('shape of stdized civet data {}'.format(civet_data_std.shape))
print('')
# ANTs
# header=2 skips leading rows before the real header - presumably file-level
# metadata; confirm against the CSV layout.
ants_data = pd.read_csv(data_dir + ants_file, header=2)
print('shape of ants data {}'.format(ants_data.shape))
ants_data_std = standardize_ants_data(ants_data, subject_ID_col)
print('shape of stdized ants data {}'.format(ants_data_std.shape))
print('')
# FS (FreeSurfer 5.3 / 5.1 / 6.0), each standardized to a common layout
fs53_data = pd.read_csv(data_dir + fs53_file)
print('shape of fs53 data {}'.format(fs53_data.shape))
fs53_data_std = standardize_fs_data(fs53_data, subject_ID_col)
print('shape of stdized fs53 data {}'.format(fs53_data_std.shape))
print('')
fs51_data = pd.read_csv(data_dir + fs51_file)
print('shape of fs51 data {}'.format(fs51_data.shape))
fs51_data_std = standardize_fs_data(fs51_data, subject_ID_col)
print('shape of stdized fs51 data {}'.format(fs51_data_std.shape))
print('')
# FreeSurfer 6.0 ships separate whitespace-delimited tables per hemisphere.
fs60_lh_data = pd.read_csv(fs60_dir + fs60_lh_file, delim_whitespace=True)
fs60_rh_data = pd.read_csv(fs60_dir + fs60_rh_file, delim_whitespace=True)
print('shape of fs60 data l: {}, r: {}'.format(fs60_lh_data.shape,fs60_rh_data.shape))
fs60_data_std = standardize_fs60_data(fs60_lh_data, fs60_rh_data, subject_ID_col)
print('shape of stdized fs60 data {}'.format(fs60_data_std.shape))
```
### Create master dataframe
```
# Pipelines to compare; keys become the labels in master_df's 'pipeline' column.
# NOTE(review): the 'fs60' key is mapped to fs51_data_std (FreeSurfer 5.1),
# not fs60_data_std - confirm whether this substitution is intentional.
data_dict = {'civet': civet_data_std,
             'fs60' : fs51_data_std}
na_action = 'drop' # options: ignore, drop; anything else will not use the dataframe for analysis.
master_df_raw, common_subs, common_roi_cols = combine_processed_data(data_dict, subject_ID_col, na_action)
# Add demographic columns to the master_df_raw
useful_demograph = demograph[[subject_ID_col,'SEX','AGE_AT_SCAN','DX_GROUP','SITE_ID']].copy()
# DX_GROUP: (original: 1:ASD, 2:Controls, after shift 0:ASD, 1:Controls)
# Shift to (0 and 1 instead of 1 and 2 for statsmodels)
useful_demograph['DX_GROUP'] = useful_demograph['DX_GROUP']-1
useful_demograph['SEX'] = useful_demograph['SEX']-1
# Keep only the token after the last '_' of the subject ID (drops the site prefix).
# NOTE(review): tuple-unpacking via `.str` is deprecated in recent pandas.
_,useful_demograph[subject_ID_col] = useful_demograph[subject_ID_col].str.rsplit('_', 1).str
master_df_raw = pd.merge(master_df_raw, useful_demograph, how='left', on=subject_ID_col)
print('\nmaster df shape after adding demographic info {}'.format(master_df_raw.shape))
print('\nNumber of common subjects {}({}), ROIs {}'.format(len(common_subs), master_df_raw[master_df_raw['pipeline']=='fs60']['DX_GROUP'].value_counts().to_dict(),len(common_roi_cols)))
```
### QC filters
- Manual (Gleb or Maarten)
- Automatic (Amadou)
```
qc_type = 'maarten' #condition: master_df['QC_maarten']==0, master_df['QC_gleb'].isin['1','-+1']
if qc_type in ['maarten','gleb']:
    qc_df = pd.read_csv(qc_dir + 'master_QC_table.csv',dtype={'SubjID': str})
    master_df = pd.merge(master_df_raw, qc_df, how='left', on=subject_ID_col)
    # NOTE(review): the filter below always uses the 'maarten' column, even when
    # qc_type == 'gleb' - confirm whether the gleb condition should apply here.
    master_df = master_df[master_df['QC_maarten']==0]
    print('Filtering based on {} QC. Resultant number of subjects {} ({}) (out of {})'.format(qc_type,len(master_df[subject_ID_col].unique()),master_df[master_df['pipeline']=='fs60']['DX_GROUP'].value_counts().to_dict(),len(common_subs)))
    common_subs = master_df[subject_ID_col].unique()
else:
    # No QC filter applied; use the raw master dataframe.
    master_df = master_df_raw
    print('No QC performed. master_df shape {}'.format(len(master_df[subject_ID_col].unique())))
```
### Create CV folds
```
# Single stratified 80/20 train/test split (stratified on diagnosis).
n_splits = 1
test_size = 0.2
input_pipe = 'fs60'
output_pipe = 'civet'
ml_demograph = master_df[master_df['pipeline']=='fs60'][[subject_ID_col,'DX_GROUP','SEX','SITE_ID']]
# X: ROI values from the input pipeline; Y: the same ROIs from the output pipeline.
X = master_df[master_df['pipeline']==input_pipe][[subject_ID_col] + common_roi_cols]
Y = master_df[master_df['pipeline']==output_pipe][[subject_ID_col] + common_roi_cols]
subject_idx = ml_demograph[subject_ID_col]
dx = ml_demograph['DX_GROUP']
print('Shape X {}, Y {}'.format(X.shape, Y.shape))
print('Shape subject_ids {}, dx {}'.format(subject_idx.shape, dx.shape))
# Use subject_ids for indexing to maintain correspondance between X and Y
# Use dx for stratification
sss = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=0)
cv_list = []
for train_index, test_index in sss.split(subject_idx, dx):
    subject_idx_train = subject_idx[train_index]
    subject_idx_test = subject_idx[test_index]
    # Select rows by subject ID (not positional index) so X and Y stay aligned.
    X_train, X_test = X[X[subject_ID_col].isin(subject_idx_train)], X[X[subject_ID_col].isin(subject_idx_test)]
    Y_train, Y_test = Y[Y[subject_ID_col].isin(subject_idx_train)], Y[Y[subject_ID_col].isin(subject_idx_test)]
    cv_list.append((X_train,X_test,Y_train,Y_test))
    print('')
    print('Train Shapes X {}, Y {}'.format(X_train.shape, Y_train.shape))
    print('Test Shapes X {}, Y {}'.format(X_test.shape, Y_test.shape))
print('\n number of CV folds {}'.format(len(cv_list)))
```
### Train model
```
# training params
lr = 0.001
n_epochs = 100
validate_after = 10  # run validation every N epochs
batch_size = 20
dropout = 1 #keep_prob (1 = no dropout)
verbose = False # Do you want to print perf after every epoch??
save_model = False
save_model_path = './'
# Autoencoder-style network: 62 ROIs in -> 62 ROIs out, with a correlation loss.
net_arch = {'input':62,'n_layers':4,'l1':10,'l2':10,'l3':10,'l4':10,'l5':10,'output':62,
            'reg':0.1, 'loss_type':'corr'}
test_err_melt_concat = pd.DataFrame()
for fold in range(len(cv_list)):
    print('\nStarting fold {}'.format(fold))
    X_train,X_test,Y_train,Y_test = cv_list[fold]
    # TF1.x graph-mode API: fresh graph and session per fold.
    tf.reset_default_graph()
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        # Train model
        data = {'X':X_train[common_roi_cols].values,'y':Y_train[common_roi_cols].values}
        simple_ae = pipeline_AE(net_arch)
        optimizer = tf.train.AdamOptimizer(learning_rate = lr).minimize(simple_ae.loss)
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        cur_time = datetime.time(datetime.now())
        print('\nStart training time: {}'.format(cur_time))
        simple_ae, train_metrics = train_network(sess, simple_ae, data, optimizer, n_epochs,
                                                 batch_size, dropout,validate_after,verbose)
        #Save trained model
        if save_model:
            print('saving model at {}'.format(save_model_path + 'simple_ae_example'))
            saver.save(sess, save_model_path + 'simple_ae_example')
        cur_time = datetime.time(datetime.now())
        print('End training time: {}\n'.format(cur_time))
        # Test model
        print('Test perf')
        data = {'X':X_test[common_roi_cols].values,'y':Y_test[common_roi_cols].values}
        _,test_metrics = test_network(sess,simple_ae,data)
        # Null Test: create fake output from output itself
        # (Should be worse than actual performance)
        print( '\nNull test perf')
        data = {'X':Y_test[common_roi_cols].values,'y':Y_test[common_roi_cols].values}
        _,test_metrics_null = test_network(sess,simple_ae,data)
        # populate perf dataframe: per-subject, per-ROI signed errors in long format
        test_err = pd.DataFrame()
        test_err[subject_ID_col] = Y_test[subject_ID_col]
        test_err[common_roi_cols] = Y_test[common_roi_cols] - test_metrics['test_preds']
        test_err_melt = pd.melt(test_err, id_vars =[subject_ID_col], value_vars =common_roi_cols,
                                var_name ='ROI', value_name ='err')
        test_err_melt['fold'] = np.tile(fold,len(test_err_melt))
        test_err_melt['model'] = np.tile('real',len(test_err_melt))
        # NOTE(review): DataFrame.append is deprecated in recent pandas (use pd.concat).
        test_err_melt_concat = test_err_melt_concat.append(test_err_melt)
        # Null test
        test_err_null = pd.DataFrame()
        test_err_null[subject_ID_col] = Y_test[subject_ID_col]
        test_err_null[common_roi_cols] = Y_test[common_roi_cols] - test_metrics_null['test_preds']
        test_err_null_melt = pd.melt(test_err_null, id_vars=[subject_ID_col], value_vars=common_roi_cols,
                                     var_name ='ROI', value_name ='err')
        test_err_null_melt['fold'] = np.tile(fold,len(test_err_null_melt))
        test_err_null_melt['model'] = np.tile('null',len(test_err_null_melt))
        # Append to the same dataframe since subject IDs are the same
        test_err_melt_concat = test_err_melt_concat.append(test_err_null_melt)
    print('\nEnding fold {}'.format(fold))
# Plot train/validation loss curves, with the final (constant) test loss for reference.
train_loss = train_metrics['train_loss']
valid_loss = train_metrics['valid_loss']
test_loss = test_metrics['test_loss']
plt.figure(figsize=(10,5))
plt.style.use('seaborn-white')
sns.set(font_scale=1)
plt.plot(train_loss,label='train');
plt.plot(valid_loss,label='valid');
plt.plot(np.tile(test_loss,len(train_loss)),label='test');
plt.title('Loss')
plt.xlabel('number of epoch x{}'.format(validate_after))
plt.legend()
```
### Test Perf: MSE and Correlations between pipelines
```
# Before and after prediction
df1 = X_test   # input pipeline values (test set)
df2 = Y_test   # target pipeline values (test set)
df3 = pd.DataFrame(columns=common_roi_cols,data=test_metrics['test_preds'])       # model predictions
df3[subject_ID_col] = df1[subject_ID_col].values
df4 = pd.DataFrame(columns=common_roi_cols,data=test_metrics_null['test_preds'])  # null-model predictions
df4[subject_ID_col] = df1[subject_ID_col].values
# Squared-error tables (per subject, per ROI) for the three comparisons.
pipeline_err = pd.DataFrame(columns=[subject_ID_col]+common_roi_cols)
model_err_null = pd.DataFrame(columns=[subject_ID_col]+common_roi_cols)
model_err = pd.DataFrame(columns=[subject_ID_col]+common_roi_cols)
pipeline_err[subject_ID_col] = df1[subject_ID_col].values
pipeline_err[common_roi_cols] = (df1[common_roi_cols].values - df2[common_roi_cols].values)**2
model_err[subject_ID_col] = df1[subject_ID_col].values
model_err[common_roi_cols] = (df3[common_roi_cols].values - df2[common_roi_cols].values)**2
model_err_null[subject_ID_col] = df1[subject_ID_col].values
model_err_null[common_roi_cols] = (df4[common_roi_cols].values - df2[common_roi_cols].values)**2
#MSE
print('pipeline 1 vs pipeline 2 MSE: {:4.2f}'.format(pipeline_err[common_roi_cols].values.mean()))
print('null model predictions vs pipeline 2 MSE: {:4.2f}'.format(model_err_null[common_roi_cols].values.mean()))
print('model predictions vs pipeline 2 MSE: {:4.2f}'.format(model_err[common_roi_cols].values.mean()))
#Corr: per-subject cross-correlations between each prediction set and the target.
pipeline_xcorr_df = cross_correlations(df1,df2,subject_ID_col)
pipeline_xcorr_df['pair'] = np.tile('{} (orig),{}'.format(input_pipe,output_pipe),len(pipeline_xcorr_df))
print('pipeline 1 vs pipeline 2 correlation: {:4.2f}'.format(pipeline_xcorr_df['correlation'].mean()))
model_null_xcorr_df = cross_correlations(df4,df2,subject_ID_col)
model_null_xcorr_df['pair'] = np.tile('{}_pred (null),{}'.format(output_pipe,output_pipe),len(model_null_xcorr_df))
print('null model predictions vs pipeline 2 correlation: {:4.2f}'.format(model_null_xcorr_df['correlation'].mean()))
model_xcorr_df = cross_correlations(df3,df2,subject_ID_col)
model_xcorr_df['pair'] = np.tile('{}_pred (real),{}'.format(input_pipe,output_pipe),len(model_xcorr_df))
print('model predictions vs pipeline 2 correlation: {:4.2f}'.format(model_xcorr_df['correlation'].mean()))
xcorr_df_concat = pipeline_xcorr_df.append(model_null_xcorr_df).append(model_xcorr_df)
```
### Pearson's correlation (Test subset)
```
# Strip plot of per-subject correlations for each ROI, colored by comparison pair.
sns.set(font_scale=1)
with sns.axes_style("whitegrid"):
    g = sns.catplot(x='correlation',y='ROI',hue='pair',order=common_roi_cols,
                    data=xcorr_df_concat,aspect=0.75,height=10,kind='strip')
```
### MSE distribution (Test subset)
```
# Violin plot of per-ROI signed errors, one column per model (real vs null).
plot_df = test_err_melt_concat
sns.set(font_scale=1)
with sns.axes_style("whitegrid"):
    g = sns.catplot(x='err',y='ROI',order=common_roi_cols,col='model',
                    data=plot_df,aspect=0.75,height=10,kind='violin')
```
### TSNE
```
# 2-D t-SNE embedding of the training-set predictions, colored by diagnosis.
preds = train_metrics['train_preds']
tsne_embed = TSNE(n_components=2,init='pca').fit_transform(preds)
tsne_annot = ml_demograph[ml_demograph[subject_ID_col].isin(subject_idx_train)]['DX_GROUP'].values
plot_df = pd.DataFrame(columns=['x','y','annot','subset'])
plot_df['x'] = tsne_embed[:,0]
plot_df['y'] = tsne_embed[:,1]
plot_df['annot'] = tsne_annot
plot_df['subset'] = np.tile('train',len(tsne_annot))
with sns.axes_style("whitegrid"):
    g = sns.lmplot(x='x',y='y',hue='annot',fit_reg=False, markers='o',data=plot_df,
                   height=6,scatter_kws={'alpha':0.5}); #x_jitter=20,y_jitter=20,
```
| github_jupyter |
# 基于 BipartiteGraphSage 的二部图无监督学习
二部图是电子商务推荐场景中很常见的一种图,GraphScope提供了针对二部图处理学习任务的模型。本次教程,我们将会展示GraphScope如何使用BipartiteGraphSage算法在二部图上训练一个无监督学习模型。
本次教程的学习任务是链接预测,通过计算在图中用户顶点和商品顶点之间存在边的概率来预测链接。
在这一任务中,我们使用GraphScope内置的BipartiteGraphSage算法在 [U2I](http://graph-learn-dataset.oss-cn-zhangjiakou.aliyuncs.com/u2i.zip) 数据集上训练一个模型,这一训练模型可以用来预测用户顶点和商品顶点之间的链接。这一任务可以被看作在一个异构链接网络上的无监督训练任务。
在这一任务中,BipartiteGraphSage算法会将图中的结构信息和属性信息压缩为每个节点上的低维嵌入向量,这些嵌入和表征可以进一步用来预测节点间的链接。
这一教程将会分为以下几个步骤:
- 启动GraphScope的学习引擎,并将图关联到引擎上
- 使用内置的GCN模型定义训练过程,并定义相关的超参
- 开始训练
```
# Install graphscope package if you are NOT in the Playground
!pip3 install graphscope
!pip3 uninstall -y importlib_metadata # Address a module conflict issue on colab.google. Remove this line if you are not on colab.
# Import the graphscope module.
import graphscope
graphscope.set_option(show_log=False)  # disable verbose engine logging (set True to see logs)
# Load u2i dataset
from graphscope.dataset import load_u2i
graph = load_u2i()
```
## Launch learning engine
然后,我们需要定义一个特征列表用于图的训练。训练特征集合必须从点的属性集合中选取。在这个例子中,我们选择了 "feature" 属性作为训练特征集,这一特征集也是 U2I 数据中用户顶点和商品顶点的特征集。
借助定义的特征列表,接下来,我们使用 [graphlearn](https://graphscope.io/docs/reference/session.html#graphscope.Session.graphlearn) 方法来开启一个学习引擎。
在这个例子中,我们在 "graphlearn" 方法中,指定在数据中 "u" 类型的顶点和 "i" 类型顶点和 "u-i" 类型边上进行模型训练。
```
# launch a learning engine.
lg = graphscope.graphlearn(
graph,
nodes=[("u", ["feature"]), ("i", ["feature"])],
edges=[(("u", "u-i", "i"), ["weight"]), (("i", "u-i_reverse", "u"), ["weight"])],
)
```
这里我们使用内置的`BipartiteGraphSage`模型定义训练过程。你可以在 [Graph Learning Model](https://graphscope.io/docs/learning_engine.html#data-model) 获取更多内置学习模型的信息。
在本次示例中,我们使用 tensorflow 作为神经网络后端训练器。
```
import numpy as np
import tensorflow as tf
import graphscope.learning
from graphscope.learning.examples import BipartiteGraphSage
from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer
from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer
# Unsupervised GraphSage.
def train(config, graph):
    """Run unsupervised BipartiteGraphSage training and dump node embeddings.

    Args:
        config: dict of hyperparameters (batch size, dims, sampling fan-outs,
            optimizer settings, ...) consumed by the model and the trainer.
        graph: graphlearn graph handle returned by ``graphscope.graphlearn``.

    Side effects:
        Saves the learned user/item embeddings to ``u_emb.npy`` / ``i_emb.npy``.
    """
    def build_model():
        # Instantiate the bipartite GraphSAGE model from the hyperparameters.
        return BipartiteGraphSage(
            graph,
            config["batch_size"],
            config["hidden_dim"],
            config["output_dim"],
            config["hops_num"],
            config["u_neighs_num"],
            config["i_neighs_num"],
            u_features_num=config["u_features_num"],
            u_categorical_attrs_desc=config["u_categorical_attrs_desc"],
            i_features_num=config["i_features_num"],
            i_categorical_attrs_desc=config["i_categorical_attrs_desc"],
            neg_num=config["neg_num"],
            use_input_bn=config["use_input_bn"],
            act=config["act"],
            agg_type=config["agg_type"],
            need_dense=config["need_dense"],
            in_drop_rate=config["drop_out"],
            ps_hosts=config["ps_hosts"],
        )

    # Start from a clean TF graph so repeated runs do not accumulate ops.
    graphscope.learning.reset_default_tf_graph()
    optimizer = get_tf_optimizer(
        config["learning_algo"], config["learning_rate"], config["weight_decay"]
    )
    trainer = LocalTFTrainer(build_model, epoch=config["epoch"], optimizer=optimizer)
    trainer.train()
    # Persist embeddings for both node types of the bipartite graph.
    for node_type, out_file in (("u", "u_emb"), ("i", "i_emb")):
        np.save(out_file, trainer.get_node_embedding(node_type))
# Define hyperparameters
# Consumed by BipartiteGraphSage (via model_fn) and LocalTFTrainer in train().
config = {
    "batch_size": 128,
    "hidden_dim": 128,  # hidden-layer width
    "output_dim": 128,  # final embedding dimension
    "u_features_num": 1,  # number of attributes per user ("u") vertex
    "u_categorical_attrs_desc": {"0": ["u_id", 10000, 64]},  # presumably attr idx -> [name, bucket size, emb dim]; confirm against graphlearn docs
    "i_features_num": 1,  # number of attributes per item ("i") vertex
    "i_categorical_attrs_desc": {"0": ["i_id", 10000, 64]},
    "hops_num": 1,  # neighborhood hops to aggregate
    "u_neighs_num": [10],  # sampled neighbors per hop on the user side
    "i_neighs_num": [10],  # sampled neighbors per hop on the item side
    "neg_num": 10,  # negative samples per positive edge
    "learning_algo": "adam",
    "learning_rate": 0.001,
    "weight_decay": 0.0005,
    "epoch": 5,
    "use_input_bn": True,  # batch-normalize raw input features
    "act": tf.nn.leaky_relu,  # activation function
    "agg_type": "gcn",  # neighbor aggregation strategy
    "need_dense": True,
    "drop_out": 0.0,  # input dropout rate (0 = disabled)
    "ps_hosts": None,  # parameter-server hosts; None means local training
}
```
## 执行训练过程
在定义完训练过程和超参后,现在我们可以使用学习引擎和定义的超参开始训练过程。
```
train(config, lg)
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Introduction to tensor slicing
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/tensor_slicing"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/tensor_slicing.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/tensor_slicing.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/tensor_slicing.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
When working on ML applications such as object detection and NLP, it is sometimes necessary to work with sub-sections (slices) of tensors. For example, if your model architecture includes routing, where one layer might control which training example gets routed to the next layer. In this case, you could use tensor slicing ops to split the tensors up and put them back together in the right order.
In NLP applications, you can use tensor slicing to perform word masking while training. For example, you can generate training data from a list of sentences by choosing a word index to mask in each sentence, taking the word out as a label, and then replacing the chosen word with a mask token.
In this guide, you will learn how to use the TensorFlow APIs to:
* Extract slices from a tensor
* Insert data at specific indices in a tensor
This guide assumes familiarity with tensor indexing. Read the indexing sections of the [Tensor](https://www.tensorflow.org/guide/tensor#indexing) and [TensorFlow NumPy](https://www.tensorflow.org/guide/tf_numpy#indexing) guides before getting started with this guide.
## Setup
```
import tensorflow as tf
import numpy as np
```
## Extract tensor slices
Perform NumPy-like tensor slicing using `tf.slice`.
```
t1 = tf.constant([0, 1, 2, 3, 4, 5, 6, 7])
print(tf.slice(t1,
begin=[1],
size=[3]))
```
Alternatively, you can use a more Pythonic syntax. Note that tensor slices are evenly spaced over a start-stop range.
```
print(t1[1:4])
```
<img src="images/tf_slicing/slice_1d_1.png">
```
print(t1[-3:])
```
<img src="images/tf_slicing/slice_1d_2.png">
For 2-dimensional tensors, you can use something like:
```
t2 = tf.constant([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
print(t2[:-1, 1:3])
```
<img src="images/tf_slicing/slice_2d_1.png">
You can use `tf.slice` on higher dimensional tensors as well.
```
t3 = tf.constant([[[1, 3, 5, 7],
[9, 11, 13, 15]],
[[17, 19, 21, 23],
[25, 27, 29, 31]]
])
print(tf.slice(t3,
begin=[1, 1, 0],
size=[1, 1, 2]))
```
You can also use `tf.strided_slice` to extract slices of tensors by 'striding' over the tensor dimensions.
Use `tf.gather` to extract specific indices from a single axis of a tensor.
```
print(tf.gather(t1,
indices=[0, 3, 6]))
# This is similar to doing
t1[::3]
```
<img src="images/tf_slicing/slice_1d_3.png">
`tf.gather` does not require indices to be evenly spaced.
```
alphabet = tf.constant(list('abcdefghijklmnopqrstuvwxyz'))
print(tf.gather(alphabet,
indices=[2, 0, 19, 18]))
```
<img src="images/tf_slicing/gather_1.png">
To extract slices from multiple axes of a tensor, use `tf.gather_nd`. This is useful when you want to gather the elements of a matrix as opposed to just its rows or columns.
```
t4 = tf.constant([[0, 5],
[1, 6],
[2, 7],
[3, 8],
[4, 9]])
print(tf.gather_nd(t4,
indices=[[2], [3], [0]]))
```
<img src="images/tf_slicing/gather_2.png">
```
t5 = np.reshape(np.arange(18), [2, 3, 3])
print(tf.gather_nd(t5,
indices=[[0, 0, 0], [1, 2, 1]]))
# Return a list of two matrices
print(tf.gather_nd(t5,
indices=[[[0, 0], [0, 2]], [[1, 0], [1, 2]]]))
# Return one matrix
print(tf.gather_nd(t5,
indices=[[0, 0], [0, 2], [1, 0], [1, 2]]))
```
## Insert data into tensors
Use `tf.scatter_nd` to insert data at specific slices/indices of a tensor. Note that the tensor into which you insert values is zero-initialized.
```
t6 = tf.constant([10])
indices = tf.constant([[1], [3], [5], [7], [9]])
data = tf.constant([2, 4, 6, 8, 10])
print(tf.scatter_nd(indices=indices,
updates=data,
shape=t6))
```
Methods like `tf.scatter_nd` which require zero-initialized tensors are similar to sparse tensor initializers. You can use `tf.gather_nd` and `tf.scatter_nd` to mimic the behavior of sparse tensor ops.
Consider an example where you construct a sparse tensor using these two methods in conjunction.
```
# Gather values from one tensor by specifying indices
new_indices = tf.constant([[0, 2], [2, 1], [3, 3]])
t7 = tf.gather_nd(t2, indices=new_indices)
```
<img src="images/tf_slicing/gather_nd_sparse.png">
```
# Add these values into a new tensor
t8 = tf.scatter_nd(indices=new_indices, updates=t7, shape=tf.constant([4, 5]))
print(t8)
```
This is similar to:
```
t9 = tf.SparseTensor(indices=[[0, 2], [2, 1], [3, 3]],
values=[2, 11, 18],
dense_shape=[4, 5])
print(t9)
# Convert the sparse tensor into a dense tensor
t10 = tf.sparse.to_dense(t9)
print(t10)
```
To insert data into a tensor with pre-existing values, use `tf.tensor_scatter_nd_add`.
```
t11 = tf.constant([[2, 7, 0],
[9, 0, 1],
[0, 3, 8]])
# Convert the tensor into a magic square by inserting numbers at appropriate indices
t12 = tf.tensor_scatter_nd_add(t11,
indices=[[0, 2], [1, 1], [2, 0]],
updates=[6, 5, 4])
print(t12)
```
Similarly, use `tf.tensor_scatter_nd_sub` to subtract values from a tensor with pre-existing values.
```
# Convert the tensor into an identity matrix
t13 = tf.tensor_scatter_nd_sub(t11,
indices=[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 1], [2, 2]],
updates=[1, 7, 9, -1, 1, 3, 7])
print(t13)
```
Use `tf.tensor_scatter_nd_min` to copy element-wise minimum values from one tensor to another.
```
t14 = tf.constant([[-2, -7, 0],
[-9, 0, 1],
[0, -3, -8]])
t15 = tf.tensor_scatter_nd_min(t14,
indices=[[0, 2], [1, 1], [2, 0]],
updates=[-6, -5, -4])
print(t15)
```
Similarly, use `tf.tensor_scatter_nd_max` to copy element-wise maximum values from one tensor to another.
```
t16 = tf.tensor_scatter_nd_max(t14,
indices=[[0, 2], [1, 1], [2, 0]],
updates=[6, 5, 4])
print(t16)
```
## Further reading and resources
In this guide, you learned how to use the tensor slicing ops available with TensorFlow to exert finer control over the elements in your tensors.
* Check out the slicing ops available with TensorFlow NumPy such as `tf.experimental.numpy.take_along_axis` and `tf.experimental.numpy.take`.
* Also check out the [Tensor guide](https://www.tensorflow.org/guide/tensor) and the [Variable guide](https://www.tensorflow.org/guide/variable).
| github_jupyter |
# Object Detection @Edge with SageMaker Neo + Pytorch Yolov5
**SageMaker Studio Kernel**: Data Science
In this exercise you'll:
- Get a pre-trained model: Yolov5
- Prepare the model to compile it with Neo
- Compile the model for the target: **X86_64**
- Get the optimized model and run a simple local test
### install dependencies
```
!apt update -y && apt install -y libgl1
!pip install torch==1.7.0 torchvision==0.8.0 opencv-python dlr==1.8.0
```
## 1) Get a pre-trained model and export it to torchscript
-> SageMaker Neo expects the model in TorchScript (traced) format
```
import os
import urllib.request
if not os.path.isdir('yolov5'):
!git clone https://github.com/ultralytics/yolov5 && \
cd yolov5 && git checkout v5.0 && \
git apply ../../models/01_YoloV5/01_Pytorch/yolov5_inplace.patch
if not os.path.exists('yolov5s.pt'):
urllib.request.urlretrieve('https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt', 'yolov5s.pt')
import torch.nn as nn
import torch
import sys
sys.path.insert(0, 'yolov5')
model = torch.load('yolov5s.pt')['model'].float().cpu()
## We need to replace these two activation functions to make it work with TVM.
# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module):
    """Export-friendly SiLU (swish): ``x * sigmoid(x)``, traceable by TVM."""

    @staticmethod
    def forward(x):
        gate = torch.sigmoid(x)
        return gate * x
class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    """Export-friendly Hardswish: ``x * hardtanh(x + 3, 0, 6) / 6``.

    Fix: the original body called ``F.hardtanh`` but this notebook never
    imports ``torch.nn.functional as F``, so it raised NameError at runtime.
    ``nn.functional`` (already in scope) is used instead.
    Works for torchscript, CoreML and ONNX export.
    """

    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
        return x * nn.functional.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX
for k,m in model.named_modules():
t = type(m)
layer_name = f"{t.__module__}.{t.__name__}"
if layer_name == 'models.common.Conv': # assign export-friendly activations
if isinstance(m.act, nn.Hardswish):
m.act = Hardswish()
elif isinstance(m.act, nn.SiLU):
m.act = SiLU()
img_size=640
inp = torch.rand(1,3,img_size,img_size).float().cpu()
model.eval()
p = model(inp)
model_trace = torch.jit.trace(model, inp, strict=False)
model_trace.save('model.pth')
```
## 2) Create a package with the model and upload to S3
```
import tarfile
import sagemaker
sagemaker_session = sagemaker.Session()
model_name='yolov5'
with tarfile.open("model.tar.gz", "w:gz") as f:
f.add("model.pth")
f.list()
s3_uri = sagemaker_session.upload_data('model.tar.gz', key_prefix=f'{model_name}/model')
print(s3_uri)
```
## 3) Compile the model with SageMaker Neo (X86_64)
```
import time
import boto3
import sagemaker
role = sagemaker.get_execution_role()
sm_client = boto3.client('sagemaker')
compilation_job_name = f'{model_name}-pytorch-{int(time.time()*1000)}'
sm_client.create_compilation_job(
CompilationJobName=compilation_job_name,
RoleArn=role,
InputConfig={
'S3Uri': s3_uri,
'DataInputConfig': f'{{"input": [1,3,{img_size},{img_size}]}}',
'Framework': 'PYTORCH'
},
OutputConfig={
'S3OutputLocation': f's3://{sagemaker_session.default_bucket()}/{model_name}-pytorch/optimized/',
'TargetPlatform': {
'Os': 'LINUX',
'Arch': 'X86_64'
}
},
StoppingCondition={ 'MaxRuntimeInSeconds': 900 }
)
while True:
resp = sm_client.describe_compilation_job(CompilationJobName=compilation_job_name)
if resp['CompilationJobStatus'] in ['STARTING', 'INPROGRESS']:
print('Running...')
else:
print(resp['CompilationJobStatus'], compilation_job_name)
break
time.sleep(5)
```
## 4) Download the compiled model
```
output_model_path = f's3://{sagemaker_session.default_bucket()}/{model_name}-pytorch/optimized/model-LINUX_X86_64.tar.gz'
!aws s3 cp $output_model_path /tmp/model.tar.gz
!rm -rf model_object_detection && mkdir model_object_detection
!tar -xzvf /tmp/model.tar.gz -C model_object_detection
```
## 5) Run the model locally
```
import urllib.request
urllib.request.urlretrieve('https://i2.wp.com/petcaramelo.com/wp-content/uploads/2020/05/doberman-cores.jpg', 'dogs.jpg')
%matplotlib inline
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
# Classes
labels= ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush'] # class names
```
### load the model using the runtime DLR
```
import dlr
# load the model (CPU x86_64)
model = dlr.DLRModel('model_object_detection', 'cpu')
import sys
sys.path.insert(0,'../models/01_YoloV5/01_Pytorch')
from processing import Processor
proc = Processor(labels, threshold=0.25, iou_threshold=0.45)
img = cv2.imread('dogs.jpg')
x = proc.pre_process(img)
y = model.run(x)[0]
(bboxes, scores, cids), image = proc.post_process(y, img.shape, img.copy())
plt.figure(figsize=(10,10))
plt.imshow(image)
```
# Done! :)
| github_jupyter |
# K-Fold Cross Validation + Grid Search cv + Principal Componenet Analysis + Kernel SVM on Wine Dataset
GridSearchCV implements a “fit” method and a “predict” method like any classifier except that the parameters of the classifier used to predict is optimized by cross-validation. ... This enables searching over any sequence of parameter settings.
Cross-validation is a statistical method used to estimate the skill of machine learning models.
It is commonly used in applied machine learning to compare and select a model for a given predictive modeling problem because it is easy to understand, easy to implement, and results in skill estimates that generally have a lower bias than other methods.
In this tutorial, you will discover a gentle introduction to the k-fold cross-validation procedure for estimating the skill of machine learning models.
<ol>
<li>That k-fold cross validation is a procedure used to estimate the skill of the model on new data.
<li>There are common tactics that you can use to select the value of k for your dataset.
<li>There are commonly used variations on cross-validation such as stratified and repeated that are available in scikit-learn.
</ol>
```
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
fig = plt.figure(figsize=(10, 10))
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# Importing the dataset into a pandas dataframe
df = pd.read_csv("Wine.csv")
df.describe()
df.info()
df.head(10)
df.tail(10)
```
<h4>So Dependent Variable of the dataset is splitted into 3 classes namely(1, 2, 3)</h4>
```
df.shape
#Spliting The dataset into Independent and Dependent Variable
X = df.iloc[:, 0:13].values
Y = df.iloc[:, 13].values
# Splitting the dataset into the Training set and Test set
X_train, X_test, Y_train, Y_test = train_test_split(X,
Y,
test_size = 0.2,
random_state = 0)
print("Size of X_train: {}".format(X_train.shape))
print("Size of X_test: {}".format(X_test.shape))
print("Size of Y_train: {}".format(Y_train.shape))
print("Size of Y_test: {}".format(Y_test.shape))
# Feature Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
```
#Applying Principal Component Analysis
#Commenting This out because we got out max variance and we could have
#Changed this block, but instead we are writing new section just to
#make others understand
"""pca = PCA(n_components = None)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)"""
We can see that the first two principal components have the highest explained variance,
so we will set n_components in PCA to 2.
```
# Exploratory PCA: the cell that originally fitted PCA with n_components=None
# was turned into markdown (see above), so `pca` was referenced here before
# ever being defined (NameError). Fit a full PCA first so the explained
# variances can actually be inspected.
pca = PCA(n_components = None)
pca.fit(X_train)
Variances = pca.explained_variance_
Variances
# Applying PCA: keep the two components with the highest explained variance.
pca = PCA(n_components = 2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
# Fitting Logistic Regression to the Training set
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, Y_train)
#Predicting The Results
y_pred = classifier.predict(X_test)
y_pred
#Comparing the results
cm = confusion_matrix(Y_test, y_pred)
cm
#Checking The Accuracy score
acc = accuracy_score(Y_test, y_pred)
print("The Accuracy on the model is: {}%".format((acc*100).astype('int32')))
```
<h3>Building a text report showing the main classification metrics</h3>
```
cr = classification_report(Y_test, y_pred)
print(cr)
```
# Tweaking the Hyperparameters
```
#Applying KFold Cross Validation
fold = cross_val_score(estimator = classifier,
cv = 10,
X = X_train,
y = Y_train,
n_jobs = -1)
fold
print("Average Accuracy: {}%".format(((fold.mean())*100).astype('int32')))
print("Variance : {}".format(fold.std()))
```
Cross-validation predicted it just right: 97% is what our classifier achieved without any hyperparameter optimization.
<h4>Let's see what Grid Search CV can do</h4>
```
#Applying GridSearch Cv
params = [{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'kernel': ['rbf'], 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]},
{'C': [1, 10, 100, 1000], 'kernel': ['poly'], 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], 'degree': [2, 3, 4]},
{'C': [1, 10, 100, 1000], 'kernel': ['sigmoid'], 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}]
start = time.time()
gscv = GridSearchCV(estimator = classifier,
param_grid = params,
scoring= 'accuracy',
n_jobs= -1,
cv= 10)
gscv = gscv.fit(X_train, Y_train)
end = time.time()
tt = end - start
print(tt)
accuracy = gscv.best_score_
accuracy
best_parameters = gscv.best_params_
best_parameters
```
<h3>So according to grid search, the best parameters for our model are {'C': 1, 'kernel': 'linear'}</h3>
Training The model with optimal parameters
```
# Retrain with the hyperparameters actually selected by grid search instead of
# hard-coding kernel='rbf' (grid search chose {'C': 1, 'kernel': 'linear'}).
classifier2 = SVC(**gscv.best_params_, random_state = 0)
classifier2.fit(X_train, Y_train)
y_pred2 = classifier2.predict(X_test)
cm = confusion_matrix(Y_test, y_pred2)
# Report the tuned model's own test accuracy; the previous prints re-used the
# cross-validation scores ("fold") of the untuned classifier, which was misleading.
print("Tuned model accuracy: {:.2f}%".format(accuracy_score(Y_test, y_pred2) * 100))
cm
# Visualising the Training set results
x_set, y_set = X_train, Y_train
#Creating the grid of Minimum and maximun values from X_train
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
stop = x_set[:, 0].max() + 1,
step = 0.01),
np.arange(start = x_set[:, 1].min() - 1,
stop = x_set[:, 1].max() + 1,
step = 0.01))
#Plotting the line Classifier
plt.contourf(X1,
X2,
classifier.predict(np.array([X1.ravel(),
X2.ravel()]).T).reshape(X1.shape),
alpha = 0.4,
cmap = ListedColormap(('red', 'green', 'blue')))
#Plotting The Datapoint in red and gree color
for i,j in enumerate(np.unique(y_set)):
plt.scatter(x_set[y_set == j, 0],
x_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i),
label = j
)
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
plt.title("PCA + Logistic Regression (Training Set)")
plt.xlabel('PCA1')
plt.ylabel('PCA2')
plt.legend()
# Visualising the Test set results
x_set, y_set = X_test, Y_test
#Creating the grid of Minimum and maximun values from X_train
X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1,
stop = x_set[:, 0].max() + 1,
step = 0.01),
np.arange(start = x_set[:, 1].min() - 1,
stop = x_set[:, 1].max() + 1,
step = 0.01))
#Plotting the line Classifier
plt.contourf(X1,
X2,
classifier.predict(np.array([X1.ravel(),
X2.ravel()]).T).reshape(X1.shape),
alpha = 0.4,
cmap = ListedColormap(('red', 'green', 'blue')))
#Plotting The Datapoint in red and gree color
for i,j in enumerate(np.unique(y_set)):
plt.scatter(x_set[y_set == j, 0],
x_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i),
label = j
)
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
plt.title("PCA + Logistic Regression (Test Set)")
plt.xlabel('PCA1')
plt.ylabel('PCA2')
plt.legend()
```
| github_jupyter |
```
from __future__ import print_function, division
import pickle
import torch
import sys
# sys.path.append('../../res/')
from loader import synthetic_loader
# from loader import city_scapes_loader
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import copy
import os
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pickle
import sys
from PIL import Image
import torch.nn.functional as F
import math
from model import Unet
from model import FCN
from config_file import *
BATCH_SIZE = 2
CITYSCAPES = False
# TRAIN_DIR = '/data/graphics/'
VAL_DIR = '/data/graphics/toyota-pytorch/biased_dataset_generalization/datasets/test-set/val/'
data_transforms = {
'train': transforms.Compose([
transforms.Resize(IN_SIZE),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(IN_SIZE),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
}
label_transforms = {
'train': transforms.Compose([
transforms.Resize(IN_SIZE),
transforms.ToTensor()
]),
'val': transforms.Compose([
transforms.Resize(IN_SIZE),
transforms.ToTensor()
])
}
if CITYSCAPES:
# dset_train = city_scapes_loader.ImageFolder(TRAIN_DIR, data_transforms['train'])
dset_val = city_scapes_loader.ImageFolder(VAL_DIR, data_transforms['val'])
else:
# dset_train = synthetic_loader.ImageFolder(TRAIN_DIR, data_transforms['train'],target_transform=label_transforms['train'])
dset_val = synthetic_loader.ImageFolder(VAL_DIR, data_transforms['val'],target_transform=label_transforms['val'])
# train_loader = torch.utils.data.DataLoader(dset_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
val_loader = torch.utils.data.DataLoader(dset_val, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
dset_loaders = {'val':val_loader}
dset_sizes = {}
# dset_sizes['train'] = len(dset_train)
dset_sizes['val'] = len(dset_val)
def batch_iou_sum(labels, predictions):
    """Sum of per-image binary IoU over a batch of label/prediction pairs."""
    return sum(binary_iou(lab, pred) for lab, pred in zip(labels, predictions))
def batch_ppa_sum(labels, predictions):
    """Sum of per-image per-pixel accuracy over a batch of label/prediction pairs."""
    return sum(per_pixel_acc(lab, pred) for lab, pred in zip(labels, predictions))
def binary_iou(l, p):
    """Intersection-over-union of two binary masks given as torch tensors.

    Returns 0 when the union is empty (IoU would be undefined).
    """
    mask_true = l.cpu().numpy().astype(int)
    mask_pred = p.detach().cpu().numpy().astype(int)
    union = np.sum(np.bitwise_or(mask_true, mask_pred))
    if union == 0:
        return 0
    intersection = np.sum(np.bitwise_and(mask_true, mask_pred))
    return intersection / union
def per_pixel_acc(l, p):
    """Fraction of pixels where prediction equals label (binary 2-D masks)."""
    mask_true = l.cpu().numpy().astype(int)
    mask_pred = p.detach().cpu().numpy().astype(int)
    h, w = mask_true.shape  # masks are expected to be 2-D (H, W)
    mismatched = np.sum(np.bitwise_xor(mask_true, mask_pred))
    return (h * w - mismatched) / (h * w)
def show_results(model_path, GPU=False):
    """Evaluate a saved segmentation model on the validation set.

    This function was a byte-for-byte duplicate of ``evaluate_model``; it now
    delegates to it so the two implementations cannot drift apart.

    Args:
        model_path: path to a pickled torch model (saved with ``torch.save``).
        GPU: run inference on CUDA when True, otherwise on CPU.

    Returns:
        Tuple ``(average_iou, average_ppa)`` over ``dset_loaders['val']``.
    """
    return evaluate_model(model_path, GPU=GPU)
def evaluate_model(model_path, GPU=False):
    """Compute mean IoU and per-pixel accuracy of a saved model on the val set.

    Loads the pickled model from ``model_path`` (mapped onto CPU unless
    ``GPU`` is True), runs it over ``dset_loaders['val']`` and averages the
    per-image metrics. Evaluation stops early at the first NaN batch metric.

    Returns:
        Tuple ``(average_iou, average_ppa)``.
    """
    if GPU:
        model = torch.load(model_path)
    else:
        model = torch.load(model_path, map_location='cpu')
        model.cpu()
    iou_total = 0
    ppa_total = 0
    n_images = 0
    for inputs, labels in dset_loaders['val']:
        batch = inputs.cuda() if GPU else inputs
        outputs = model(batch)
        predictions = torch.argmax(outputs, dim=1)
        iou = batch_iou_sum(labels, predictions)
        ppa = batch_ppa_sum(labels, predictions)
        # Bail out on numerically broken batches instead of poisoning the mean.
        if math.isnan(iou) or math.isnan(ppa):
            break
        iou_total += iou
        ppa_total += ppa
        n_images += len(labels)
    average_iou = iou_total / n_images
    average_ppa = ppa_total / n_images
    return average_iou, average_ppa
trained_models = {1:'../runs/2018-10-30_B6T5J8CG/saved_models/B6T5J8CG.pt',10:'../runs/2018-10-30_O1B5C4OH/saved_models/O1B5C4OH.pt',100:'../runs/2018-10-31_RM4WZDIP/saved_models/RM4WZDIP.pt'}
GPU = True
ious = {}
ppas = {}
for num in trained_models.keys():
print('working on %s car models'%num)
path = trained_models[num]
iou,ppa = evaluate_model(path,GPU=True)
print('Average IOU for %s models:'%num,iou)
print('Average PPA for %s models:'%num,ppa)
ious[num] = iou
ppas[num] = ppa
```
# Visualize random results
| github_jupyter |
# Building a Regression Model for a Financial Dataset
In this notebook, you will build a simple linear regression model to predict the closing AAPL stock price. The lab objectives are:
* Pull data from BigQuery into a Pandas dataframe
* Use Matplotlib to visualize data
* Use Scikit-Learn to build a regression model
```
%%bash
bq mk -d ai4f
bq load --autodetect --source_format=CSV ai4f.AAPL10Y gs://cloud-training/ai4f/AAPL10Y.csv
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
plt.rc('figure', figsize=(12, 8.0))
```
## Pull Data from BigQuery
In this section we'll use a magic function to query a BigQuery table and then store the output in a Pandas dataframe. A magic function is just an alias to perform a system command. To see documentation on the "bigquery" magic function execute the following cell:
```
%%bigquery?
```
The query below selects everything you'll need to build a regression model to predict the closing price of AAPL stock. The model will be very simple for the purposes of demonstrating BQML functionality. The only features you'll use as input into the model are the previous day's closing price and a three day trend value. The trend value can only take on two values, either -1 or +1. If the AAPL stock price has increased over any two of the previous three days then the trend will be +1. Otherwise, the trend value will be -1.
Note, the features you'll need can be generated from the raw table `ai4f.AAPL10Y` using Pandas functions. However, it's better to take advantage of the serverless-ness of BigQuery to do the data pre-processing rather than applying the necessary transformations locally.
```
%%bigquery df
WITH
raw AS (
SELECT
date,
close,
LAG(close, 1) OVER(ORDER BY date) AS min_1_close,
LAG(close, 2) OVER(ORDER BY date) AS min_2_close,
LAG(close, 3) OVER(ORDER BY date) AS min_3_close,
LAG(close, 4) OVER(ORDER BY date) AS min_4_close
FROM
`ai4f.AAPL10Y`
ORDER BY
date DESC ),
raw_plus_trend AS (
SELECT
date,
close,
min_1_close,
IF (min_1_close - min_2_close > 0, 1, -1) AS min_1_trend,
IF (min_2_close - min_3_close > 0, 1, -1) AS min_2_trend,
IF (min_3_close - min_4_close > 0, 1, -1) AS min_3_trend
FROM
raw ),
train_data AS (
SELECT
date,
close,
min_1_close AS day_prev_close,
IF (min_1_trend + min_2_trend + min_3_trend > 0, 1, -1) AS trend_3_day
FROM
raw_plus_trend
ORDER BY
date ASC )
SELECT
*
FROM
train_data
```
View the first five rows of the query's output. Note that the object `df` containing the query output is a Pandas Dataframe.
```
print(type(df))
df.dropna(inplace=True)
df.head()
```
## Visualize data
The simplest plot you can make is to show the closing stock price as a time series. Pandas DataFrames have built in plotting funtionality based on Matplotlib.
```
df.plot(x='date', y='close');
```
You can also embed the `trend_3_day` variable into the time series above.
```
start_date = '2018-06-01'
end_date = '2018-07-31'
# Boolean mask for the plotting window; computed once and reused below.
in_window = pd.to_datetime(df.date).between(start_date, end_date)
plt.plot(
    'date', 'close', 'k--',
    data = (
        df.loc[in_window]
    )
)
# Fix: the positive-trend filter needs parentheses around the comparison.
# `&` binds tighter than `==`, so the original expression evaluated
# `1 & mask` first instead of combining the two boolean masks (the
# negative-trend branch below was already correctly parenthesized).
plt.scatter(
    'date', 'close', color='b', label='pos trend',
    data = (
        df.loc[(df.trend_3_day == 1) & in_window]
    )
)
plt.scatter(
    'date', 'close', color='r', label='neg trend',
    data = (
        df.loc[(df.trend_3_day == -1) & in_window]
    )
)
plt.legend()
plt.xticks(rotation = 90);
df.shape
```
## Build a Regression Model in Scikit-Learn
In this section you'll train a linear regression model to predict AAPL closing prices when given the previous day's closing price `day_prev_close` and the three day trend `trend_3_day`. A training set and test set are created by sequentially splitting the data after 2000 rows.
```
features = ['day_prev_close', 'trend_3_day']
target = 'close'
X_train, X_test = df.loc[:2000, features], df.loc[2000:, features]
y_train, y_test = df.loc[:2000, target], df.loc[2000:, target]
# Create linear regression object. Don't include an intercept,
# TODO
# Train the model using the training set
# TODO
# Make predictions using the testing set
# TODO
# Print the root mean squared error of your predictions
# TODO
# Print the variance score (1 is perfect prediction)
# TODO
# Plot the predicted values against their corresponding true values
# TODO
```
The model's predictions are more or less in line with the truth. However, the utility of the model depends on the business context (i.e. you won't be making any money with this model). It's fair to question whether the variable `trend_3_day` even adds to the performance of the model:
```
print('Root Mean Squared Error: {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, X_test.day_prev_close))))
```
Indeed, the RMSE is actually lower if we simply use the previous day's closing value as a prediction! Does increasing the number of days included in the trend improve the model? Feel free to create new features and attempt to improve model performance!
| github_jupyter |
<a href="https://colab.research.google.com/github/chemaar/python-programming-course/blob/master/Lab_5_Data_Structures_Lists_Strings_STUDENT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Lab 5: Data structures: Lists and strings
In this notebook, we propose and solve some exercises about basic data structures implemented through Python lists and strings.
* **In these exercises, we can always proceed solving the problems in a generic way or taking advantage of Python capabilities. As a recommendation, first, try the generic way (applicable to any programming language) and, then, using Python**
* **As a good programming practice, our test cases should ensure that all branches of the code are executed at least once.**
## List of exercises
1. Write a a program that creates a list of $n$ numbers initializing each position with a value (the numeric value of such position). Display the list in the console.
* Input: 5
* Expected output:
```
[0, 1, 2, 3, 4]
```
2. Write a program that given a list of $n$ numbers, calculates and displays the length of the list.
* Input: [3,4,5,6]
* Expected output:
```
The length of the list is: 4
```
3. Write a program that given a list of $n$ numbers, calculates and displays the max value within the list and its position.
* Input: [8, 1, 9, 2]
* Expected output:
```
The max value is: 9 in position: 2.
```
4. Write a program that given a list of $n$ numbers, calculates and displays the min value within the list and its position.
* Input: [8, 1, 9, 2]
* Expected output:
```
The min value is: 1 in position: 1.
```
5. Write a program that given a list of $n$ numbers, calculates and displays the sum of its elements.
* Input: [8, 1, 9, 2]
* Expected output:
```
The sum is: 20.
```
6. Write a program that given a list of $n$ numbers and a target number $k$, counts the number of occurrences of $k$ in the list.
* Input: [8, 1, 9, 1], $k=1$
* Expected output:
```
The number 1 has 2 occurrences.
```
7. Write a program that given a list of $n$ numbers and a target number $k$, returns and displays the position of the first apparition of the number $k$.
* Input: [8, 1, 9, 1], $k=1$
* Expected output:
```
The number 1 occurs first in position 1.
```
8. Write a program that given a list of $n$ numbers and a target number $k$, returns and displays the position of the last apparition.
* Input: [8, 1, 9, 1], $k=1$
* Expected output:
```
The number 1 occurs last in position 3.
```
9. Write a program that given a list of $n$ numbers and a target number $k$, returns and displays a list of all positions in which the value $k$ occurs.
* Input: [8, 1, 9, 1], $k=1$
* Expected output:
```
The number 1 occurs in [1, 3].
```
10. Write a program that given a list of $n$ numbers, creates a new list in reverse order.
* Input: [7, 5, 9, 2]
* Expected output:
```
The reverse list is [2, 9, 5, 7].
```
11. **Benchmarking**: compare your previous code against the Python versions. See the next example.
```
#Module to use timers
import time
values = [8,1,9,2]
#Start the timer
t = time.time()
min_value = values[0]
position = 0
for i in range(1, len(values)):
if values [i] < min_value:
min_value = values [i]
position = i
print("\n\tMy min version-->time Taken: %.5f sec" % (time.time()-t))
#Python way to find the min value
#Start the timer
t = time.time()
min_value = min(values)
position = values.index(min_value)
print("\n\tPython version-->time Taken: %.5f sec" % (time.time()-t))
```
12. Write a program that given a list of $n$ numbers, $v$, and a scalar number $k$, returns and displays the scalar product of $k \cdot v$.
* Input: [7, 5, 9, 2], k = 3
* Expected output:
```
The scalar product of 3 and [7, 5, 9, 2] is [21, 15, 27, 6].
```
14. Write a program that given two lists of $n$ numbers, returns and displays the vector product of $l1 \cdot l2$.
* Input: [7, 5, 9, 2], [4, 5, 6, 7]
* Expected output:
```
The vector product of [7, 5, 9, 2] and [4, 5, 6, 7] is [28, 25, 54, 14].
```
15. Write a program that given two lists of $n$ numbers, returns all combination of pairs (cartesian product).
* Input: [7, 5, 9, 2], [4, 5, 6, 7]
* Expected output:
```
The cartesian product is: [[7, 4], [7, 5], [7, 6], [7, 7], [5, 4], [5, 5], [5, 6], [5, 7], [9, 4], [9, 5], [9, 6], [9, 7], [2, 4], [2, 5], [2, 6], [2, 7]].
```
16. Write a program that given a list of $n$ numbers, returns and displays the average of the list numbers.
* Input: [4, 5, 6, 7]
* Expected output:
```
The average of [4, 5, 6, 7] is 5.5.
```
17. Write a program that given a list of $n$ numbers and a number $k$, returns the first $k$ numbers.
* Input: [4, 5, 6, 7], k = 2
* Expected output:
```
[4,5]
```
18. Write a program that given a list of $n$ numbers and a number $k$, returns the last $k$ numbers.
* Input: [4, 5, 6, 7], k = 2
* Expected output:
```
[7,6]
```
19. Write a program that given a list of $n$ numbers, returns a new list containing in the same position the factorial of that value (if the value is < 0, the return value will be -1) .
* Input: [5, 0, 1, -1]
* Expected output:
```
[120, 1, 1, -1]
```
20. Write a program that given a list of $n$ numbers, returns a new list without the repeated numbers.
* Input: [4, 5, 5, 6, 6, 8]
* Expected output:
```
[4, 5, 6, 8]
```
21. Write a program that given two lists of $n$ numbers, returns the union of both lists.
* Input: [4,5,6] [5,7,8,9]
* Expected output:
```
[4, 5, 6, 7, 8, 9]
```
22. Write a program that given two lists of $n$ numbers, returns the intersection of both lists.
* Input: [4,5,6] [5,7,8,9]
* Expected output:
```
[5]
```
23. Write a program that asks the user for $n$ numbers and returns a sorted list.
* Use the function `insert(pos, value)`
* Input: 6, 4, 8
* Expected output:
```
[4, 6, 8]
```
24. Write a program that asks the user for $n$ numbers and returns a sorted list in descending order.
* Use the function `insert(pos, value)`
* Input: 6, 4, 8
* Expected output:
```
[8, 6, 4]
```
25. Write a program that given a list of $n$ numbers and a parameter $k$, creates chunks of $k$ elements.
* Input: [1,2,3,4,5,6,7,8,9], k = 3
* Expected output:
```
[ [1,2,3], [4,5,6], [7,8,9] ]
```
26. Write a program that given a number $n$ and an initial value $k$, creates a list of size $n$ with all positions having the initial value.
* Input: n = 10, k = -1
* Expected output:
```
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
```
27. Write a program that given a string, displays the length of the string.
* Input: Hello
* Expected output:
```
The length of Hello is 5.
```
28. Explore the string object methods.
```
dir ("")
```
28. Write a program that given a string, displays the string in reverse order.
* Input: Hello
* Expected output:
```
Hello and olleH
```
29. Write a program that given a string, displays whether the string is a palindrome.
* Input: anna
* Expected output:
```
anna is a palindrome True.
```
30. Write a program that given a string, displays the string in uppercase letters.
* Make use of function `ord(char)`-->ASCII number of the char.
* Input: This is a string
* Expected output:
```
THIS IS A STRING
```
31. Write a program that given a string, displays the string in lowercase letters.
* Input: THIS IS A STRING
* Expected output:
```
this is a string
```
32. Write a program that given a string and a char separator, returns a list of the words. (Tokenizer)
* Input: Anna,21,Programming
* Expected output:
```
["Anna", "21", "Programming"]
```
33. Write a program that given a list of strings, returns a list with the size of each string.
* Input: ["Anna", "21", "Programming"]
* Expected output:
```
[4, 2, 11]
```
34. Write a program that given a string and a number $k$, returns a list of chunked strings of size $k$.
* Input: "This is a very looooong string" and $k=3$
* Expected output:
```
['Thi', 's i', 's a', ' ve', 'ry ', 'loo', 'ooo', 'ng ', 'str', 'ing']
```
35. Write a program that given a string, returns a trimmed string (removing blankspaces at the beginning and at the end) and separating words with just one blankspace.
* Input: " Hello Mary "
* Expected output:
```
Hello Mary has 18 characters.
Hello Mary has 10 characters.
```
36. Write a program that given a string, an input character and a replacement character, returns a string replacing all occurrences of input character by the replacement character.
* Input: "Hello", input = "l", replacement ="t"
* Expected output:
```
Hello is now Hetto.
```
37. Write a program that given a string, counts and displays the number of unique characters.
* Input: "Hello"
* Expected output:
```
The number of unique characters is 4.
```
38. Write a program that given a list of strings and a char separator, displays a message containing each string separated by separator.
* Input: ["Hello", "Mary,", "How", "are", "you?"], separator = "#"
* Expected output:
```
Hello#Mary,#How#are#you?
```
39. Write a program that given a string and an input pattern (another string), checks if the string starts with the input pattern.
* Input: "Hello", pattern="He"
* Expected output:
```
True
```
40. Write a program that given a string and an input pattern (another string), checks if the string ends with the input pattern.
* Input: "Hello", pattern="lo"
* Expected output:
```
True
```
41. Write a program that given a string, filters all characters that are not numbers.
* Use the function `value.isdigit()`
* Input: "He2l3l4o5"
* Expected output:
```
['2', '3', '4', '5']
```
42. Write a program that given a list of integers and a value $k$, filters all numbers that are less than $k$.
* Input: [4, 15, 9, 21], $k=10$
* Expected output:
```
[4, 9]
```
43. Write a program that given a list of integers and a value $k$, removes the first apparition of the value from the list.
* Input: [4, 15, 9, 21], $k=15$
* Expected output:
```
[4, 9, 21]
```
44. Write a program that asks the user for introducing $k$ numbers and creates a list following a LIFO (Last Input First Output) strategy. Then, the program must extract and remove the elements following this strategy
* Input: 4, 5, 6, 7 ($k=4$)
* Expected output:
```
Stack: [4, 5, 6, 7]
Extracting: 7, Stack: [4, 5, 6, 7]
Extracting: 6, Stack: [4, 5, 6]
Extracting: 5, Stack: [4, 5]
Extracting: 4, Stack: [4]
```
45. Write a program that asks the user for introducing $k$ numbers and creates a list following a FIFO (First Input First Output) strategy. Then, the program must extract and remove the elements following this strategy
* Input: 4, 5, 6, 7 ($k=4$)
* Expected output:
```
Stack: [4, 5, 6, 7]
Extracting: 4, Stack: [4, 5, 6, 7]
Extracting: 5, Stack: [5, 6, 7]
Extracting: 6, Stack: [6, 7]
Extracting: 7, Stack: [7]
```
## Quick questions
**1. Define the lists x and y as lists of numbers.**
* x = [2, 4, 6, 8]
* y = [1, 3, 5, 7]
* What is the value of 2*x?
* What is the result of x+y?
* What is the result of x-y?
* What is the value of x[1]?
* What is the value of x[-1]?
* What is the value of x[:]?
* What is the value of x[2:4]?
* What is the value of x[1:4:2]?
* What is the value of x[:2]?
* What is the value of x[::2]?
* What is the result of the following two expressions? x[3]=8
**2. Define a string x = "Hello"**
* What is the value of 4*x?
* What is the value of x[1]?
* What is the value of x[-1]?
* What is the value of x[::2]?
* What is the value of x[::-1]?
## Quick review of list and string methods
### List: some relevant methods
```
#Review of list methods
#Create a list
values = []
#Append an element
values.append(1)
print(values)
#Access an element
print(values[0])
#Get number of elements
len(values)
print(len(values))
#Count the number of elements
print(values.count(1))
#Slicing [start:end:step], default start = 0, end = len(list), step = 1
#First k elements, k = 1
print(values[:1])
#List sort
print(values.sort())
#List reverse
values.reverse()
#Remove an element
del values [0]
#Remove all elements
values.clear()
```
### String: some relevant methods
```
#Review of string methods
value = " Hello, Mary "
print(len(value))
#Accessing
print(value[5])
#Cleaning
print(value.strip())
#Modifying values
print(value.upper())
print(value.lower())
#Finding and replacing
print("Hello".startswith("He"))
print("Hello".endswith("lo"))
print(value.find("H"))
print(value.replace(" ","#"))
#Check values
print("1".isdigit())
print("a".isalpha())
#Tokenizing
print(value.split(","))
```
| github_jupyter |
```
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import sentencepiece as spm
sp_model = spm.SentencePieceProcessor()
sp_model.Load('prepare/sp10m.cased.ms-en.model')
import tensorflow as tf
import tensorflow_text
import struct
unknown = b'\xff\xff\xff\xff'
def load_graph(frozen_graph_filename):
    """Load a frozen TF1 graph, patching stateful ops for inference.

    Rewrites RefSwitch/AssignSub/AssignAdd/Assign nodes into their stateless
    equivalents (Switch/Sub/Add/Identity) so a graph frozen from a training
    checkpoint imports cleanly, and forces the leading dimension of
    Reshape shape constants to -1 (dynamic batch size).

    Parameters
    ----------
    frozen_graph_filename : str
        Path to the frozen ``.pb`` GraphDef file.

    Returns
    -------
    tf.Graph
        A new graph with the patched GraphDef imported (node names are
        prefixed with ``import/`` by ``tf.import_graph_def``).
    """
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    for node in graph_def.node:
        if node.op == 'RefSwitch':
            node.op = 'Switch'
            # BUGFIX: xrange is Python 2 only and raises NameError on
            # Python 3 (this notebook uses f-strings, i.e. Python 3.6+).
            for index in range(len(node.input)):
                if 'moving_' in node.input[index]:
                    # Read the moving-average variable's value explicitly.
                    node.input[index] = node.input[index] + '/read'
        elif node.op == 'AssignSub':
            node.op = 'Sub'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'AssignAdd':
            node.op = 'Add'
            if 'use_locking' in node.attr: del node.attr['use_locking']
        elif node.op == 'Assign':
            node.op = 'Identity'
            if 'use_locking' in node.attr: del node.attr['use_locking']
            if 'validate_shape' in node.attr: del node.attr['validate_shape']
            if len(node.input) == 2:
                # Identity takes a single input: keep only the assigned value.
                node.input[0] = node.input[1]
                del node.input[1]
        if 'Reshape/shape' in node.name or 'Reshape_1/shape' in node.name:
            # Overwrite the first dimension of the shape constant with -1
            # (`unknown` is the little-endian byte pattern for -1) so the
            # graph accepts an arbitrary batch size at inference time.
            b = node.attr['value'].tensor.tensor_content
            arr_int = [int.from_bytes(b[i:i + 4], 'little') for i in range(0, len(b), 4)]
            if len(arr_int):
                arr_byte = [unknown] + [struct.pack('<i', i) for i in arr_int[1:]]
                arr_byte = b''.join(arr_byte)
                node.attr['value'].tensor.tensor_content = arr_byte
            if len(node.attr['value'].tensor.int_val):
                node.attr['value'].tensor.int_val[0] = -1
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    return graph
import json
with open('prepare/test-set-segmentation.json') as fopen:
data = json.load(fopen)
X, Y = [], []
for x, y in data:
X.append(x)
Y.append(y)
g = load_graph('super-tiny-segmentation/frozen_model.pb')
x = g.get_tensor_by_name('import/inputs:0')
logits = g.get_tensor_by_name('import/SelectV2_3:0')
test_sess = tf.InteractiveSession(graph = g)
from tqdm import tqdm
batch_size = 10
results = []
for i in tqdm(range(0, len(X), batch_size)):
batch_x = X[i: i + batch_size]
batches = []
for b in batch_x:
batches.append(f'segmentasi: {b}')
g = test_sess.run(logits, feed_dict = {x:batches})
results.extend(g.tolist())
results_Y = [sp_model.DecodeIds(r) for r in results]
results_Y[0], Y[0]
def calculate_wer(actual, hyp):
    """Return the word error rate of *hyp* against the reference *actual*.

    Each distinct word is mapped to a unique single character so the word
    sequences can be compared with `python-Levenshtein`'s character-level
    edit distance; the distance is then normalized by the reference length.
    """
    import Levenshtein as Lev
    ref_words = actual.split()
    hyp_words = hyp.split()
    vocab = dict(zip(set(ref_words + hyp_words), range(len(set(ref_words + hyp_words)))))
    ref_encoded = ''.join(chr(vocab[word]) for word in ref_words)
    hyp_encoded = ''.join(chr(vocab[word]) for word in hyp_words)
    return Lev.distance(ref_encoded, hyp_encoded) / len(ref_words)
wer = []
for i in tqdm(range(len(results_Y))):
wer.append(calculate_wer(Y[i], results_Y[i]))
import numpy as np
np.mean(wer)
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Distributed Tensorflow with Horovod
In this tutorial, you will train a word2vec model in TensorFlow using distributed training via [Horovod](https://github.com/uber/horovod).
## Prerequisites
* Understand the [architecture and terms](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture) introduced by Azure Machine Learning (AML)
* Go through the [configuration notebook](../../../configuration.ipynb) to:
* install the AML SDK
* create a workspace and its configuration file (`config.json`)
* Review the [tutorial](../train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) on single-node TensorFlow training using the SDK
```
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
```
## Diagnostics
Opt-in diagnostics for better experience, quality, and security of future releases.
```
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics=True)
```
## Initialize workspace
Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
```
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep='\n')
```
## Create or Attach existing AmlCompute
You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, you create `AmlCompute` as your training compute resource.
**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace this code will skip the creation process.
As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# choose a name for your cluster
cluster_name = "gpucluster"
try:
compute_target = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing compute target')
except ComputeTargetException:
print('Creating a new compute target...')
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
max_nodes=4)
# create the cluster
compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# use get_status() to get a detailed status for the current cluster.
print(compute_target.get_status().serialize())
```
The above code creates a GPU cluster. If you instead want to create a CPU cluster, provide a different VM size to the `vm_size` parameter, such as `STANDARD_D2_V2`.
## Upload data to datastore
To make data accessible for remote training, AML provides a convenient way to do so via a [Datastore](https://docs.microsoft.com/azure/machine-learning/service/how-to-access-data). The datastore provides a mechanism for you to upload/download data to Azure Storage, and interact with it from your remote compute targets.
If your data is already stored in Azure, or you download the data as part of your training script, you will not need to do this step. For this tutorial, although you can download the data in your training script, we will demonstrate how to upload the training data to a datastore and access it during training to illustrate the datastore functionality.
First, download the training data from [here](http://mattmahoney.net/dc/text8.zip) to your local machine:
```
import os
import urllib
os.makedirs('./data', exist_ok=True)
download_url = 'http://mattmahoney.net/dc/text8.zip'
urllib.request.urlretrieve(download_url, filename='./data/text8.zip')
```
Each workspace is associated with a default datastore. In this tutorial, we will upload the training data to this default datastore.
```
ds = ws.get_default_datastore()
print(ds.datastore_type, ds.account_name, ds.container_name)
```
Upload the contents of the data directory to the path `./data` on the default datastore.
```
ds.upload(src_dir='data', target_path='data', overwrite=True, show_progress=True)
```
For convenience, let's get a reference to the path on the datastore with the zip file of training data. We can do so using the `path` method. In the next section, we can then pass this reference to our training script's `--input_data` argument.
```
path_on_datastore = 'data/text8.zip'
ds_data = ds.path(path_on_datastore)
print(ds_data)
```
## Train model on the remote compute
### Create a project directory
Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script, and any additional files your training script depends on.
```
project_folder = './tf-distr-hvd'
os.makedirs(project_folder, exist_ok=True)
```
Copy the training script `tf_horovod_word2vec.py` into this project directory.
```
import shutil
shutil.copy('tf_horovod_word2vec.py', project_folder)
```
### Create an experiment
Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace for this distributed TensorFlow tutorial.
```
from azureml.core import Experiment
experiment_name = 'tf-distr-hvd'
experiment = Experiment(ws, name=experiment_name)
```
### Create a TensorFlow estimator
The AML SDK's TensorFlow estimator enables you to easily submit TensorFlow training jobs for both single-node and distributed runs. For more information on the TensorFlow estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-tensorflow).
```
from azureml.train.dnn import TensorFlow
script_params={
'--input_data': ds_data
}
estimator= TensorFlow(source_directory=project_folder,
compute_target=compute_target,
script_params=script_params,
entry_script='tf_horovod_word2vec.py',
node_count=2,
process_count_per_node=1,
distributed_backend='mpi',
use_gpu=True)
```
The above code specifies that we will run our training script on `2` nodes, with one worker per node. In order to execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, TensorFlow, Horovod and their dependencies will be installed for you. However, if your script also uses other packages, make sure to install them via the `TensorFlow` constructor's `pip_packages` or `conda_packages` parameters.
Note that we passed our training data reference `ds_data` to our script's `--input_data` argument. This will 1) mount our datastore on the remote compute and 2) provide the path to the data zip file on our datastore.
### Submit job
Run your experiment by submitting your estimator object. Note that this call is asynchronous.
```
run = experiment.submit(estimator)
print(run)
```
### Monitor your run
You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes.
```
from azureml.widgets import RunDetails
RunDetails(run).show()
```
Alternatively, you can block until the script has completed training before running more code.
```
run.wait_for_completion(show_output=True)
```
| github_jupyter |
# Imports
```
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
import torch
import torchvision
import torchvision.transforms as transforms
import pickle
import pandas as pd
import os
sys.path.append('../../Utils')
from SVC_Utils import *
```
# Load CIFAR100
```
def unpickle(file):
    """Deserialize and return the object stored in the pickle *file*.

    Uses ``encoding='bytes'`` so archives written by Python 2 (such as the
    original CIFAR batches) load without decode errors.
    """
    with open(file, 'rb') as handle:
        return pickle.load(handle, encoding='bytes')
transform = transforms.Compose(
[transforms.ToTensor()])
#training data;
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset, batch_size=int((trainset.__len__())/2), shuffle=True, num_workers=2)
trainloader_final=torch.utils.data.DataLoader(trainset, batch_size=trainset.__len__(), shuffle=True, num_workers=2)
#test data
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transforms.ToTensor())
testloader = torch.utils.data.DataLoader(testset, batch_size=testset.__len__(),shuffle=False, num_workers=2)
classes=None
traininputs, traintargets=load(trainloader)
testinputs, testtargets=load(testloader)
ftraininputs, ftraintargets=load(trainloader_final)
```
# Model Training
```
n_components=180
C_range=np.logspace(0,1,2)
gamma_range=np.logspace(-2,-1,2)
clfs=hp_grid(n_components=n_components, C_range=C_range, gamma_range=gamma_range)
#fitted_clfs=train_grid(clfs, traininputs, traintargets)
fitted_clfs=joblib.load('fclfs')
```
# Model Testing/Evaluation
```
#Stores training and testing accuracies in matrices (Rows: C_range, Cols: gamma_range)
train_accs=np.random.randn(len(C_range),len(gamma_range))
test_accs=np.random.randn(len(C_range),len(gamma_range))
test_preds=[]
k=0;
for i in range(len(C_range)):
for j in range(len(gamma_range)):
train_accs[i,j]=predict_eval(fitted_clfs[k], traininputs, traintargets, training=True)[1]
preds, test_accs[i,j]=predict_eval(fitted_clfs[k], testinputs, testtargets)
test_preds.append(preds)
k+=1
idx=['C = 1','C = 10']
cols=['gamma = .01','gamma = .1']
trainacc_df=pd.DataFrame(data=train_accs, index=idx, columns=cols)
testacc_df=pd.DataFrame(data=test_accs, index=idx, columns=cols)
#training accuracy for C/gamma grid
trainacc_df.style.background_gradient(cmap='GnBu')
#test accuracy for C/gamma grid
testacc_df.style.background_gradient(cmap='GnBu')
```
# Save Model
```
maxacc, gen=maxacc_gen(test_accs, train_accs, clfs)
fn_max_acc = 'SVMCIFAR100_maxacc_proba.pkl'
fn_gen = 'SVMCIFAR100_gen_proba.pkl'
print(maxacc)
save_proba(fn_max_acc, maxacc, traininputs, traintargets)
save_proba(fn_gen, gen, traininputs, traintargets)
```
| github_jupyter |
# Table of Contents
<p><div class="lev1 toc-item"><a href="#Initialize-Environment" data-toc-modified-id="Initialize-Environment-1"><span class="toc-item-num">1 </span>Initialize Environment</a></div><div class="lev1 toc-item"><a href="#Load-Toy-Data" data-toc-modified-id="Load-Toy-Data-2"><span class="toc-item-num">2 </span>Load Toy Data</a></div><div class="lev1 toc-item"><a href="#Measure-Functional-Connectivity" data-toc-modified-id="Measure-Functional-Connectivity-3"><span class="toc-item-num">3 </span>Measure Functional Connectivity</a></div><div class="lev1 toc-item"><a href="#Optimize-Dynamic-Subgraphs-Parameters" data-toc-modified-id="Optimize-Dynamic-Subgraphs-Parameters-4"><span class="toc-item-num">4 </span>Optimize Dynamic Subgraphs Parameters</a></div><div class="lev2 toc-item"><a href="#Generate-Cross-Validation-Parameter-Sets" data-toc-modified-id="Generate-Cross-Validation-Parameter-Sets-41"><span class="toc-item-num">4.1 </span>Generate Cross-Validation Parameter Sets</a></div><div class="lev2 toc-item"><a href="#Run-NMF-Cross-Validation-Parameter-Sets" data-toc-modified-id="Run-NMF-Cross-Validation-Parameter-Sets-42"><span class="toc-item-num">4.2 </span>Run NMF Cross-Validation Parameter Sets</a></div><div class="lev2 toc-item"><a href="#Visualize-Quality-Measures-of-Search-Space" data-toc-modified-id="Visualize-Quality-Measures-of-Search-Space-43"><span class="toc-item-num">4.3 </span>Visualize Quality Measures of Search Space</a></div><div class="lev1 toc-item"><a href="#Detect-Dynamic-Subgraphs" data-toc-modified-id="Detect-Dynamic-Subgraphs-5"><span class="toc-item-num">5 </span>Detect Dynamic Subgraphs</a></div><div class="lev2 toc-item"><a href="#Stochastic-Factorization-with-Consensus" data-toc-modified-id="Stochastic-Factorization-with-Consensus-51"><span class="toc-item-num">5.1 </span>Stochastic Factorization with Consensus</a></div><div class="lev2 toc-item"><a href="#Plot--Subgraphs-and-Spectrotemporal-Dynamics" 
data-toc-modified-id="Plot--Subgraphs-and-Spectrotemporal-Dynamics-52"><span class="toc-item-num">5.2 </span>Plot Subgraphs and Spectrotemporal Dynamics</a></div>
# Initialize Environment
```
from __future__ import division
import os
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import sys
# Data manipulation
import numpy as np
import scipy.io as io
import NMF
# Echobase
sys.path.append('../Echobase/')
import Echobase
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
```
# Load Toy Data
```
# df contains the following keys:
# -- evData contains ECoG with dims: n_sample x n_channels
# -- Fs contains sampling frequency: 1 x 1
# -- channel_lbl contains strings of channel labels with dims: n_channels
# -- channel_ix_soz contains indices of seizure-onset channels: n_soz
df = io.loadmat('./ToyData/Seizure_ECoG.mat')
evData = df['evData']
fs = int(df['Fs'][0,0])
n_sample, n_chan = evData.shape
```
# Measure Functional Connectivity
```
def compute_dynamic_windows(n_sample, fs, win_dur=1.0, win_shift=1.0):
    """
    Divide samples into bins based on window duration and shift.

    Parameters
    ----------
    n_sample: int
        Number of samples
    fs: int
        Sampling frequency
    win_dur: float
        Duration of the dynamic window, in seconds
    win_shift: float
        Shift of the dynamic window, in seconds

    Returns
    -------
    win_ix: ndarray with dims: (n_win, n_ix)
        Sample indices for each (possibly overlapping) window; windows that
        would run past the end of the recording are discarded.
    """
    win_len = int(fs * win_dur)
    hop = int(fs * win_shift)
    # Every valid window start: 0, hop, 2*hop, ... while the full window fits.
    starts = range(0, n_sample - win_len + 1, hop)
    return np.array([np.arange(start, start + win_len) for start in starts])
# Transform to a configuration matrix (n_window x n_connection)
# Upper-triangle (i < j) channel pairs index the unique connections.
triu_ix, triu_iy = np.triu_indices(n_chan, k=1)
n_conn = len(triu_ix)
# Measure dynamic functional connectivity using Echobase
#win_bin = compute_dynamic_windows(n_sample, fs)
# NOTE(review): only the first 100 seconds (fs*100 samples) are windowed,
# presumably to keep this toy example fast; the full-length call is the
# commented-out line above — confirm before reusing on real data.
win_bin = compute_dynamic_windows(fs*100, fs)
n_win = win_bin.shape[0]
n_fft = win_bin.shape[1] // 2
# Notch filter the line-noise
# Drop FFT bins around 60/120/180 Hz (mains frequency and its harmonics).
fft_freq = np.linspace(0, fs // 2, n_fft)
notch_60hz = ((fft_freq > 55.0) & (fft_freq < 65.0))
notch_120hz = ((fft_freq > 115.0) & (fft_freq < 125.0))
notch_180hz = ((fft_freq > 175.0) & (fft_freq < 185.0))
fft_freq_ix = np.setdiff1d(np.arange(n_fft),
                           np.flatnonzero(notch_60hz | notch_120hz | notch_180hz))
fft_freq = fft_freq[fft_freq_ix]
n_freq = len(fft_freq_ix)
# Compute dFC
# A_tensor holds multitaper coherence per (window, frequency, channel pair).
A_tensor = np.zeros((n_win, n_freq, n_conn))
for w_ii, w_ix in enumerate(win_bin):
    evData_hat = evData[w_ix, :]
    # Re-reference each window to the common average before coherence.
    evData_hat = Echobase.Sigproc.reref.common_avg_ref(evData_hat)
    for tr_ii, (tr_ix, tr_iy) in enumerate(zip(triu_ix, triu_iy)):
        out = Echobase.Pipelines.ecog_network.coherence.mt_coherence(
            df=1.0/fs, xi=evData_hat[:, tr_ix], xj=evData_hat[:, tr_iy],
            tbp=5.0, kspec=9, nf=n_fft,
            p=0.95, iadapt=1,
            cohe=True, freq=True)
        A_tensor[w_ii, :, tr_ii] = out['cohe'][fft_freq_ix]
# Flatten (window, freq) into observations: (n_win*n_freq) x n_conn for NMF.
A_hat = A_tensor.reshape(-1, n_conn)
```
# Optimize Dynamic Subgraphs Parameters
## Generate Cross-Validation Parameter Sets
```
def generate_folds(n_win, n_fold):
    """
    Generate folds for cross-validation by randomly dividing the windows
    into different groups for train/test-set.

    Parameters
    ----------
    n_win: int
        Number of windows (observations) in the configuration matrix
    n_fold: int
        Number of folds desired

    Returns
    -------
    fold_list: list[list]
        List of index lists that can be further divided into train
        and test sets
    """
    # Equal-sized folds: any leftover windows are discarded.
    fold_size = int(np.floor(n_win / n_fold))
    shuffled = np.random.permutation(np.arange(n_win))[:fold_size * n_fold]
    return [list(fold) for fold in shuffled.reshape(n_fold, -1)]
fold_list = generate_folds(n_win, n_fold=5)
# Set the bounds of the search space
# Random sampling scheme
param_search_space = {'rank_range': (2, 20),
'alpha_range': (0.01, 1.0),
'beta_range': (0.01, 1.0),
'n_param': 20}
# Get parameter search space
# Each sampled parameter set will be evaluated n_fold times
param_list = NMF.optimize.gen_random_sampling_paramset(
fold_list=fold_list,
**param_search_space)
```
## Run NMF Cross-Validation Parameter Sets
```
# **This cell block should be parallelized. Takes time to run**
# Produces a list of quality measures for each parameter set in param_list
qmeas_list = [NMF.optimize.run_xval_paramset(A_hat, pdict)
for pdict in param_list]
```
## Visualize Quality Measures of Search Space
```
all_param, opt_params = NMF.optimize.find_optimum_xval_paramset(param_list, qmeas_list, search_pct=5)
# Generate quality measure plots
for qmeas in ['error', 'pct_sparse_subgraph', 'pct_sparse_coef']:
for param in ['rank', 'alpha', 'beta']:
param_unq = np.unique(all_param[param])
qmeas_mean = [np.mean(all_param[qmeas][all_param[param]==pp]) for pp in param_unq]
ax_jp = sns.jointplot(all_param[param], all_param[qmeas], kind='kde',
space=0, n_levels=60, shade_lowest=False)
ax = ax_jp.ax_joint
ax.plot([opt_params[param], opt_params[param]],
[ax.get_ylim()[0], ax.get_ylim()[1]],
lw=1.0, alpha=0.75, linestyle='--')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel(param)
ax.set_ylabel(qmeas)
plt.show()
plt.close()
```
# Detect Dynamic Subgraphs
## Stochastic Factorization with Consensus
```
def refactor_connection_vector(conn_vec):
    """Expand a vectorized upper triangle into a full symmetric adjacency matrix.

    The node count is recovered from the vector length (n*(n-1)/2 unique
    connections for n nodes); the diagonal is left at zero.
    """
    n_node = int(np.ceil(np.sqrt(2 * len(conn_vec))))
    ix, iy = np.triu_indices(n_node, k=1)
    mat = np.zeros((n_node, n_node))
    mat[ix, iy] = conn_vec[...]
    return mat + mat.T
fac_subgraph, fac_coef, err = NMF.optimize.consensus_nmf(A_hat, n_seed=2, n_proc=1,
opt_alpha=opt_params['alpha'],
opt_beta=opt_params['beta'],
opt_rank=opt_params['rank'])
fac_subgraph = np.array([refactor_connection_vector(subg)
for subg in fac_subgraph])
fac_coef = fac_coef.reshape(-1, n_win, n_freq)
```
## Plot Subgraphs and Spectrotemporal Dynamics
```
# One row per subgraph: adjacency matrix (left) and its spectro-temporal
# expression coefficients (right), each normalized by its global maximum.
n_row = fac_subgraph.shape[0]
n_col = 2
plt.figure(figsize=(12,36))
# BUGFIX: xrange is Python 2 only and raises NameError on Python 3.
for fac_ii in range(fac_subgraph.shape[0]):
    # Left column: subgraph adjacency, shared color scale across subgraphs.
    ax = plt.subplot(n_row, n_col, 2*fac_ii+1)
    ax.matshow(fac_subgraph[fac_ii, ...] / fac_subgraph.max(), cmap='viridis')
    ax.set_axis_off()
    # Right column: coefficients transposed to (freq x time) for display.
    ax = plt.subplot(n_row, n_col, 2*fac_ii+2)
    ax.matshow(fac_coef[fac_ii, ...].T / fac_coef.max(), aspect=n_win/n_freq, cmap='inferno')
plt.show()
```
| github_jupyter |
## Exercise L3 - 1: Diagnose Dataset Level and Select Last Encounter
### Instructions
- Given the dataset, convert the dataset to a longitudinal level but select only the last encounter for each patient.
- Assume that that the order of encounter IDs is indicative of the time for encounter. In other words a lower number encounter will come before a higher numbered encounter.
```
import pandas as pd
import numpy as np
ehr_level_dataset_path = "./data/ehr_level_exercise_dataset.csv"
```
### Level of Dataset
What level is the dataset at? Is at the line or encounter level?
### Solution
```
ehr_level_df = pd.read_csv(ehr_level_dataset_path)
ehr_level_df.head()
```
**Tests**
- Line: Total number of rows > Number of Unique Encounters
- Encounter level: Total Number of Rows = Number of Unique Encounters
```
# Line Test
try:
assert len(ehr_level_df) > ehr_level_df['ENCOUNTER_ID'].nunique()
print("Dataset could be at the line level")
except:
print("Dataset is not at the line level")
# Encounter Test
try:
assert len(ehr_level_df) == ehr_level_df['ENCOUNTER_ID'].nunique()
print("Dataset could be at the encounter level")
except:
print("Dataset is not at the encounter level")
```
**Answer:** Dataset is at the encounter level and you can probably guess by seeing the arrays for the code sets but we did a few simple tests to confirm.
### Select Last Encounter for each Patient
So in many cases you may only want a snapshot of a patient's history for your modeling objective. In some cases it might be important to see the changes over time, but in other cases you only want the most recent encounter — or, depending on the model, the first encounter could also be used. It is really important to know the context for how the model will be deployed in production and the time state of the data you will be receiving.
```
# select last encounter for each patient
# convert encounter id column to a numerical value
def convert_encounter_id_to_number(df, encounter_id):
    """Add an ENCOUNTER_ID_NUMBER column by stripping the ID prefix.

    Mutates and returns the same DataFrame.
    """
    prefix = 'udacity_health_encounter_id_'
    numeric_ids = df[encounter_id].str.replace(prefix, '').astype(int)
    df["ENCOUNTER_ID_NUMBER"] = numeric_ids
    return df
def select_last_encounter(df, patient_id, encounter_id):
    """Keep only each patient's final encounter row.

    Rows are ordered by `encounter_id`; the returned frame contains, for every
    patient, the row(s) whose encounter ID equals that patient's last one.
    """
    ordered = df.sort_values(encounter_id)
    last_ids = ordered.groupby(patient_id)[encounter_id].tail(1).values
    keep_mask = ordered[encounter_id].isin(last_ids)
    return ordered[keep_mask]
# Build the numeric encounter key, keep each patient's last encounter,
# and keep only the columns needed for the checks below.
ehr_encounter_number_df = convert_encounter_id_to_number(ehr_level_df, "ENCOUNTER_ID")
last_encounter_df = select_last_encounter(ehr_encounter_number_df, "PATIENT_ID", "ENCOUNTER_ID_NUMBER" )
#take subset of output
test_last_encounter_df = last_encounter_df[['ENCOUNTER_ID', 'ENCOUNTER_ID_NUMBER', 'PATIENT_ID']]
```
### Test cases
- PATIENT_IDS - udacity_health_patient_id_309, udacity_health_patient_id_418, udacity_health_patient_id_908
```
# Inspect all rows for patient 309 to see which encounter should be kept.
ehr_level_df[ehr_level_df['PATIENT_ID']=='udacity_health_patient_id_309']
```
For patient id 309, the selected encounter should be 7772.
```
# Confirm only encounter 7772 remains for patient 309, then inspect patient 418.
test_last_encounter_df[test_last_encounter_df['PATIENT_ID']=='udacity_health_patient_id_309']
ehr_level_df[ehr_level_df['PATIENT_ID']=='udacity_health_patient_id_418']
```
For patient id 418, the selected encounter should be 3362.
```
# Confirm only encounter 3362 remains for patient 418, then inspect patient 908.
test_last_encounter_df[test_last_encounter_df['PATIENT_ID']=='udacity_health_patient_id_418']
ehr_level_df[ehr_level_df['PATIENT_ID']=='udacity_health_patient_id_908']
```
For patient id 908, the selected encounter should be 6132.
```
# Confirm only encounter 6132 remains for patient 908.
test_last_encounter_df[test_last_encounter_df['PATIENT_ID']=='udacity_health_patient_id_908']
```
## Exercise L3 - 2: Dataset Splitting
### Instructions
- Split the provided dataset into a train and test split but be sure not to mix patient encounter records across the two partitions
- Be sure to run the following three tests
- Patient data in only one partition
- Total unique number of patients across all partitions = total number unique patients in original full dataset
- Total number of rows original dataset = sum of rows across splits
```
# Path to the synthetic EHR dataset used for the splitting exercise
splitting_exercise_dataset_path = "./data/SYNTHETIC_EHR_DATASET.csv"
```
### Solution
This is largely a review of two parts in this lesson and you can use most of the same code for each step. The key is to identify the level of the dataset and then to convert it to the encounter level before you do your splits. Then perform the splitting and run the tests.
#### Convert to Encounter Level
```
# Convert the line-level rows to one row per encounter: group on the encounter
# identifiers and collect each remaining column's values into a list.
ehr_pre_split_df = pd.read_csv(splitting_exercise_dataset_path)
grouping_field_list = ['ENCOUNTER_ID', 'PATIENT_ID', 'PRINCIPAL_DIAGNOSIS_CODE']
non_grouped_field_list = [c for c in ehr_pre_split_df.columns if c not in grouping_field_list]
# Use pd.notna rather than `y is not np.nan`: identity comparison only matches
# the np.nan singleton object and silently keeps other NaN floats (and None).
ehr_encounter_df = ehr_pre_split_df.groupby(grouping_field_list)[non_grouped_field_list].agg(
    lambda x: list([y for y in x if pd.notna(y)])).reset_index()
```
#### Split at Patient Level
```
# Column identifying the patient; fraction of patients held out for test
PATIENT_ID_FIELD = 'PATIENT_ID'
TEST_PERCENTAGE = 0.2
def split_dataset_patient_level(df, key, test_percentage=0.2):
    """Shuffle rows, then partition by unique `key` values.

    All rows sharing a `key` (patient) land in exactly one partition, so no
    patient's encounters are split across train and test. Uses the global
    NumPy random state for the shuffle.
    """
    shuffled = df.iloc[np.random.permutation(len(df))]
    uniques = shuffled[key].unique()
    n_train = round(len(uniques) * (1 - test_percentage))
    train_keys = uniques[:n_train]
    in_train = shuffled[key].isin(train_keys)
    train = shuffled[in_train].reset_index(drop=True)
    test = shuffled[shuffled[key].isin(uniques[n_train:])].reset_index(drop=True)
    return train, test
train_df, test_df = split_dataset_patient_level(ehr_encounter_df, PATIENT_ID_FIELD, TEST_PERCENTAGE)
# 1) No patient appears in both partitions
assert len(set(train_df[PATIENT_ID_FIELD].unique()).intersection(set(test_df[PATIENT_ID_FIELD].unique()))) == 0
print("Test passed for patient data in only one partition")
# 2) Unique patients are conserved across the split
assert (train_df[PATIENT_ID_FIELD].nunique() + test_df[PATIENT_ID_FIELD].nunique()) == ehr_encounter_df[PATIENT_ID_FIELD].nunique()
print("Test passed for number of unique patients being equal!")
# 3) No rows were lost or duplicated
assert len(train_df) + len(test_df) == len(ehr_encounter_df)
print("Test passed for number of total rows equal!")
```
#### Optional
- Check label distribution and use scikitlearn - https://scikit-learn.org/stable/auto_examples/model_selection/plot_cv_indices.html#sphx-glr-auto-examples-model-selection-plot-cv-indices-py
## Exercise L3 - 3: Build Bucketed Numeric Feature with TF
### Instructions
- Given the Swiss heart disease dataset that we worked with earlier, build a bucketed numeric feature from the age feature.
- For this exercise, use the Tensorflow csv function for loading the dataset directly into a TF tensor -https://www.tensorflow.org/api_docs/python/tf/data/experimental/make_csv_dataset. This approach will be useful for when you have much larger datasets and also allows you to bypass loading the dataset in Pandas.
- More information on the Tensorflow bucketized feature can be found here https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column. Bucketed features take as input the numeric feature that we covered in the lesson. For the numeric feature, you do not need to normalize it like we did in the lesson.
```
import tensorflow as tf

# Swiss heart-disease CSV, batch size for the TF dataset, and label column
swiss_dataset_path = "./data/lesson_exercise_swiss_dataset.csv"
BATCH_SIZE =128
PREDICTOR_FIELD = 'num_label'
```
### Solution
```
# ETL with TF dataset make csv function
swiss_tf_dataset = tf.data.experimental.make_csv_dataset( swiss_dataset_path, batch_size=BATCH_SIZE,
                 num_epochs=1, label_name=PREDICTOR_FIELD, header=True)
# Grab one (features, label) batch; index 0 keeps only the feature dict.
swiss_dataset_batch = next(iter(swiss_tf_dataset))[0]
swiss_dataset_batch
# create TF numeric feature (no normalization needed for bucketizing)
tf_numeric_age_feature = tf.feature_column.numeric_column(key='age', default_value=0, dtype=tf.float64)
#boundaries for the different age buckets
b_list = [ 0, 18, 25, 40, 55, 65, 80, 100]
#create TF bucket feature from numeric feature (one-hot over the ranges)
tf_bucket_age_feature = tf.feature_column.bucketized_column(source_column=tf_numeric_age_feature, boundaries= b_list)
def demo(feature_column, example_batch):
    """Apply a single feature column to a batch and print the transformed tensor."""
    layer = tf.keras.layers.DenseFeatures(feature_column)
    transformed = layer(example_batch)
    print(transformed)
    print("\nExample of one transformed row:")
    print(transformed.numpy()[0])
# Show the column definition, then its one-hot output on a real batch.
print("Example bucket field:\n{}\n".format(tf_bucket_age_feature))
demo(tf_bucket_age_feature, swiss_dataset_batch)
```
## Exercise L3 - 4: Build Embedding Categorical Feature with TF
### Instructions
- Build a 10 dimension embedding feature for the PRINCIPAL_DIAGNOSIS_CODE field
- Here is the link to the Tensorflow Embedding column documentation -https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column
- Some functions provided below to assist
```
# Keep only the columns needed for the embedding exercise
ehr_line_df = pd.read_csv("./data/SYNTHETIC_EHR_DATASET.csv")
cat_example_df = ehr_line_df[['ENCOUNTER_ID', 'PRINCIPAL_DIAGNOSIS_CODE', 'LABEL']]
#adapted from https://www.tensorflow.org/tutorials/structured_data/feature_columns
def df_to_dataset(df, predictor, batch_size=32):
    """Convert a DataFrame into a shuffled, batched tf.data.Dataset of (features, label)."""
    frame = df.copy()
    target = frame.pop(predictor)
    dataset = tf.data.Dataset.from_tensor_slices((dict(frame), target))
    dataset = dataset.shuffle(buffer_size=len(frame))
    return dataset.batch(batch_size)
# Build the categorical tf.data pipeline with LABEL as the target
BATCH_SIZE = 64
PREDICTOR_FIELD = 'LABEL'
categorical_tf_ds = df_to_dataset(cat_example_df, PREDICTOR_FIELD, batch_size=BATCH_SIZE)
# build vocab for categorical features
def write_vocabulary_file(vocab_list, field_name, default_value, vocab_dir='./vocab/'):
    """Write one vocabulary value per line to <vocab_dir>/<field_name>_vocab.txt.

    The default/OOV value is placed in the first row, as TF's
    categorical_column_with_vocabulary_file expects. Returns the file path.
    """
    output_file_path = os.path.join(vocab_dir, str(field_name) + "_vocab.txt")
    # put default value in first row as TF requires
    vocab_list = np.insert(vocab_list, 0, default_value, axis=0)
    # to_csv returns None; the original bound it to a dead variable.
    pd.DataFrame(vocab_list).to_csv(output_file_path, index=None, header=None)
    return output_file_path
def build_vocab_files(df, categorical_column_list, default_value='00'):
    """Write one vocabulary file per categorical column; return the file paths."""
    return [
        write_vocabulary_file(df[column].unique(), column, default_value)
        for column in categorical_column_list
    ]
```
### Solution
```
import os
# add logic to add if not exist
#os.mkdir("./vocab/")
categorical_field_list = ["PRINCIPAL_DIAGNOSIS_CODE"]
vocab_files_list = build_vocab_files(cat_example_df, categorical_field_list)
vocab_files_list[0]
# Categorical column backed by the vocab file; one extra bucket catches OOV codes
principal_diagnosis_vocab = tf.feature_column.categorical_column_with_vocabulary_file(
    key="PRINCIPAL_DIAGNOSIS_CODE", vocabulary_file = vocab_files_list[0], num_oov_buckets=1)
# 10-dimensional dense embedding of the diagnosis code
dims = 10
cat_embedded = tf.feature_column.embedding_column(principal_diagnosis_vocab, dimension=dims)
categorical_tf_ds_batch = next(iter(categorical_tf_ds))[0]
demo(cat_embedded, categorical_tf_ds_batch)
```
| github_jupyter |
Generalized Method of Moments
=============================
*Generalized method of moments* (GMM) is an estimation principle that
extends *method of moments*. It seeks the parameter that minimizes a
quadratic form of the moments. It is particularly useful in estimating
structural models in which moment conditions can be derived from
economic theory. GMM emerges as one of the most popular estimators in
modern econometrics, and it includes conventional methods like the
two-stage least squares (2SLS) and the three-stage least square as
special cases.
**R Example**
The CRAN package [gmm](http://cran.r-project.org/web/packages/gmm/index.html) provides an interface for GMM estimation. In this document we demonstrate it in a nonlinear model.
[Bruno Rodrigues](http://www.brodrigues.co/pages/aboutme.html) shared [his example](http://www.brodrigues.co/blog/2013-11-07-gmm-with-rmd/) with detailed instruction and discussion.
(update: as Aug 19, 2018, his linked data no longer works. I track to the original dataset and do the conversion to make it work.)
Unfortunately, I find his example cannot reflect the essence of GMM. The blunder was that he took the *method of moments* as the *generalized method of moments*. He worked with a just-identified model, in which the choices of **type** and **wmatrix** in his call
```
my_gmm <- gmm(moments, x = dat, t0 = init, type = "iterative", crit = 1e-25, wmatrix = "optimal", method = "Nelder-Mead", control = list(reltol = 1e-25, maxit = 20000))
```
is simply irrelevant. Experimenting with different options of **type** and **wmatrix**, we will find exactly the same point estimates and variances.
Below I illustrate the nonlinear GMM in an over-identified system. First we import the data and add a constant.
```
# load the data
library(Ecdat, quietly = TRUE, warn.conflicts = FALSE)
data(Benefits)
g = Benefits
g$const <- 1 # add the constant
# Keep the outcome (ui), the constant, and the candidate regressors/instruments
g1 <- g[, c("ui", "const", "age", "dkids", "dykids", "head", "sex", "married", "rr") ]
head(g)
# to change the factors into numbers (0/1) for the matrix algebra below
for (j in c(1, 4, 5, 6, 7, 8) ){
  g1[,j] = as.numeric( g1[,j] ) -1
}
```
R's OLS function **lm** adds the intercept in the default setting. In contrast,we have to specify the moments from scratch in **gmm**. The constant, a column of ones, must be included explicitly in the data matrix.
Next, we define the logistic function and the moment conditions.
```
logistic <- function(theta, data) {
  # Standard logistic (sigmoid) link applied to the linear index data %*% theta
  1 / (1 + exp(-(data %*% theta)))
}
moments <- function(theta, data) {
  # Moment conditions E[z * (y - logistic(x'theta))] = 0
  y <- as.numeric(data[, 1])              # outcome: ui (column 1 of g1)
  x <- data.matrix(data[, c(2:3, 6:8)])   # regressors: const, age, head, sex, married
  # NOTE(review): columns are selected by position, not name — confirm they
  # still match g1's ordering if g1 changes.
  z <- data.matrix( data[, c(2,4, 5:9) ] ) # more IVs than the regressors. Over-identified.
  m <- z * as.vector((y - logistic(theta, x)))
  return(cbind(m))
}
```
Here I naively adapt Bruno Rodrigues's example and specify the moments as
$$
E[z_i \epsilon_i] = E[ z_i ( y_i - \mathrm{ logistic }(x_i \beta ) )] = 0
$$
However, such a specification is almost impossible to be motivated from the economic theory of random utility models.
Eventually, we call the GMM function and display the results. An initial value must be provided for a numerical optimization algorithm. It is recommended to try at least dozens of initial values in general unless one can show that the minimizer is unique in the model.
```
library(gmm) # load the library "gmm"
# Initial values from a linear probability model fit
init <- (lm(ui ~ age + dkids + head + sex, data = g1 ))$coefficients
# Two-step efficient GMM with the optimal weighting matrix
my_gmm <- gmm(moments, x = g1, t0 = init, type = "twoStep", wmatrix = "optimal")
summary(my_gmm)
```
In the summary, the $J$ statistics indicates that the moment conditions are unlikely to hold. The model requires further modification.
P.S.: According to my personal experience, caution must be executed when using **gmm** in R for nonlinear models. Sometimes the estimates can be unreliable, perhaps due to the shape of the criterion function in several parameters. Simulation experiments are highly suggested before we believe the estimates.
| github_jupyter |

<div class = 'alert alert-block alert-info'
style = 'background-color:#4c1c84;
color:#eeebf1;
border-width:5px;
border-color:#4c1c84;
font-family:Comic Sans MS;
border-radius: 50px 50px'>
<p style = 'font-size:24px'>Exp 033</p>
<a href = "#Config"
style = "color:#eeebf1;
font-size:14px">1.Config</a><br>
<a href = "#Settings"
style = "color:#eeebf1;
font-size:14px">2.Settings</a><br>
<a href = "#Data-Load"
style = "color:#eeebf1;
font-size:14px">3.Data Load</a><br>
<a href = "#Pytorch-Settings"
style = "color:#eeebf1;
font-size:14px">4.Pytorch Settings</a><br>
<a href = "#Training"
style = "color:#eeebf1;
font-size:14px">5.Training</a><br>
</div>
<p style = 'font-size:24px;
color:#4c1c84'>
実施したこと
</p>
<li style = "color:#4c1c84;
font-size:14px">使用データ:Jigsaw-Classification</li>
<li style = "color:#4c1c84;
font-size:14px">使用モデル:unitary/toxic-bert</li>
<li style = "color:#4c1c84;
font-size:14px">Attentionの可視化</li>
<br>
<h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
Config
</h1>
<br>
```
import sys
sys.path.append("../src/utils/iterative-stratification/")
sys.path.append("../src/utils/detoxify")
sys.path.append("../src/utils/coral-pytorch/")
sys.path.append("../src/utils/pyspellchecker")
import warnings
warnings.simplefilter('ignore')
import os
import gc
gc.enable()
import sys
import glob
import copy
import math
import time
import random
import string
import psutil
import pathlib
from pathlib import Path
from contextlib import contextmanager
from collections import defaultdict
from box import Box
from typing import Optional
from pprint import pprint
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import japanize_matplotlib
from tqdm.auto import tqdm as tqdmp
from tqdm.autonotebook import tqdm as tqdm
tqdmp.pandas()
## Model
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold, KFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel, AdamW, AutoModelForSequenceClassification
from transformers import RobertaModel, RobertaForSequenceClassification
from transformers import RobertaTokenizer
from transformers import LukeTokenizer, LukeModel, LukeConfig
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from transformers import BertTokenizer, BertForSequenceClassification, BertForMaskedLM
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification
from transformers import DebertaTokenizer, DebertaModel
# Pytorch Lightning
import pytorch_lightning as pl
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning import callbacks
from pytorch_lightning.callbacks.progress import ProgressBarBase
from pytorch_lightning import LightningDataModule, LightningDataModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.loggers.csv_logs import CSVLogger
from pytorch_lightning.callbacks import RichProgressBar
from sklearn.linear_model import Ridge
from sklearn.svm import SVC, SVR
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.stats import rankdata
from cuml.svm import SVR as cuml_SVR
from cuml.linear_model import Ridge as cuml_Ridge
import cudf
from detoxify import Detoxify
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from ast import literal_eval
from nltk.tokenize import TweetTokenizer
import spacy
from scipy.stats import sem
from copy import deepcopy
from spellchecker import SpellChecker
from typing import Text, Set, List
import torch
# Experiment configuration: data paths, model, trainer, and optimizer settings.
# NOTE(review): the notebook title says "Exp 033" but exp_name is "032_exp" —
# confirm which is intended before logging to W&B.
config = {
    "exp_comment":"Jigsaw-Classification をHateBERTで学習",
    "seed": 42,
    "root": "/content/drive/MyDrive/kaggle/Jigsaw/raw",
    "n_fold": 5,
    "epoch": 5,
    "max_length": 256,      # tokenizer truncation/padding length
    "environment": "AWS",   # one of: AWS / Kaggle / Colab (selects paths below)
    "project": "Jigsaw",
    "entity": "dataskywalker",
    "exp_name": "032_exp",
    "margin": 0.5,
    "train_fold": [0, 1, 2, 3, 4],
    "trainer": {
        "gpus": 1,
        "accumulate_grad_batches": 8,   # effective batch = 8 * batch_size
        "progress_bar_refresh_rate": 1,
        "fast_dev_run": True,           # overridden before real training
        "num_sanity_val_steps": 0,
    },
    "train_loader": {
        "batch_size": 8,
        "shuffle": True,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": True,
    },
    "valid_loader": {
        "batch_size": 8,
        "shuffle": False,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": False,
    },
    "test_loader": {
        "batch_size": 8,
        "shuffle": False,
        "num_workers": 1,
        "pin_memory": True,
        "drop_last": False,
    },
    "backbone": {
        "name": "GroNLP/hateBERT",
        "output_dim": 1,    # overwritten to len(target_cols) before training
    },
    "optimizer": {
        "name": "torch.optim.AdamW",    # instantiated via eval() in the model
        "params": {
            "lr": 1e-6,
        },
    },
    "scheduler": {
        "name": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
        "params": {
            "T_0": 20,      # overwritten to epoch * steps_per_epoch below
            "eta_min": 0,
        },
    },
    "loss": "nn.MSELoss",   # instantiated via eval() in the model
}
# Box gives attribute-style access (config.backbone.name)
config = Box(config)
config.tokenizer = AutoTokenizer.from_pretrained(config.backbone.name)
config.model = BertForMaskedLM.from_pretrained(config.backbone.name)
# pprint(config)
# Cache tokenizer and pretrained weights locally for offline reuse
config.tokenizer.save_pretrained(f"../data/processed/{config.backbone.name}")
pretrain_model = BertForMaskedLM.from_pretrained(config.backbone.name)
pretrain_model.save_pretrained(f"../data/processed/{config.backbone.name}")
# I move between AWS, Kaggle, and Google Colab, so the path setup for each
# environment is consolidated here.
import os
import sys
from pathlib import Path
if config.environment == 'AWS':
    INPUT_DIR = Path('/mnt/work/data/kaggle/Jigsaw/')
    MODEL_DIR = Path(f'../models/{config.exp_name}/')
    OUTPUT_DIR = Path(f'../data/interim/{config.exp_name}/')
    UTIL_DIR = Path('/mnt/work/shimizu/kaggle/PetFinder/src/utils')
    os.makedirs(MODEL_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    print(f"Your environment is 'AWS'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}\nUTIL_DIR is {UTIL_DIR}")
elif config.environment == 'Kaggle':
    INPUT_DIR = Path('../input/*****')
    MODEL_DIR = Path('./')
    OUTPUT_DIR = Path('./')
    print(f"Your environment is 'Kaggle'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}")
elif config.environment == 'Colab':
    INPUT_DIR = Path('/content/drive/MyDrive/kaggle/Jigsaw/raw')
    BASE_DIR = Path("/content/drive/MyDrive/kaggle/Jigsaw/interim")
    MODEL_DIR = BASE_DIR / f'{config.exp_name}'
    OUTPUT_DIR = BASE_DIR / f'{config.exp_name}/'
    os.makedirs(MODEL_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    if not os.path.exists(INPUT_DIR):
        print('Please Mount your Google Drive.')
    else:
        print(f"Your environment is 'Colab'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}")
else:
    print("Please choose 'AWS' or 'Kaggle' or 'Colab'.\nINPUT_DIR is not found.")
# Fix the random seed for reproducibility
seed_everything(config.seed)
## Measure elapsed time and resident-memory delta for a code section
@contextmanager
def timer(name:str, slack:bool=False):
    """Context manager: print the memory delta (GB) and elapsed seconds for the block.

    NOTE(review): `slack` is currently unused — presumably intended to toggle a
    Slack notification; confirm before removing.
    """
    start = time.time()
    proc = psutil.Process(os.getpid())
    mem_before = proc.memory_info()[0] / 2. ** 30  # resident set size, GB
    print(f'<< {name} >> Start')
    yield
    mem_after = proc.memory_info()[0] / 2. ** 30
    diff = mem_after - mem_before
    sign = '+' if diff >= 0 else '-'
    diff = math.fabs(diff)
    print(f"<< {name} >> {mem_after:.1f}GB({sign}{diff:.1f}GB):{time.time() - start:.1f}sec", file=sys.stderr)
```
<br>
<h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
Data Load
</h1>
<br>
```
## Data Check
# List every file under the input directory, then load the competition frames.
for dirnames, _, filenames in os.walk(INPUT_DIR):
    for filename in filenames:
        # Print the full path; the original printed a literal placeholder and
        # never used the loop variable.
        print(f'{dirnames}/{filename}')
val_df = pd.read_csv("/mnt/work/data/kaggle/Jigsaw/validation_data.csv")
test_df = pd.read_csv("/mnt/work/data/kaggle/Jigsaw/comments_to_score.csv")
display(val_df.head())
display(test_df.head())
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
Jigsaw Classification
</h2>
<br>
```
# Load the Jigsaw toxic-comment classification training data
train_df = pd.read_csv("../data/external/jigsaw-classification/train.csv.zip")
display(train_df.head(10))
display(train_df.shape)
# Flag comments containing a colon (colons are replaced below)
train_df["is_colon"] = train_df["comment_text"].progress_apply(lambda x:1 if ":" in x else 0)
def preprocess_text(txt:str) -> str:
    """Return *txt* with every ':' replaced by ','."""
    return txt.replace(":", ",")
# Apply the colon replacement to every text column used downstream
train_df["text"] = train_df["comment_text"].progress_apply(preprocess_text)
test_df["text"] = test_df["text"].progress_apply(preprocess_text)
val_df["less_toxic"] = val_df["less_toxic"].progress_apply(preprocess_text)
val_df["more_toxic"] = val_df["more_toxic"].progress_apply(preprocess_text)
import re
# Spell checker limited to edit distance 1 (fast, conservative corrections)
spell = SpellChecker(distance=1)
def misspelt_words_fn(dataframe: pd.DataFrame, col="text") -> Set[Text]:
    """Collect every word in `col` that the module-level spell checker flags as unknown.

    Texts are casefolded first so matching is case-insensitive.
    """
    misspelt_words = set()
    for tweet in dataframe[col].str.casefold():
        # set.update instead of a list comprehension run purely for its
        # side effects (the original built and discarded a throwaway list).
        misspelt_words.update(spell.unknown(tweet.split()))
    return misspelt_words
WORD = re.compile(r'\w+')

def reTokenize(tweet: Text) -> List[Text]:
    """Casefold *tweet* and return its word (\\w+) tokens in order."""
    lowered = tweet.casefold()
    return WORD.findall(lowered)
PATTERN = re.compile(r"(.)\1{2,}")

def reduce_lengthening(text: Text) -> Text:
    """Collapse runs of three or more repeated characters down to exactly two."""
    collapsed = PATTERN.sub(r"\1\1", text)
    return collapsed
def spell_correction(text: Text) -> Text:
    """De-lengthen, tokenize, and spell-correct only words already known to be misspelt.

    Relies on the module-level `spell` checker and `misspelt_words` set.
    """
    corrected = []
    for word in reTokenize(reduce_lengthening(text)):
        corrected.append(spell.correction(word) if word in misspelt_words else word)
    return ' '.join(corrected)
# Rebuild the misspelt-word set per column, then correct that column.
# NOTE: spell_correction reads the module-level `misspelt_words`, so the
# order (build set, then apply) matters for each column.
misspelt_words = misspelt_words_fn(train_df, "text")
train_df["text"] = train_df["text"].progress_apply(spell_correction)
misspelt_words = misspelt_words_fn(test_df, "text")
test_df["text"] = test_df["text"].progress_apply(spell_correction)
misspelt_words = misspelt_words_fn(val_df, "less_toxic")
val_df["less_toxic"] = val_df["less_toxic"].progress_apply(spell_correction)
misspelt_words = misspelt_words_fn(val_df, "more_toxic")
val_df["more_toxic"] = val_df["more_toxic"].progress_apply(spell_correction)
# Multi-label training targets from the Jigsaw classification data
target_cols = [
    "toxic",
    "severe_toxic",
    "obscene",
    "threat",
    "insult",
    "identity_hate"
]
# Distribution of the primary "toxic" label
plt.figure(figsize=(12, 5))
sns.histplot(train_df["toxic"], color="#4c1c84")
plt.grid()
plt.show()
train_df.head()
```
<br>
<h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;">
Pytorch Dataset
</h1>
<br>
```
class JigsawDataset:
    """Dataset over a DataFrame for the Jigsaw toxicity task.

    mode:
        "train" -> fixed-length text encodings + multi-label float target
                   taken from `target_cols`
        "valid" -> paired (more_toxic, less_toxic) encodings with constant
                   target 1 ("more_toxic should score higher")
        other   -> text encodings only (inference)
    """

    def __init__(self, df, tokenizer, max_length, mode, target_cols):
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.mode = mode
        self.target_cols = target_cols
        if self.mode == "train":
            self.text = df["text"].values
            self.target = df[target_cols].values
        elif self.mode == "valid":
            self.more_toxic = df["more_toxic"].values
            self.less_toxic = df["less_toxic"].values
        else:
            self.text = df["text"].values

    def __len__(self):
        return len(self.df)

    def _encode(self, text):
        """Tokenize one string to fixed-length ids/mask/token_type_ids.

        Shared by all three modes (the original repeated this call four times).
        """
        return self.tokenizer.encode_plus(
            text,
            truncation=True,
            return_attention_mask=True,
            return_token_type_ids=True,
            max_length=self.max_len,
            padding="max_length",
        )

    @staticmethod
    def _to_long(values):
        """Wrap a list of token ids/flags as a long tensor."""
        return torch.tensor(values, dtype=torch.long)

    def __getitem__(self, index):
        if self.mode == "train":
            enc = self._encode(self.text[index])
            return {
                'text_ids': self._to_long(enc["input_ids"]),
                'text_mask': self._to_long(enc["attention_mask"]),
                'text_token_type_ids': self._to_long(enc["token_type_ids"]),
                'target': torch.tensor(self.target[index], dtype=torch.float),
            }
        elif self.mode == "valid":
            more = self._encode(self.more_toxic[index])
            less = self._encode(self.less_toxic[index])
            return {
                'more_toxic_ids': self._to_long(more["input_ids"]),
                'more_toxic_mask': self._to_long(more["attention_mask"]),
                'more_token_type_ids': self._to_long(more["token_type_ids"]),
                'less_toxic_ids': self._to_long(less["input_ids"]),
                'less_toxic_mask': self._to_long(less["attention_mask"]),
                'less_token_type_ids': self._to_long(less["token_type_ids"]),
                # every annotated pair carries the same target: 1
                'target': torch.tensor(1, dtype=torch.float),
            }
        else:
            enc = self._encode(self.text[index])
            return {
                'text_ids': self._to_long(enc["input_ids"]),
                'text_mask': self._to_long(enc["attention_mask"]),
                'text_token_type_ids': self._to_long(enc["token_type_ids"]),
            }
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
DataModule
</h2>
<br>
```
class JigsawDataModule(LightningDataModule):
    """Bundles the train / validation / test JigsawDatasets into DataLoaders."""

    def __init__(self, train_df, valid_df, test_df, cfg):
        super().__init__()
        self._train_df = train_df
        self._valid_df = valid_df
        self._test_df = test_df
        self._cfg = cfg

    def _build_loader(self, df, mode, loader_params):
        """Create one DataLoader; uses the module-level `target_cols`."""
        dataset = JigsawDataset(
            df=df,
            tokenizer=self._cfg.tokenizer,
            max_length=self._cfg.max_length,
            mode=mode,
            target_cols=target_cols,
        )
        return DataLoader(dataset, **loader_params)

    def train_dataloader(self):
        return self._build_loader(self._train_df, "train", self._cfg.train_loader)

    def val_dataloader(self):
        return self._build_loader(self._valid_df, "valid", self._cfg.valid_loader)

    def test_dataloader(self):
        return self._build_loader(self._test_df, "test", self._cfg.test_loader)
## DataCheck
# Smoke-test: pull one training batch and run it through the backbone.
seed_everything(config.seed)
sample_dataloader = JigsawDataModule(train_df, val_df, test_df, config).train_dataloader()
for data in sample_dataloader:
    break
print(data["text_ids"].size())
print(data["text_mask"].size())
print(data["text_token_type_ids"].size())
print(data["target"].size())
print(data["target"])
output = config.model(
    data["text_ids"],
    data["text_mask"],
    data["text_token_type_ids"],
    output_hidden_states=True,
    output_attentions=True,
)
# Full last hidden layer vs. its [CLS] slice, plus last-layer attention shapes
print(output["hidden_states"][-1].size(), output["attentions"][-1].size())
print(output["hidden_states"][-1][:, 0, :].size(), output["attentions"][-1].size())
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
LigitningModule
</h2>
<br>
```
class JigsawModel(pl.LightningModule):
def __init__(self, cfg, fold_num):
super().__init__()
self.cfg = cfg
self.__build_model()
self.criterion = eval(self.cfg.loss)()
self.save_hyperparameters(cfg)
self.fold_num = fold_num
def __build_model(self):
self.base_model = BertForMaskedLM.from_pretrained(
self.cfg.backbone.name
)
print(f"Use Model: {self.cfg.backbone.name}")
self.norm = nn.LayerNorm(768)
self.drop = nn.Dropout(p=0.3)
self.head = nn.Linear(768, self.cfg.backbone.output_dim)
def forward(self, ids, mask, token_type_ids):
output = self.base_model(
input_ids=ids,
attention_mask=mask,
token_type_ids=token_type_ids,
output_hidden_states=True,
output_attentions=True
)
feature = self.norm(output["hidden_states"][-1][:, 0, :])
out = self.drop(feature)
out = self.head(out)
return {
"logits":out,
"feature":feature,
"attention":output["attentions"],
"mask":mask,
}
def training_step(self, batch, batch_idx):
text_ids = batch["text_ids"]
text_mask = batch['text_mask']
text_token_type_ids = batch['text_token_type_ids']
targets = batch['target']
outputs = self.forward(text_ids, text_mask, text_token_type_ids)
loss = torch.sqrt(self.criterion(outputs["logits"], targets))
return {
"loss":loss,
"targets":targets,
}
def training_epoch_end(self, training_step_outputs):
loss_list = []
for out in training_step_outputs:
loss_list.extend([out["loss"].cpu().detach().tolist()])
meanloss = sum(loss_list)/len(loss_list)
logs = {f"train_loss/fold{self.fold_num+1}": meanloss,}
self.log_dict(
logs,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True
)
def validation_step(self, batch, batch_idx):
more_toxic_ids = batch['more_toxic_ids']
more_toxic_mask = batch['more_toxic_mask']
more_text_token_type_ids = batch['more_token_type_ids']
less_toxic_ids = batch['less_toxic_ids']
less_toxic_mask = batch['less_toxic_mask']
less_text_token_type_ids = batch['less_token_type_ids']
targets = batch['target']
more_outputs = self.forward(
more_toxic_ids,
more_toxic_mask,
more_text_token_type_ids
)
less_outputs = self.forward(
less_toxic_ids,
less_toxic_mask,
less_text_token_type_ids
)
more_outputs = torch.sum(more_outputs["logits"], 1)
less_outputs = torch.sum(less_outputs["logits"], 1)
outputs = more_outputs - less_outputs
logits = outputs.clone()
logits[logits > 0] = 1
loss = self.criterion(logits, targets)
return {
"loss":loss,
"pred":outputs,
"targets":targets,
}
def validation_epoch_end(self, validation_step_outputs):
loss_list = []
pred_list = []
target_list = []
for out in validation_step_outputs:
loss_list.extend([out["loss"].cpu().detach().tolist()])
pred_list.append(out["pred"].detach().cpu().numpy())
target_list.append(out["targets"].detach().cpu().numpy())
meanloss = sum(loss_list)/len(loss_list)
pred_list = np.concatenate(pred_list)
pred_count = sum(x>0 for x in pred_list)/len(pred_list)
logs = {
f"valid_loss/fold{self.fold_num+1}":meanloss,
f"valid_acc/fold{self.fold_num+1}":pred_count,
}
self.log_dict(
logs,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True
)
def configure_optimizers(self):
    """Instantiate the optimizer and per-step LR scheduler from the config.

    NOTE(review): ``eval`` on config-supplied class names executes arbitrary
    code if the config is untrusted; an explicit name->class mapping would
    be safer.
    """
    optimizer = eval(self.cfg.optimizer.name)(
        self.parameters(), **self.cfg.optimizer.params
    )
    self.scheduler = eval(self.cfg.scheduler.name)(
        optimizer, **self.cfg.scheduler.params
    )
    # "interval": "step" -> Lightning calls scheduler.step() every batch.
    scheduler = {"scheduler": self.scheduler, "interval": "step",}
    return [optimizer], [scheduler]
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
Training
</h2>
<br>
```
# Assign a fold id to every training row for cross-validation.
# NOTE(review): the variable is named skf but this is a plain KFold, which
# ignores the y argument; if stratification by "toxic" was intended,
# StratifiedKFold should be used instead — confirm.
skf = KFold(
    n_splits=config.n_fold,
    shuffle=True,
    random_state=config.seed
)
for fold, (_, val_idx) in enumerate(skf.split(X=train_df, y=train_df["toxic"])):
    train_df.loc[val_idx, "kfold"] = int(fold)
train_df["kfold"] = train_df["kfold"].astype(int)
train_df.head()
## Debug
# Dry-run the whole training pipeline: fast_dev_run makes Lightning execute
# a single batch per stage, catching wiring errors before the real run.
config.trainer.fast_dev_run = True
config.backbone.output_dim = len(target_cols)
for fold in config.train_fold:
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    df_train = train_df[train_df.kfold != fold].reset_index(drop=True)
    datamodule = JigsawDataModule(df_train, val_df, test_df, config)
    # A second datamodule instance is built only to size the scheduler's
    # cosine restart period (T_0 = steps per epoch * epochs).
    sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()
    config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)
    model = JigsawModel(config, fold)
    lr_monitor = callbacks.LearningRateMonitor()
    # Keep only the single best checkpoint by validation accuracy.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath=MODEL_DIR,
        save_weights_only=True,
    )
    wandb_logger = WandbLogger(
        project=config.project,
        entity=config.entity,
        name = f"{config.exp_name}",
        tags = ['Hate-BERT', "Jigsaw-Classification"]
    )
    # NOTE(review): lr_monitor is re-created here, shadowing the instance
    # built above; only this per-step monitor is actually used.
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer = pl.Trainer(
        max_epochs=config.epoch,
        callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],
        # deterministic=True,
        logger=[wandb_logger],
        **config.trainer
    )
    trainer.fit(model, datamodule=datamodule)
## Training
# Real run: identical to the debug loop above but with fast_dev_run off.
config.trainer.fast_dev_run = False
config.backbone.output_dim = len(target_cols)
for fold in config.train_fold:
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    df_train = train_df[train_df.kfold != fold].reset_index(drop=True)
    datamodule = JigsawDataModule(df_train, val_df, test_df, config)
    # Second datamodule instance sizes the scheduler restart period.
    sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()
    config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)
    model = JigsawModel(config, fold)
    lr_monitor = callbacks.LearningRateMonitor()
    # Keep only the single best checkpoint by validation accuracy.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath=MODEL_DIR,
        save_weights_only=True,
    )
    wandb_logger = WandbLogger(
        project=config.project,
        entity=config.entity,
        name = f"{config.exp_name}",
        tags = ['Hate-BERT', "Jigsaw-Classification"]
    )
    # NOTE(review): lr_monitor is re-created here, shadowing the instance
    # built above; only this per-step monitor is actually used.
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer = pl.Trainer(
        max_epochs=config.epoch,
        callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],
        # deterministic=True,
        logger=[wandb_logger],
        **config.trainer
    )
    trainer.fit(model, datamodule=datamodule)
# Out-of-fold inference on the validation pairs with the saved checkpoints,
# then score pairwise ranking accuracy (fraction of correctly ordered pairs).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
config.backbone.output_dim = len(target_cols)
print(f"Device == {device}")
MORE = np.zeros((len(val_df), config.backbone.output_dim))
LESS = np.zeros((len(val_df), config.backbone.output_dim))
PRED = np.zeros((len(test_df), config.backbone.output_dim))
attention_array = np.zeros((len(val_df), 256))  # attention score buffer
mask_array = np.zeros((len(val_df), 256))  # mask buffer, multiplied with attention later
for fold in config.train_fold:
    pred_list = []
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    valid_dataloader = JigsawDataModule(train_df, val_df, test_df, config).val_dataloader()
    model = JigsawModel(config, fold)
    # NOTE(review): this ModelCheckpoint instance is never passed to a
    # trainer — it appears unused during inference.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath="../input/toxicroberta/",
    )
    model = model.load_from_checkpoint(MODEL_DIR/f"best_acc_fold{fold+1}-v1.ckpt", cfg=config, fold_num=fold)
    model.to(device)
    model.eval()
    more_list = []
    less_list = []
    # NOTE(review): forward passes are not wrapped in torch.no_grad();
    # consider adding it to avoid building autograd graphs at inference.
    for step, data in tqdm(enumerate(valid_dataloader), total=len(valid_dataloader)):
        more_toxic_ids = data['more_toxic_ids'].to(device)
        more_toxic_mask = data['more_toxic_mask'].to(device)
        more_text_token_type_ids = data['more_token_type_ids'].to(device)
        less_toxic_ids = data['less_toxic_ids'].to(device)
        less_toxic_mask = data['less_toxic_mask'].to(device)
        less_text_token_type_ids = data['less_token_type_ids'].to(device)
        more_outputs = model(
            more_toxic_ids,
            more_toxic_mask,
            more_text_token_type_ids,
        )
        less_outputs = model(
            less_toxic_ids,
            less_toxic_mask,
            less_text_token_type_ids
        )
        more_list.append(more_outputs["logits"].detach().cpu().numpy())
        less_list.append(less_outputs["logits"].detach().cpu().numpy())
    # Average fold predictions into the ensemble arrays.
    MORE += np.concatenate(more_list)/len(config.train_fold)
    LESS += np.concatenate(less_list)/len(config.train_fold)
    # PRED += pred_list/len(config.train_fold)
# Scatter of per-pair scores: points above the diagonal are correctly ranked.
plt.figure(figsize=(12, 5))
plt.scatter(LESS, MORE)
plt.xlabel("less-toxic")
plt.ylabel("more-toxic")
plt.grid()
plt.show()
val_df["less_attack"] = LESS.sum(axis=1)
val_df["more_attack"] = MORE.sum(axis=1)
val_df["diff_attack"] = val_df["more_attack"] - val_df["less_attack"]
# Competition-style score: fraction of pairs ranked correctly.
attack_score = val_df[val_df["diff_attack"]>0]["diff_attack"].count()/len(val_df)
print(f"exp033 Score: {attack_score:.6f}")
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
Attention Visualize
</h2>
<br>
```
# Build the unique-text table used for attention visualisation.
text_df = pd.DataFrame()
text_df["text"] = list(set(val_df["less_toxic"].unique().tolist() + val_df["more_toxic"].unique().tolist()))
display(text_df.head())
display(text_df.shape)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
config.backbone.output_dim = len(target_cols)
print(f"Device == {device}")
attention_array = np.zeros((len(text_df), config.max_length))  # attention score buffer
mask_array = np.zeros((len(text_df), config.max_length))  # mask buffer, multiplied with attention later
feature_array = np.zeros((len(text_df), 768))
PRED = np.zeros((len(text_df), config.backbone.output_dim))
for fold in config.train_fold:
    pred_list = []
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    test_dataloader = JigsawDataModule(train_df, val_df, text_df, config).test_dataloader()
    model = JigsawModel(config, fold)
    # NOTE(review): this ModelCheckpoint instance appears unused here.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath="../input/toxicroberta/",
    )
    model = model.load_from_checkpoint(MODEL_DIR/f"best_acc_fold{fold+1}-v1.ckpt", cfg=config, fold_num=fold)
    model.to(device)
    model.eval()
    attention_list = []
    feature_list = []
    mask_list = []
    pred_list = []
    # NOTE(review): inference is not wrapped in torch.no_grad(); consider
    # adding it to avoid retaining autograd graphs.
    for step, data in tqdm(enumerate(test_dataloader), total=len(test_dataloader)):
        text_ids = data["text_ids"].to(device)
        text_mask = data["text_mask"].to(device)
        text_token_type_ids = data["text_token_type_ids"].to(device)
        mask_list.append(text_mask.detach().cpu().numpy())
        outputs = model(
            text_ids,
            text_mask,
            text_token_type_ids,
        )
        ## Attention toward the CLS token from the last layer
        last_attention = outputs["attention"][-1].detach().cpu().numpy()
        total_attention = np.zeros((last_attention.shape[0], config.max_length))
        for batch in range(last_attention.shape[0]):
            for n_head in range(12):
                # Sum the attention of all 12 heads at query position 0 ([CLS]).
                total_attention[batch, :] += last_attention[batch, n_head, 0, :]
        attention_list.append(total_attention)
        pred_list.append(outputs["logits"].detach().cpu().numpy())
        feature_list.append(outputs["feature"].detach().cpu().numpy())
    # Average fold outputs into the ensemble buffers.
    attention_array += np.concatenate(attention_list)/config.n_fold
    mask_array += np.concatenate(mask_list)/config.n_fold
    feature_array += np.concatenate(feature_list)/config.n_fold
    PRED += np.concatenate(pred_list)/len(config.train_fold)
# Persist predictions and attention artefacts for later analysis.
text_df["target"] = PRED[:, 0]
text_df.to_pickle(OUTPUT_DIR/"text_df.pkl")
np.save(OUTPUT_DIR/'toxic-bert-exp033-attention.npy', attention_array)
np.save(OUTPUT_DIR/'toxic-bert-exp033-mask.npy', mask_array)
np.save(OUTPUT_DIR/'toxic-bert-exp033-feature.npy', feature_array)
plt.figure(figsize=(12, 5))
sns.histplot(text_df["target"], color="#4c1c84")
plt.grid()
plt.show()
```
<br>
<h2 style = "font-size:45px;
font-family:Comic Sans MS ;
font-weight : normal;
background-color: #eeebf1 ;
color : #4c1c84;
text-align: center;
border-radius: 100px 100px;">
Attention Load
</h2>
<br>
```
# Reload the cached prediction/attention artefacts written in the
# "Attention Visualize" section.
text_df = pd.read_pickle(OUTPUT_DIR/"text_df.pkl")
attention_array = np.load(OUTPUT_DIR/'toxic-bert-exp033-attention.npy')
mask_array = np.load(OUTPUT_DIR/'toxic-bert-exp033-mask.npy')
feature_array = np.load(OUTPUT_DIR/'toxic-bert-exp033-feature.npy')
from IPython.display import display, HTML
def highlight_r(word, attn):
    """Return *word* wrapped in an HTML span shaded red by attention weight.

    ``attn`` is expected in [0, 1]; 0 renders white, 1 renders pure red.
    Values outside that range (e.g. attention summed over 12 heads can
    exceed 1) are clamped so the colour components stay valid hex bytes —
    previously an out-of-range attn produced a malformed colour string.
    """
    attn = min(max(attn, 0.0), 1.0)  # clamp to keep colour bytes in 0..255
    fade = int(255 * (1 - attn))
    html_color = '#%02X%02X%02X' % (255, fade, fade)
    return '<span style="background-color: {}">{}</span>'.format(html_color, word)
num = 12
# Tokenise one text and colour each token by its (mask-filtered) attention.
ids = config.tokenizer(text_df.loc[num, "text"])["input_ids"]
tokens = config.tokenizer.convert_ids_to_tokens(ids)
attention = attention_array[num, :][np.nonzero(mask_array[num, :])]
html_outputs = []
for word, attn in zip(tokens, attention):
    html_outputs.append(highlight_r(word, attn))
# NOTE(review): PRED is the in-memory array from the inference section;
# after a kernel restart it is not reloaded from disk — confirm it exists.
print(f"Offensive Score is {PRED[num, 0]}")
display(HTML(' '.join(html_outputs)))
display(text_df.loc[num, "text"])
# Inspect the 20 highest-scoring (most offensive) texts with attention overlays.
text_df.sort_values("target", ascending=False).head(20)
high_score_list = text_df.sort_values("target", ascending=False).head(20).index.tolist()
for num in high_score_list:
    ids = config.tokenizer(text_df.loc[num, "text"])["input_ids"]
    tokens = config.tokenizer.convert_ids_to_tokens(ids)
    # Keep only attention positions where the input mask is non-zero.
    attention = attention_array[num, :][np.nonzero(mask_array[num, :])]
    html_outputs = []
    for word, attn in zip(tokens, attention):
        html_outputs.append(highlight_r(word, attn))
    print(f"Offensive Score is {PRED[num, 0]}")
    display(HTML(' '.join(html_outputs)))
    display(text_df.loc[num, "text"])
```
| github_jupyter |
```
import torch
import time
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset,DataLoader
import pandas as pd
import numpy as np
!pip install transformers==3.3.1
!pip install sentencepiece
import sentencepiece
from transformers import AutoModel, AutoTokenizer
from torch import cuda
# Select GPU when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
from google.colab import drive
drive.mount('/content/drive')
# The Kannada offensive-language CSVs are tab-separated with a trailing
# empty column, dropped immediately after loading.
train = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/kannada_offensive_train (1).csv', delimiter='\t', names=['text','label','nan'])
train = train.drop(columns=['nan'])
# Map string labels to integer class ids 0..5 (unmapped labels become None).
train.label = train.label.apply({'Not_offensive':0,'Offensive_Untargetede':1,'Offensive_Targeted_Insult_Group':2,'Offensive_Targeted_Insult_Individual':3,'not-Kannada':4, 'Offensive_Targeted_Insult_Other':5}.get)
train.head(9)
val = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/kannada_offensive_dev.csv', delimiter='\t', names=['text','label','nan'])
val = val.drop(columns=['nan'])
val.label = val.label.apply({'Not_offensive':0,'Offensive_Untargetede':1,'Offensive_Targeted_Insult_Group':2,'Offensive_Targeted_Insult_Individual':3,'not-Kannada':4, 'Offensive_Targeted_Insult_Other':5}.get)
from numpy import random
test = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/kannada_offensive_test.csv',delimiter='\t',names=['text','label'])
def addLabel():
    # The test split has no labels; fill the column with uniform random
    # class ids so the Dataset code (which expects a label column) still runs.
    test['label'] = [random.choice([0, 1, 2, 3, 4, 5]) for text in test.text]
addLabel()
test.head(9)
import re
def clean(df):
    """Normalise the ``text`` column of *df* in place.

    Lowercases, collapses runs of spaces, replaces common punctuation with
    spaces, and removes digit runs.
    """
    df['text'] = df['text'].apply(lambda x: x.lower())
    df['text'] = df['text'].apply(lambda x: re.sub(r' +', ' ', x))
    df['text'] = df['text'].apply(lambda x: re.sub(r"[!@#$+%*:()'-]", ' ', x))
    # BUG FIX: Series.str.replace defaults to regex=False in pandas >= 2.0,
    # which would treat '\d+' as a literal string; pass regex=True so digit
    # runs are actually removed.
    df['text'] = df['text'].str.replace(r'\d+', '', regex=True)
# Apply the same text normalisation to every split.
clean(train)
clean(val)
clean(test)
import pandas as pd
from torch.utils.data import Dataset,DataLoader
class RFDataset(Dataset):
    """Text-classification dataset that tokenises lazily per item.

    Each sample is a dict with the raw text, flattened input ids and
    attention mask, and the label as a long tensor.
    """

    def __init__(self, text, label, tokenizer, max_len):
        self.text = text
        self.label = label
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.text)

    def __getitem__(self, item):
        raw = str(self.text[item])
        enc = self.tokenizer.encode_plus(
            raw,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            return_attention_mask=True,
            return_tensors='pt',
            truncation=True,
        )
        sample = {
            'text': raw,
            'input_ids': enc['input_ids'].flatten(),
            'attention_mask': enc['attention_mask'].flatten(),
            'label': torch.tensor(self.label[item], dtype=torch.long),
        }
        return sample
def create_data_loader(df, tokenizer, max_len, batch_size):
    """Build a shuffled DataLoader over *df* (training split)."""
    dataset = RFDataset(
        text=df.text.to_numpy(),
        label=df.label.to_numpy(),
        tokenizer=tokenizer,
        max_len=max_len,
    )
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
    )
# Shared XLM-R tokenizer and loader hyper-parameters.
MODEL_TYPE = 'xlm-roberta-base'
tokenizer = AutoTokenizer.from_pretrained(MODEL_TYPE)
BATCH_SIZE = 32
MAX_LEN = 128
train_data_loader = create_data_loader(train,tokenizer,MAX_LEN,BATCH_SIZE)
val_data_loader = create_data_loader(val,tokenizer,MAX_LEN,BATCH_SIZE)
def create_dataloader(df, tokenizer, max_len, batch_size):
    """Build an *unshuffled* DataLoader over *df* (evaluation/test split).

    Same construction as ``create_data_loader`` but keeps row order so
    predictions line up with the input rows.
    """
    dataset = RFDataset(
        text=df.text.to_numpy(),
        label=df.label.to_numpy(),
        tokenizer=tokenizer,
        max_len=max_len,
    )
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
    )
# The test loader must keep row order (shuffle=False) so predictions line
# up with the submission ids.
test_data_loader = create_dataloader(test,tokenizer,MAX_LEN,BATCH_SIZE)
print('Training set size:',train.shape)
print('validation set size:',val.shape)
print('Testing set size:',test.shape)
import torch.nn as nn
class XLMRobertaclass(nn.Module):
    """XLM-RoBERTa encoder topped with a two-layer classification head."""

    def __init__(self, n_classes):
        super(XLMRobertaclass, self).__init__()
        self.auto = AutoModel.from_pretrained('xlm-roberta-base')
        self.drop = nn.Dropout(p=0.4)
        self.out1 = nn.Linear(self.auto.config.hidden_size, 128)
        self.drop1 = nn.Dropout(p=0.4)
        self.relu = nn.ReLU()
        self.out = nn.Linear(128, n_classes)

    def forward(self, input_ids, attention_mask):
        # transformers 3.x returns a tuple; index 1 is the pooled [CLS] output.
        _, pooled = self.auto(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        hidden = self.relu(self.out1(self.drop(pooled)))
        return self.out(self.drop1(hidden))
model = XLMRobertaclass(6)  # 6 offensive-language classes
model = model.to(device)
from transformers import AdamW,get_linear_schedule_with_warmup
EPOCHS = 10
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS
# Linear LR decay over all training steps, with no warmup.
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=total_steps
)
loss_fn = nn.CrossEntropyLoss().to(device)
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
    """Run one optimisation pass over *data_loader*.

    Returns a tuple of (accuracy over n_examples, mean batch loss).
    """
    model = model.train()
    batch_losses = []
    n_correct = 0
    for batch in data_loader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        label = batch['label'].to(device)
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        preds = torch.argmax(outputs, dim=1)
        loss = loss_fn(outputs, label)
        n_correct += torch.sum(preds == label)
        batch_losses.append(loss.item())
        loss.backward()
        # Clip gradients to stabilise transformer fine-tuning.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return n_correct.double() / n_examples, np.mean(batch_losses)
def eval_model(model, data_loader, loss_fn, device, n_examples):
    """Evaluate *model* over *data_loader* without gradient tracking.

    Returns a tuple of (accuracy over n_examples, mean batch loss).
    """
    model = model.eval()
    batch_losses = []
    n_correct = 0
    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            label = batch["label"].to(device)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            preds = torch.argmax(outputs, dim=1)
            batch_losses.append(loss_fn(outputs, label).item())
            n_correct += torch.sum(preds == label)
    return n_correct.double() / n_examples, np.mean(batch_losses)
import time
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
from collections import defaultdict
import torch
history = defaultdict(list)
best_accuracy = 0

# Fine-tune for EPOCHS epochs, tracking metrics and saving the best weights.
for epoch in range(EPOCHS):
    start_time = time.time()
    train_acc,train_loss = train_epoch(
        model,
        train_data_loader,
        loss_fn,
        optimizer,
        device,
        scheduler,
        len(train)
    )
    val_acc,val_loss = eval_model(
        model,
        val_data_loader,
        loss_fn,
        device,
        len(val)
    )
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'Train Loss {train_loss} accuracy {train_acc}')
    print(f'Val Loss {val_loss} accuracy {val_acc}')
    print()
    history['train_acc'].append(train_acc)
    history['train_loss'].append(train_loss)
    history['val_acc'].append(val_acc)
    history['val_loss'].append(val_loss)
    # BUG FIX: the checkpoint was previously selected on train_acc, which
    # rewards overfitting; select the best model on validation accuracy.
    if val_acc > best_accuracy:
        torch.save(model.state_dict(),'xlm-roberta-base.bin')
        best_accuracy = val_acc
import matplotlib.pyplot as plt
# Learning curves over epochs.
# NOTE(review): history stores torch tensors for the accuracy entries;
# calling .item() when appending would keep the history plain floats.
plt.plot(history['train_acc'], label='train accuracy')
plt.plot(history['val_acc'], label='validation accuracy')
plt.title('Training history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
# plt.ylim([0, 1]);
val_acc.item()
def get_predictions(model, data_loader):
    """Collect texts, argmax predictions, raw logits and true labels.

    Relies on the module-level ``device`` for tensor placement.
    """
    model = model.eval()
    texts = []
    pred_labels = []
    pred_logits = []
    true_labels = []
    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            label = batch["label"].to(device)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            preds = torch.argmax(outputs, dim=1)
            texts.extend(batch["text"])
            pred_labels.extend(preds)
            pred_logits.extend(outputs)
            true_labels.extend(label)
    return (
        texts,
        torch.stack(pred_labels).cpu(),
        torch.stack(pred_logits).cpu(),
        torch.stack(true_labels).cpu(),
    )
# Run inference on the test split and build the submission file.
y_review_texts, y_pred, y_pred_probs, y_test = get_predictions(
    model,
    test_data_loader
)
# Derive the id column from the actual number of predictions instead of the
# previously hard-coded 778, so this cell works for any test-set size.
df = pd.DataFrame({
    'id': range(len(y_review_texts)),
    'text': y_review_texts,
    'label': y_pred.tolist(),
})
# Map integer class ids back to the original label strings.
df.label = df.label.apply({0:'Not_offensive',1:'Offensive_Untargetede',2:'Offensive_Targeted_Insult_Group',3:'Offensive_Targeted_Insult_Individual',4:'not-Kannada', 5:'Offensive_Targeted_Insult_Other'}.get)
df
df.to_csv('XLMRoberta_Kannada_submission.csv',index=False)
from google.colab import files
files.download("XLMRoberta_Kannada_submission.csv")
```
| github_jupyter |
```
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<!-- <table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/notebook_template.ipynb"">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/notebook_template.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table> -->
# Orchestrating ML workflow to Train and Deploy a PyTorch Text Classification Model on [Vertex AI Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction)
## Overview
This notebook is an extension to the [previous notebook](./pytorch-text-classification-vertex-ai-train-tune-deploy.ipynb) to fine-tune and deploy a [pre-trained BERT model from HuggingFace Hub](https://huggingface.co/bert-base-cased) for sentiment classification task. This notebook shows how to automate and monitor a PyTorch based ML workflow by orchestrating the pipeline in a serverless manner using [Vertex AI Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction).
The notebook defines a pipeline using [Kubeflow Pipelines v2 (`kfp.v2`) SDK](https://www.kubeflow.org/docs/components/pipelines/sdk-v2/) and submits the pipeline to Vertex AI Pipelines services.
### Dataset
The notebook uses [IMDB movie review dataset](https://huggingface.co/datasets/imdb) from [Hugging Face Datasets](https://huggingface.co/datasets).
### Objective
How to **orchestrate PyTorch ML workflows on [Vertex AI](https://cloud.google.com/vertex-ai)** and emphasize first class support for training, deploying and orchestrating PyTorch workflows on Vertex AI.
### Table of Contents
This notebook covers following sections:
---
- [High Level Flow of Building a Pipeline](#High-Level-Flow-of-Building-a-Pipeline): Understand pipeline concepts and pipeline schematic
- [Define the Pipeline Components](#Define-the-Pipeline-Components-for-PyTorch-based-ML-Workflow): Authoring custom pipeline components for PyTorch based ML Workflow
- [Define Pipeline Specification](#Define-Pipeline-Specification): Author pipeline specification using KFP v2 SDK for PyTorch based ML workflow
- [Submit Pipeline](#Submit-Pipeline): Compile and execute pipeline on Vertex AI Pipelines
- [Monitoring the Pipeline](#Monitoring-the-Pipeline): Monitor progress of pipeline and view logs, lineage, artifacts and pipeline runs
---
### Costs
This tutorial uses billable components of Google Cloud Platform (GCP):
* [Vertex AI Workbench](https://cloud.google.com/vertex-ai-workbench)
* [Vertex AI Training](https://cloud.google.com/vertex-ai/docs/training/custom-training)
* [Vertex AI Predictions](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions)
* [Vertex AI Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction)
* [Cloud Storage](https://cloud.google.com/storage)
* [Container Registry](https://cloud.google.com/container-registry)
* [Cloud Build](https://cloud.google.com/build) *[Optional]*
Learn about [Vertex AI Pricing](https://cloud.google.com/vertex-ai/pricing), [Cloud Storage Pricing](https://cloud.google.com/storage/pricing) and [Cloud Build Pricing](https://cloud.google.com/build/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.
***
**NOTE:** This notebook does not require a GPU runtime. However, you must have GPU quota for running the jobs with GPUs launched by pipelines. Check the [quotas](https://console.cloud.google.com/iam-admin/quotas) page to ensure that you have enough GPUs available in your project. If GPUs are not listed on the quotas page or you require additional GPU quota, [request a quota increase](https://cloud.google.com/compute/quotas#requesting_additional_quota). Free Trial accounts do not receive GPU quota by default.
***
### Set up your local development environment
**If you are using Colab or Google Cloud Notebooks**, your environment already meets
all the requirements to run this notebook. You can skip this step.
**Otherwise**, make sure your environment meets this notebook's requirements.
You need the following:
* The Google Cloud SDK
* Git
* Python 3
* virtualenv
* Jupyter notebook running in a virtual environment with Python 3
The Google Cloud guide to [Setting up a Python development
environment](https://cloud.google.com/python/setup) and the [Jupyter
installation guide](https://jupyter.org/install) provide detailed instructions
for meeting these requirements. The following steps provide a condensed set of
instructions:
1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
2. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.
4. To install Jupyter, run `pip3 install jupyter` on the command-line in a terminal shell.
5. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
6. Open this notebook in the Jupyter Notebook Dashboard.
### Install additional packages
Following are the Python dependencies required for this notebook and will be installed in the Notebooks instance itself.
- [Kubeflow Pipelines v2 SDK](https://pypi.org/project/kfp/)
- [Google Cloud Pipeline Components](https://pypi.org/project/google-cloud-pipeline-components/)
- [Vertex AI SDK for Python](https://pypi.org/project/google-cloud-aiplatform/)
---
The notebook has been tested with the following versions of Kubeflow Pipelines SDK and Google Cloud Pipeline Components
```
kfp version: 1.8.10
google_cloud_pipeline_components version: 0.2.2
```
---
```
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
    USER_FLAG = "--user"
# Install/upgrade the KFP SDK and Google Cloud pipeline components.
!pip -q install {USER_FLAG} --upgrade kfp
!pip -q install {USER_FLAG} --upgrade google-cloud-pipeline-components
```
#### Install Vertex AI SDK for Python
The notebook uses [Vertex AI SDK for Python](https://cloud.google.com/vertex-ai/docs/start/client-libraries#python) to interact with Vertex AI services. The high-level `google-cloud-aiplatform` library is designed to simplify common data science workflows by using wrapper classes and opinionated defaults.
```
!pip -q install {USER_FLAG} --upgrade google-cloud-aiplatform
```
### Restart the kernel
After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
```
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    # do_shutdown(True) restarts the kernel so new packages are importable.
    app.kernel.do_shutdown(True)
```
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
```
!python3 -c "import kfp; print('kfp version: {}'.format(kfp.__version__))"
!python3 -c "import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))"
```
## Before you begin
This notebook does not require a GPU runtime.
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. Enable following APIs in your project required for running the tutorial
- [Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com)
- [Cloud Storage API](https://console.cloud.google.com/flows/enableapi?apiid=storage.googleapis.com)
- [Container Registry API](https://console.cloud.google.com/flows/enableapi?apiid=containerregistry.googleapis.com)
- [Cloud Build API](https://console.cloud.google.com/flows/enableapi?apiid=cloudbuild.googleapis.com)
1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
1. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
#### Set your project ID
**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
```
PROJECT_ID = "[your-project-id]"  # <---CHANGE THIS TO YOUR PROJECT
import os
# Get your Google Cloud project ID using google.auth
if not os.getenv("IS_TESTING"):
    import google.auth
    # NOTE(review): this overwrites any manually entered PROJECT_ID above
    # whenever IS_TESTING is unset — confirm that is intended.
    _, PROJECT_ID = google.auth.default()
    print("Project ID: ", PROJECT_ID)
# validate PROJECT_ID
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    print(
        f"Please set your project id before proceeding to next step. Currently it's set as {PROJECT_ID}"
    )
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
```
from datetime import datetime

def get_timestamp():
    """Return the current local time as a compact YYYYMMDDHHMMSS string."""
    return f"{datetime.now():%Y%m%d%H%M%S}"

TIMESTAMP = get_timestamp()
print(f"TIMESTAMP = {TIMESTAMP}")
```
### Authenticate your Google Cloud account
---
**If you are using Google Cloud Notebooks**, your environment is already authenticated. Skip this step.
---
**If you are using Colab**, run the cell below and follow the instructions
when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
2. Click **Create service account**.
3. In the **Service account name** field, enter a name, and click **Create**.
4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
5. Click *Create*. A JSON file that contains your key downloads to your local environment.
6. Enter the path to your service account key as the `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
```
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
    if "google.colab" in sys.modules:
        # Colab: interactive OAuth flow.
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a training job using the Cloud SDK, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex AI runs the code from this package. In this tutorial, Vertex AI also saves the trained model that results from your job in the same bucket. Using this model artifact, you can then create Vertex AI model and endpoint resources in order to serve online predictions.
Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets.
You may also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may not use a Multi-Regional Storage bucket for training with Vertex AI.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
REGION = "us-central1"  # @param {type:"string"}
# Fall back to an auto-generated bucket name when none was provided.
# NOTE(review): the generated name concatenates PROJECT_ID directly with
# "aip-" (no separator) — confirm "<project>aip-<timestamp>" is intended.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
---
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
---
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Import libraries and define constants
Import python libraries required to run the pipeline and define constants.
```
%load_ext autoreload
%autoreload 2
import os
from typing import NamedTuple
import google_cloud_pipeline_components
import kfp
from google.cloud import aiplatform
from google.cloud.aiplatform import gapic as aip
from google.cloud.aiplatform import pipeline_jobs
from google.protobuf.json_format import MessageToDict
from google_cloud_pipeline_components import aiplatform as aip_components
from google_cloud_pipeline_components.experimental import custom_job
from kfp.v2 import compiler, dsl
from kfp.v2.dsl import Input, Metrics, Model, Output, component

APP_NAME = "finetuned-bert-classifier"

# Ensure user-installed console scripts are on PATH.
PATH = %env PATH
%env PATH={PATH}:/home/jupyter/.local/bin

# Pipeline root is the GCS path to store the artifacts from the pipeline runs
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{APP_NAME}"

print(f"Kubeflow Pipelines SDK version = {kfp.__version__}")
print(
    f"Google Cloud Pipeline Components version = {google_cloud_pipeline_components.__version__}"
)
print(f"Pipeline Root = {PIPELINE_ROOT}")
```
## High Level Flow of Building a Pipeline
Following is the high level flow to define and submit a pipeline on Vertex AI Pipelines:
1. Define pipeline components involved in training and deploying a PyTorch model
2. Define a pipeline by stitching the components in the workflow including pre-built [Google Cloud pipeline components](https://cloud.google.com/vertex-ai/docs/pipelines/components-introduction) and custom components
3. Compile and submit the pipeline to Vertex AI Pipelines service to run the workflow
4. Monitor the pipeline and analyze the metrics and artifacts generated

This notebook builds on the training and serving code developed previously in this [notebook](../pytorch-text-classification-vertex-ai-train-tune-deploy.ipynb).
---
### Concepts of a Pipeline
Let's look at the terminology and concepts used in [Kubeflow Pipelines SDK v2](https://www.kubeflow.org/docs/components/pipelines/sdk-v2/).

- **Component:** A component is a self-contained set of code performing a single task in a ML workflow, for example, training a model. A component interface is composed of inputs, outputs and a container image that the component’s code runs in - including an executable code and environment definition.
- **Pipeline:** A pipeline is composed of modular tasks defined as components that are chained together via inputs and outputs. Pipeline definition includes configuration such as parameters required to run the pipeline. Each component in a pipeline executes independently and the data (inputs and outputs) is passed between the components in a serialized format.
- **Inputs & Outputs:** Component’s inputs and outputs must be annotated with data type, which makes input or output a parameter or an artifact.
- **Parameters:** Parameters are inputs or outputs to support simple data types such as `str`, `int`, `float`, `bool`, `dict`, `list`. Input parameters are always passed by value between the components and are stored in the [Vertex ML Metadata](https://cloud.google.com/vertex-ai/docs/ml-metadata/introduction) service.
- **Artifacts:** Artifacts are references to the objects or files produced by pipeline runs that are passed as inputs or outputs. Artifacts support rich or larger data types such as datasets, models, metrics, visualizations that are written as files or objects. Artifacts are defined by name, uri and metadata which is stored automatically in the Vertex ML Metadata service and the actual content of artifacts is referred to a path in Cloud Storage bucket. Input artifacts are always passed by reference.
Learn more about KFP SDK v2 concepts [here](https://www.kubeflow.org/docs/components/pipelines/sdk-v2/).
---
### Pipeline schematic
Following is the high level pipeline schematic with tasks involved in the pipeline for the PyTorch based text classification model including input and outputs:

- **Build custom training image:** This step builds a custom training container image from the training application code and associated Dockerfile with the dependencies. The output from this step is the Container or Artifact registry URI of the custom training container.
- **Run the custom training job to train and evaluate the model:** This step downloads and preprocesses training data from IMDB sentiment classification dataset on HuggingFace, then trains and evaluates a model on the custom training container from the previous step. The step outputs Cloud Storage path to the trained model artifacts and the model performance metrics.
- **Package model artifacts:** This step packages trained model artifacts including custom prediction handler to create a model archive (.mar) file using Torch Model Archiver tool. The output from this step is the location of model archive (.mar) file on GCS.
- **Build custom serving image:** The step builds a custom serving container running TorchServe HTTP server to serve prediction requests for the models mounted. The output from this step is the Container or Artifact registry URI to the custom serving container.
- **Upload model with custom serving container:** This step creates a model resource using the custom serving image and MAR file from the previous steps.
- **Create an endpoint:** This step creates a Vertex AI Endpoint to provide a service URL where the prediction requests are sent.
- **Deploy model to endpoint for serving:** This step deploys the model to the endpoint created that creates necessary compute resources (based on the machine spec configured) to serve online prediction requests.
- **Validate deployment:** This step sends test requests to the endpoint and validates the deployment.
## Define the Pipeline Components for PyTorch based ML Workflow
The pipeline uses a mix of pre-built components from [Google Cloud Pipeline Components SDK](https://cloud.google.com/vertex-ai/docs/pipelines/components-introduction) to interact with Google Cloud services such as Vertex AI and define custom components for some steps in the pipeline. This section of the notebook defines custom components to perform the tasks in the pipeline using [KFP SDK v2 component spec](https://www.kubeflow.org/docs/components/pipelines/sdk-v2/component-development/).
**Create pipeline directory locally to save the component and pipeline specifications**
```
!mkdir -p ./pipelines
```
### 1. Component: Build Custom Training Container Image
This step builds a custom training container image using Cloud Build. The build job pulls the training application code and associated `Dockerfile` with the dependencies from GCS location and build/push the custom training container image to Container Registry.
- **Inputs**: The inputs to this component are GCS path to the training application code and Dockerfile.
- **Outputs**: The output from this step is the Container or Artifact registry URI of the custom training container.
**Create `Dockerfile` from PyTorch GPU image as base, install required dependencies and copy training application code**
```
%%writefile ./custom_container/Dockerfile
# Use pytorch GPU base image
# FROM gcr.io/cloud-aiplatform/training/pytorch-gpu.1-7
FROM us-docker.pkg.dev/vertex-ai/training/pytorch-gpu.1-10:latest
# set working directory
WORKDIR /app
# Install required packages
RUN pip install google-cloud-storage transformers datasets tqdm cloudml-hypertune
# Copies the trainer code to the docker image.
COPY ./trainer/__init__.py /app/trainer/__init__.py
COPY ./trainer/experiment.py /app/trainer/experiment.py
COPY ./trainer/utils.py /app/trainer/utils.py
COPY ./trainer/metadata.py /app/trainer/metadata.py
COPY ./trainer/model.py /app/trainer/model.py
COPY ./trainer/task.py /app/trainer/task.py
# Set up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
```
**Copy training application code and `Dockerfile` from local path to GCS location**
```
# copy training Dockerfile
!gsutil cp ./custom_container/Dockerfile {BUCKET_NAME}/{APP_NAME}/train/
# copy training application code
!gsutil cp -r ./python_package/trainer/ {BUCKET_NAME}/{APP_NAME}/train/
# list copied files from GCS location
!gsutil ls -Rl {BUCKET_NAME}/{APP_NAME}/train/
print(
f"Copied training application code and Dockerfile to {BUCKET_NAME}/{APP_NAME}/train/"
)
```
**Define custom pipeline component to build custom training container**
```
@component(
    base_image="gcr.io/google.com/cloudsdktool/cloud-sdk:latest",
    packages_to_install=["google-cloud-build"],
    output_component_file="./pipelines/build_custom_train_image.yaml",
)
def build_custom_train_image(
    project: str, gs_train_src_path: str, training_image_uri: str
) -> NamedTuple("Outputs", [("training_image_uri", str)]):
    """Custom pipeline component to build the custom training container.

    Submits a Cloud Build job that copies the training application code and
    Dockerfile from GCS into the build workspace, then builds and pushes the
    image to Container Registry with Kaniko (layer caching enabled), and
    blocks until the build completes.

    Args:
        project: GCP project ID in which the Cloud Build job runs.
        gs_train_src_path: GCS folder containing the ``Dockerfile`` and the
            ``trainer/`` application code.
        training_image_uri: Destination Container/Artifact Registry URI for
            the built training image.

    Returns:
        One-element tuple holding the training image URI (passed through so
        downstream pipeline steps can consume it).
    """
    import logging
    import os

    from google.cloud.devtools import cloudbuild_v1 as cloudbuild
    from google.protobuf.duration_pb2 import Duration

    logging.getLogger().setLevel(logging.INFO)
    build_client = cloudbuild.services.cloud_build.CloudBuildClient()

    # Resolve the GCS locations of the Dockerfile and the trainer package.
    gs_dockerfile_path = os.path.join(gs_train_src_path, "Dockerfile")
    gs_train_src_path = os.path.join(gs_train_src_path, "trainer/")
    logging.info(f"training_image_uri: {training_image_uri}")

    # Build steps: stage sources + Dockerfile, then build/push with Kaniko.
    # Kaniko caches intermediate layers and pushes the image automatically:
    # https://cloud.google.com/build/docs/kaniko-cache
    build = cloudbuild.Build()
    build.steps = [
        {
            "name": "gcr.io/cloud-builders/gsutil",
            "args": ["cp", "-r", gs_train_src_path, "."],
        },
        {
            "name": "gcr.io/cloud-builders/gsutil",
            "args": ["cp", gs_dockerfile_path, "Dockerfile"],
        },
        {
            "name": "gcr.io/kaniko-project/executor:latest",
            "args": [f"--destination={training_image_uri}", "--cache=true"],
        },
    ]

    # Override the default 10 min build timeout; GPU base images take longer.
    timeout = Duration()
    timeout.seconds = 7200
    build.timeout = timeout

    # Submit the build and wait for completion.
    operation = build_client.create_build(project_id=project, build=build)
    logging.info("IN PROGRESS:")
    logging.info(operation.metadata)
    result = operation.result()
    # Bug fix: logging.info("RESULT:", result.status) used print-style
    # arguments, so the status was silently dropped from the log (the extra
    # arg is a %-format arg for a message with no placeholders). Use lazy
    # %-formatting instead.
    logging.info("RESULT: %s", result.status)

    # return step outputs
    return (training_image_uri,)
```
There are a few things to notice about the component specification:
- The standalone function defined is converted as a pipeline component using the [`@kfp.v2.dsl.component`](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/v2/components/component_decorator.py) decorator.
- All the arguments in the standalone function must have data type annotations because KFP uses the function’s inputs and outputs to define the component’s interface.
- By default Python 3.7 is used as the base image to run the code defined. You can [configure the `@component` decorator](https://www.kubeflow.org/docs/components/pipelines/sdk-v2/python-function-components/#building-python-function-based-components) to override the default image by specifying `base_image`, install additional python packages using `packages_to_install` parameter and write the compiled component file as a YAML file using `output_component_file` to share or reuse the component.
### 2. Component: Get Custom Training Job Details from Vertex AI
This step gets details from a custom training job from Vertex AI including training elapsed time, model performance metrics that will be used in the next step before the model deployment. The step additionally creates [Model](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/v2/components/types/artifact_types.py#L77) artifact with trained model artifacts.
**NOTE:** The pre-built [custom job component](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.1/google_cloud_pipeline_components.experimental.custom_job.html) used in the pipeline outputs CustomJob resource but not the model artifacts.
- **Inputs**:
- **`job_resource`:** Custom job resource returned by pre-built CustomJob component
- **`project`:** Project ID where the job ran
- **`region`:** Region where the job ran
- **`eval_metric_key`:** Evaluation metric key name such as eval_accuracy
- **`model_display_name`:** Model display name for saving model artifacts
- **Outputs**:
- **`model`**: Trained model artifacts created by the training job with added model metadata
- **`metrics`**: Model performance metrics captured from the training job
```
@component(
    base_image="python:3.9",
    packages_to_install=[
        "google-cloud-pipeline-components",
        "google-cloud-aiplatform",
        "pandas",
        "fsspec",
    ],
    output_component_file="./pipelines/get_training_job_details.yaml",
)
def get_training_job_details(
    project: str,
    location: str,
    job_resource: str,
    eval_metric_key: str,
    model_display_name: str,
    metrics: Output[Metrics],
    model: Output[Model],
) -> NamedTuple(
    "Outputs", [("eval_metric", float), ("eval_loss", float), ("model_artifacts", str)]
):
    """Custom pipeline component to get model artifacts and performance
    metrics from a custom training job.

    Parses the CustomJob resource returned by the pre-built CustomJob
    component, looks the job up via the Vertex AI Job service, copies the
    job's output directory into this component's ``model`` artifact, and
    logs the training metrics found in ``all_results.json`` to ``metrics``.

    Args:
        project: Project ID where the job ran.
            NOTE(review): currently unused in the body — only ``location``
            is used to build the API endpoint; confirm before removing.
        location: Region where the job ran (builds the regional endpoint).
        job_resource: Serialized GcpResources proto from the CustomJob step.
        eval_metric_key: Evaluation metric key name such as eval_accuracy.
        model_display_name: Model display name used in the artifact paths.
        metrics: Output metrics artifact populated from the job results.
        model: Output model artifact holding the copied training outputs.

    Returns:
        Outputs(eval_metric, eval_loss, model_artifacts): evaluation metric
        value, evaluation loss (each None when absent from the results
        file), and the job's GCS base output directory.
    """
    import logging
    import shutil
    from collections import namedtuple

    import pandas as pd
    from google.cloud.aiplatform import gapic as aip
    from google.protobuf.json_format import Parse
    from google_cloud_pipeline_components.proto.gcp_resources_pb2 import \
        GcpResources

    # Parse the serialized job resource; the fully-qualified CustomJob name
    # is the last 6 path segments of the resource URI
    # (projects/<p>/locations/<l>/customJobs/<id>).
    logging.info(f"Custom job resource = {job_resource}")
    training_gcp_resources = Parse(job_resource, GcpResources())
    custom_job_id = training_gcp_resources.resources[0].resource_uri
    custom_job_name = "/".join(custom_job_id.split("/")[-6:])
    logging.info(f"Custom job name parsed = {custom_job_name}")

    # Query the Vertex AI Job service for the job's base output directory.
    # Note: ``job_resource`` is rebound here from the input string to the
    # CustomJob response object.
    API_ENDPOINT = "{}-aiplatform.googleapis.com".format(location)
    client_options = {"api_endpoint": API_ENDPOINT}
    job_client = aip.JobServiceClient(client_options=client_options)
    job_resource = job_client.get_custom_job(name=custom_job_name)
    job_base_dir = job_resource.job_spec.base_output_directory.output_uri_prefix
    logging.info(f"Custom job base output directory = {job_base_dir}")

    # Copy model artifacts into this component's output artifact. The
    # gs:// -> /gcs/ rewrite assumes the GCS Fuse mount available inside
    # Vertex AI Pipelines containers.
    logging.info(f"Copying model artifacts to {model.path}")
    destination = shutil.copytree(job_base_dir.replace("gs://", "/gcs/"), model.path)
    logging.info(destination)
    logging.info(f"Model artifacts located at {model.uri}/model/{model_display_name}")
    logging.info(f"Model artifacts located at model.uri = {model.uri}")

    # Attach training metadata to the model artifact.
    start, end = job_resource.start_time, job_resource.end_time
    model.metadata["model_name"] = model_display_name
    model.metadata["framework"] = "pytorch"
    model.metadata["job_name"] = custom_job_name
    model.metadata["time_to_train_in_seconds"] = (end - start).total_seconds()

    # Read the metrics the training job wrote and log each one to the
    # metrics artifact.
    metrics_uri = f"{model.path}/model/{model_display_name}/all_results.json"
    logging.info(f"Reading and logging metrics from {metrics_uri}")
    metrics_df = pd.read_json(metrics_uri, typ="series")
    for k, v in metrics_df.items():
        logging.info(f" {k} -> {v}")
        metrics.log_metric(k, v)

    # Capture the chosen eval metric and the eval loss (None when the key
    # is missing) and store them in the model metadata as well.
    eval_metric = (
        metrics_df[eval_metric_key] if eval_metric_key in metrics_df.keys() else None
    )
    eval_loss = metrics_df["eval_loss"] if "eval_loss" in metrics_df.keys() else None
    logging.info(f" {eval_metric_key} -> {eval_metric}")
    logging.info(f' "eval_loss" -> {eval_loss}')
    model.metadata[eval_metric_key] = eval_metric
    model.metadata["eval_loss"] = eval_loss

    # return output parameters
    outputs = namedtuple("Outputs", ["eval_metric", "eval_loss", "model_artifacts"])
    return outputs(eval_metric, eval_loss, job_base_dir)
```
### 3. Component: Create Model Archive (MAR) file using Torch Model Archiver
This step packages trained model artifacts and the custom prediction handler (defined in the earlier notebook) as a model archive (.mar) file using the [Torch Model Archiver](https://github.com/pytorch/serve/tree/master/model-archiver) tool.
- **Inputs**:
- **`model_display_name`:** Model display name for saving model archive file
- **`model_version`:** Model version for saving model archive file
- **`handler`:** Location of custom prediction handler
- **`model`:** Trained model artifacts from the previous step
- **Outputs**:
- **`model_mar`**: Packaged model archive file (artifact) on GCS
- **`mar_env`**: A list of environment variables required for creating model resource
- **`mar_export_uri`**: GCS path to the model archive file
**Copy custom prediction handler code from local path to GCS location**
**NOTE**: Custom prediction handler is defined in the [previous notebook](./pytorch-text-classification-vertex-ai-train-tune-deploy.ipynb)
```
# copy custom prediction handler
!gsutil cp ./predictor/custom_handler.py ./predictor/index_to_name.json {BUCKET_NAME}/{APP_NAME}/serve/predictor/
# list copied files from GCS location
!gsutil ls -lR {BUCKET_NAME}/{APP_NAME}/serve/
print(f"Copied custom prediction handler code to {BUCKET_NAME}/{APP_NAME}/serve/")
```
**Define custom pipeline component to create model archive file**
```
@component(
    base_image="python:3.9",
    packages_to_install=["torch-model-archiver"],
    output_component_file="./pipelines/generate_mar_file.yaml",
)
def generate_mar_file(
    model_display_name: str,
    model_version: str,
    handler: str,
    model: Input[Model],
    model_mar: Output[Model],
) -> NamedTuple("Outputs", [("mar_env_var", list), ("mar_export_uri", str)]):
    """Custom pipeline component to package model artifacts and custom
    handler to a model archive file using Torch Model Archiver tool.

    Args:
        model_display_name: Model display name; also the MAR model name.
        model_version: Version string recorded in the model archive.
        handler: Location of the custom prediction handler. A ``gs://``
            path is mapped to the ``/gcs/`` Fuse mount and the
            ``predictor/custom_handler.py`` suffix is appended.
        model: Trained model artifacts produced by the training step.
        model_mar: Output artifact under which ``model-store/`` with the
            generated .mar file is written.

    Returns:
        Outputs(mar_env_var, mar_export_uri): environment variables needed
        when creating the model resource, and the GCS URI of the
        ``model-store/`` directory holding the .mar file.
    """
    import logging
    import os
    import subprocess
    import time
    from collections import namedtuple
    from pathlib import Path

    logging.getLogger().setLevel(logging.INFO)

    # Create the directory that will hold the model archive file.
    model_output_root = model.path
    mar_output_root = model_mar.path
    export_path = f"{mar_output_root}/model-store"
    try:
        Path(export_path).mkdir(parents=True, exist_ok=True)
    except Exception as e:
        logging.warning(e)
        # Retry once after a pause.
        # NOTE(review): presumably works around a transient GCS Fuse error
        # on first access — confirm.
        time.sleep(2)
        Path(export_path).mkdir(parents=True, exist_ok=True)

    # Resolve the handler path: gs:// inputs are read through the /gcs/
    # Fuse mount; anything else is used verbatim.
    handler_path = (
        handler.replace("gs://", "/gcs/") + "predictor/custom_handler.py"
        if handler.startswith("gs://")
        else handler
    )
    # Everything in the artifacts dir except the serialized weights is
    # passed to the archiver as extra files (config, vocab, label map, ...).
    model_artifacts_dir = f"{model_output_root}/model/{model_display_name}"
    extra_files = [
        os.path.join(model_artifacts_dir, f)
        for f in os.listdir(model_artifacts_dir)
        if f != "pytorch_model.bin"
    ]

    # Model archive configuration consumed below when assembling the
    # torch-model-archiver command line.
    mar_config = {
        "MODEL_NAME": model_display_name,
        "HANDLER": handler_path,
        "SERIALIZED_FILE": f"{model_artifacts_dir}/pytorch_model.bin",
        "VERSION": model_version,
        "EXTRA_FILES": ",".join(extra_files),
        "EXPORT_PATH": f"{model_mar.path}/model-store",
    }

    # Assemble the archiver command; optional flags are appended only when
    # present in the config (REQUIREMENTS_FILE is never set above, so that
    # branch is currently inert).
    archiver_cmd = (
        "torch-model-archiver --force "
        f"--model-name {mar_config['MODEL_NAME']} "
        f"--serialized-file {mar_config['SERIALIZED_FILE']} "
        f"--handler {mar_config['HANDLER']} "
        f"--version {mar_config['VERSION']}"
    )
    if "EXPORT_PATH" in mar_config:
        archiver_cmd += f" --export-path {mar_config['EXPORT_PATH']}"
    if "EXTRA_FILES" in mar_config:
        archiver_cmd += f" --extra-files {mar_config['EXTRA_FILES']}"
    if "REQUIREMENTS_FILE" in mar_config:
        archiver_cmd += f" --requirements-file {mar_config['REQUIREMENTS_FILE']}"

    # Run the archiver command.
    # NOTE(review): shell=True on a string built from step inputs — inputs
    # are pipeline-controlled here, but worth flagging. Also, ANY stderr
    # output (including warnings) raises ValueError below.
    logging.warning("Running archiver command: %s", archiver_cmd)
    with subprocess.Popen(
        archiver_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    ) as p:
        _, err = p.communicate()
        if err:
            raise ValueError(err)

    # Set output variables for downstream model-upload steps.
    mar_env_var = [{"name": "MODEL_NAME", "value": model_display_name}]
    mar_export_uri = f"{model_mar.uri}/model-store/"
    outputs = namedtuple("Outputs", ["mar_env_var", "mar_export_uri"])
    return outputs(mar_env_var, mar_export_uri)
```
### 4. Component: Create custom serving container running TorchServe
The step builds a [custom serving container](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements) running [TorchServe](https://pytorch.org/serve/) HTTP server to serve prediction requests for the models mounted. The output from this step is the Container registry URI to the custom serving container.
- **Inputs**:
- **`project`:** Project ID to run
- **`serving_image_uri`:** Custom serving container URI from Container registry
- **`gs_serving_dependencies_path`:** Location of serving dependencies - Dockerfile
- **Outputs**:
- **`serving_image_uri`**: Custom serving container URI from Container registry
**Create `Dockerfile` from TorchServe CPU image as base, install required dependencies and run TorchServe serve command**
```
%%bash -s $APP_NAME
APP_NAME=$1
# Write the serving Dockerfile. $APP_NAME is substituted by bash now;
# the \${...} escapes survive into the file for Docker-time expansion.
cat << EOF > ./predictor/Dockerfile.serve
FROM pytorch/torchserve:latest-cpu
USER root
# run and update some basic packages software packages, including security libs
RUN apt-get update && \
    apt-get install -y software-properties-common && \
    add-apt-repository -y ppa:ubuntu-toolchain-r/test && \
    apt-get update && \
    apt-get install -y gcc-9 g++-9 apt-transport-https ca-certificates gnupg curl
# Install gcloud tools for gsutil as well as debugging
RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | \
    tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
    curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
    apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \
    apt-get update -y && \
    apt-get install google-cloud-sdk -y
USER model-server
# install dependencies
RUN python3 -m pip install --upgrade pip
RUN pip3 install transformers
ARG MODEL_NAME=$APP_NAME
ENV MODEL_NAME="\${MODEL_NAME}"
# health and prediction listener ports
ARG AIP_HTTP_PORT=7080
ENV AIP_HTTP_PORT="\${AIP_HTTP_PORT}"
ARG MODEL_MGMT_PORT=7081
# expose health and prediction listener ports from the image
EXPOSE "\${AIP_HTTP_PORT}"
EXPOSE "\${MODEL_MGMT_PORT}"
EXPOSE 8080 8081 8082 7070 7071
# create torchserve configuration file
USER root
RUN echo "service_envelope=json\n" \
    "inference_address=http://0.0.0.0:\${AIP_HTTP_PORT}\n" \
    "management_address=http://0.0.0.0:\${MODEL_MGMT_PORT}" >> \
    /home/model-server/config.properties
USER model-server
# run Torchserve HTTP serve to respond to prediction requests
CMD ["echo", "AIP_STORAGE_URI=\${AIP_STORAGE_URI}", ";", \
    "gsutil", "cp", "-r", "\${AIP_STORAGE_URI}/\${MODEL_NAME}.mar", "/home/model-server/model-store/", ";", \
    "ls", "-ltr", "/home/model-server/model-store/", ";", \
    "torchserve", "--start", "--ts-config=/home/model-server/config.properties", \
    "--models", "\${MODEL_NAME}=\${MODEL_NAME}.mar", \
    "--model-store", "/home/model-server/model-store"]
EOF
# Bug fix: the confirmation message previously said "Dockerfile" although
# the file written above is Dockerfile.serve.
echo "Writing ./predictor/Dockerfile.serve"
```
**Copy serving `Dockerfile` from local path to GCS location**
```
# copy serving Dockerfile
!gsutil cp ./predictor/Dockerfile.serve {BUCKET_NAME}/{APP_NAME}/serve/
# list copied files from GCS location
!gsutil ls -lR {BUCKET_NAME}/{APP_NAME}/serve/
print(f"Copied serving Dockerfile to {BUCKET_NAME}/{APP_NAME}/serve/")
```
**Define custom pipeline component to build custom serving container**
```
@component(
    base_image="python:3.9",
    packages_to_install=["google-cloud-build"],
    output_component_file="./pipelines/build_custom_serving_image.yaml",
)
def build_custom_serving_image(
    project: str, gs_serving_dependencies_path: str, serving_image_uri: str
) -> NamedTuple("Outputs", [("serving_image_uri", str)],):
    """Custom pipeline component to build the custom serving container.

    Submits a Cloud Build job that copies ``Dockerfile.serve`` from GCS
    into the build workspace, then builds and pushes the TorchServe-based
    serving image with Kaniko (layer caching enabled), blocking until the
    build completes.

    Args:
        project: GCP project ID in which the Cloud Build job runs.
        gs_serving_dependencies_path: GCS folder holding ``Dockerfile.serve``.
        serving_image_uri: Destination Container/Artifact Registry URI for
            the built serving image.

    Returns:
        One-element tuple holding the serving image URI (passed through so
        downstream pipeline steps can consume it).
    """
    import logging
    import os

    from google.cloud.devtools import cloudbuild_v1 as cloudbuild
    from google.protobuf.duration_pb2 import Duration

    logging.getLogger().setLevel(logging.INFO)
    build_client = cloudbuild.services.cloud_build.CloudBuildClient()

    logging.info(f"gs_serving_dependencies_path: {gs_serving_dependencies_path}")
    gs_dockerfile_path = os.path.join(gs_serving_dependencies_path, "Dockerfile.serve")
    logging.info(f"serving_image_uri: {serving_image_uri}")

    # Build steps: stage the Dockerfile, then build/push with Kaniko.
    # Kaniko caches intermediate layers and pushes the image automatically:
    # https://cloud.google.com/build/docs/kaniko-cache
    build = cloudbuild.Build()
    build.steps = [
        {
            "name": "gcr.io/cloud-builders/gsutil",
            "args": ["cp", gs_dockerfile_path, "Dockerfile"],
        },
        {
            "name": "gcr.io/kaniko-project/executor:latest",
            "args": [f"--destination={serving_image_uri}", "--cache=true"],
        },
    ]

    # Override the default 10 min build timeout.
    timeout = Duration()
    timeout.seconds = 7200
    build.timeout = timeout

    # Submit the build and wait for completion.
    operation = build_client.create_build(project_id=project, build=build)
    logging.info("IN PROGRESS:")
    logging.info(operation.metadata)
    result = operation.result()
    # Bug fix: logging.info("RESULT:", result.status) used print-style
    # arguments, silently dropping the status (same defect as in
    # build_custom_train_image). Use lazy %-formatting instead.
    logging.info("RESULT: %s", result.status)

    # return step outputs
    return (serving_image_uri,)
```
### 5. Component: Test model deployment making online prediction requests
This step sends test requests to the Vertex AI Endpoint and validates the deployment by sending test prediction requests. Deployment is considered successful when the response from model server returns text sentiment.
- **Inputs**:
- **`project`:** Project ID to run
- **`bucket`:** Staging GCS bucket path
- **`endpoint`:** Location of Vertex AI Endpoint from the Endpoint creation task
- **`instances`:** List of test prediction requests
- **Outputs**:
- None
```
@component(
    base_image="python:3.9",
    packages_to_install=["google-cloud-aiplatform", "google-cloud-pipeline-components"],
    output_component_file="./pipelines/make_prediction_request.yaml",
)
def make_prediction_request(project: str, bucket: str, endpoint: str, instances: list):
    """Custom pipeline component that validates a deployment by sending
    test prediction requests to a Vertex AI endpoint and logging the
    responses.

    Args:
        project: Project ID to run in.
        bucket: Staging GCS bucket path for the aiplatform client.
        endpoint: Serialized GcpResources proto identifying the endpoint
            (output of the endpoint-creation task).
        instances: Test prediction request payloads (str or bytes).
    """
    import base64
    import logging

    from google.cloud import aiplatform
    from google.protobuf.json_format import Parse
    from google_cloud_pipeline_components.proto.gcp_resources_pb2 import \
        GcpResources

    logging.getLogger().setLevel(logging.INFO)
    aiplatform.init(project=project, staging_bucket=bucket)

    # The endpoint arrives as a serialized GcpResources proto; extract the
    # endpoint resource name from its URI.
    logging.info(f"Endpoint = {endpoint}")
    parsed_resources = Parse(endpoint, GcpResources())
    resource_uri = parsed_resources.resources[0].resource_uri
    endpoint_id = "/".join(resource_uri.split("/")[-8:-2])
    logging.info(f"Endpoint ID = {endpoint_id}")

    _endpoint = aiplatform.Endpoint(endpoint_id)

    # Send each test instance as a base64-encoded TorchServe payload and
    # log the prediction that comes back.
    for raw_instance in instances:
        instance = (
            raw_instance
            if isinstance(raw_instance, (bytes, bytearray))
            else raw_instance.encode()
        )
        logging.info(f"Input text: {instance.decode('utf-8')}")
        b64_encoded = base64.b64encode(instance)
        test_instance = [{"data": {"b64": f"{str(b64_encoded.decode('utf-8'))}"}}]
        response = _endpoint.predict(instances=test_instance)
        logging.info(f"Prediction response: {response.predictions}")
```
## Define Pipeline Specification
The pipeline definition describes how input and output parameters and artifacts are passed between the steps.
**Set environment variables**
These environment variables will be used to define resource specifications such as training jobs, model resource etc.
```
# Export pipeline settings as environment variables so the generated
# ./pipelines/pipeline_config.py module (next cell) can read them.
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["BUCKET"] = BUCKET_NAME
os.environ["REGION"] = REGION
os.environ["APP_NAME"] = APP_NAME
```
**Create pipeline configuration file**
Pipeline configuration files help templatize a pipeline, enabling you to run the same pipeline with different parameters.
```
%%writefile ./pipelines/pipeline_config.py
# Pipeline configuration module: templatizes the pipeline so the same
# definition can run with different parameters/environments.
import os
from datetime import datetime

# Values injected through environment variables set by the notebook.
PROJECT_ID = os.getenv("PROJECT_ID", "")
BUCKET = os.getenv("BUCKET", "")
REGION = os.getenv("REGION", "us-central1")
APP_NAME = os.getenv("APP_NAME", "finetuned-bert-classifier")

# Timestamp version makes model display names unique per run.
VERSION = datetime.now().strftime("%Y%m%d%H%M%S")
MODEL_NAME = APP_NAME
MODEL_DISPLAY_NAME = f"{MODEL_NAME}-{VERSION}"
PIPELINE_NAME = f"pytorch-{APP_NAME}"
PIPELINE_ROOT = f"{BUCKET}/pipeline_root/{MODEL_NAME}"
GCS_STAGING = f"{BUCKET}/pipeline_root/{MODEL_NAME}"

# Custom container image URIs: GPU image for training, CPU for serving.
TRAIN_IMAGE_URI = f"gcr.io/{PROJECT_ID}/pytorch_gpu_train_{MODEL_NAME}"
SERVE_IMAGE_URI = f"gcr.io/{PROJECT_ID}/pytorch_cpu_predict_{MODEL_NAME}"

# Training job resource specification.
# NOTE(review): REPLICA_COUNT and ACCELERATOR_COUNT are strings here while
# NUM_WORKERS is an int — confirm the CustomJob worker pool spec accepts
# string counts before relying on it.
MACHINE_TYPE = "n1-standard-8"
REPLICA_COUNT = "1"
ACCELERATOR_TYPE = "NVIDIA_TESLA_T4"
ACCELERATOR_COUNT = "1"
NUM_WORKERS = 1

# Serving (TorchServe) routes and deployment resources.
SERVING_HEALTH_ROUTE = "/ping"
SERVING_PREDICT_ROUTE = f"/predictions/{MODEL_NAME}"
SERVING_CONTAINER_PORT= [{"containerPort": 7080}]
SERVING_MACHINE_TYPE = "n1-standard-4"
SERVING_MIN_REPLICA_COUNT = 1
SERVING_MAX_REPLICA_COUNT=1
SERVING_TRAFFIC_SPLIT='{"0": 100}'
```
**Define pipeline specification**
The pipeline is defined as a standalone Python function annotated with the [`@kfp.dsl.pipeline`](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/v2/components/pipeline_context.py) decorator, specifying the pipeline's name and the root path where the pipeline's artifacts are stored.
The pipeline definition consists of both pre-built and custom defined components:
- Pre-built components from [Google Cloud Pipeline Components SDK](https://cloud.google.com/vertex-ai/docs/pipelines/components-introduction) are defined for tasks calling Vertex AI services such as submitting custom training job (`custom_job.CustomTrainingJobOp`), uploading a model (`ModelUploadOp`), creating an endpoint (`EndpointCreateOp`) and deploying a model to the endpoint (`ModelDeployOp`)
- Custom components are defined for tasks to build custom containers for training (`build_custom_train_image`), get training job details (`get_training_job_details`), create mar file (`generate_mar_file`) and serving (`build_custom_serving_image`) and validating the model deployment task (`make_prediction_request`). Refer to the notebook for custom component specification for these tasks.
```
from pipelines import pipeline_config as cfg
@dsl.pipeline(
    name=cfg.PIPELINE_NAME,
    pipeline_root=cfg.PIPELINE_ROOT,
)
def pytorch_text_classifier_pipeline(
    pipeline_job_id: str,
    gs_train_script_path: str,
    gs_serving_dependencies_path: str,
    eval_acc_threshold: float,
    is_hp_tuning_enabled: str = "n",
):
    """Define the end-to-end train-and-deploy KFP pipeline for the text classifier.

    Steps: build a custom training image, run a Vertex AI CustomJob with it,
    read the job's eval metric, and -- only when ``eval_metric`` exceeds
    ``eval_acc_threshold`` -- package a TorchServe MAR file, build the serving
    image, upload the model, create an endpoint, deploy to it, and smoke-test
    the deployment with online prediction requests.

    Args:
        pipeline_job_id: Unique id for this pipeline run.
        gs_train_script_path: GCS path to the training application code.
        gs_serving_dependencies_path: GCS path to the TorchServe handler and
            serving dependencies.
        eval_acc_threshold: Minimum eval accuracy required before the
            deployment branch runs.
        is_hp_tuning_enabled: "y"/"n" flag; not referenced in this pipeline
            body (presumably consumed by an HP-tuning variant -- confirm).
    """
    # ========================================================================
    # build custom training container image
    # ========================================================================
    # build custom container for training job passing the
    # GCS location of the training application code
    build_custom_train_image_task = (
        build_custom_train_image(
            project=cfg.PROJECT_ID,
            gs_train_src_path=gs_train_script_path,
            training_image_uri=cfg.TRAIN_IMAGE_URI,
        )
        .set_caching_options(True)
        .set_display_name("Build custom training image")
    )
    # ========================================================================
    # model training
    # ========================================================================
    # train the model on Vertex AI by submitting a CustomJob
    # using the custom container (no hyper-parameter tuning)
    # define training code arguments
    training_args = ["--num-epochs", "2", "--model-name", cfg.MODEL_NAME]
    # define job name
    JOB_NAME = f"{cfg.MODEL_NAME}-train-pytorch-cstm-cntr-{TIMESTAMP}"
    GCS_BASE_OUTPUT_DIR = f"{cfg.GCS_STAGING}/{TIMESTAMP}"
    # define worker pool specs
    worker_pool_specs = [
        {
            "machine_spec": {
                "machine_type": cfg.MACHINE_TYPE,
                "accelerator_type": cfg.ACCELERATOR_TYPE,
                "accelerator_count": cfg.ACCELERATOR_COUNT,
            },
            "replica_count": cfg.REPLICA_COUNT,
            "container_spec": {"image_uri": cfg.TRAIN_IMAGE_URI, "args": training_args},
        }
    ]
    run_train_task = (
        custom_job.CustomTrainingJobOp(
            project=cfg.PROJECT_ID,
            location=cfg.REGION,
            display_name=JOB_NAME,
            base_output_directory=GCS_BASE_OUTPUT_DIR,
            worker_pool_specs=worker_pool_specs,
        )
        .set_display_name("Run custom training job")
        .after(build_custom_train_image_task)
    )
    # ========================================================================
    # get training job details
    # ========================================================================
    training_job_details_task = get_training_job_details(
        project=cfg.PROJECT_ID,
        location=cfg.REGION,
        job_resource=run_train_task.output,
        eval_metric_key="eval_accuracy",
        model_display_name=cfg.MODEL_NAME,
    ).set_display_name("Get custom training job details")
    # ========================================================================
    # model deployment when condition is met
    # ========================================================================
    # every task in this dsl.Condition group is skipped when the trained
    # model's eval metric does not beat the threshold
    with dsl.Condition(
        training_job_details_task.outputs["eval_metric"] > eval_acc_threshold,
        name="model-deploy-decision",
    ):
        # ===================================================================
        # create model archive file
        # ===================================================================
        create_mar_task = generate_mar_file(
            model_display_name=cfg.MODEL_NAME,
            model_version=cfg.VERSION,
            handler=gs_serving_dependencies_path,
            model=training_job_details_task.outputs["model"],
        ).set_display_name("Create MAR file")
        # ===================================================================
        # build custom serving container running TorchServe
        # ===================================================================
        # build custom container for serving predictions using
        # the trained model artifacts served by TorchServe
        build_custom_serving_image_task = build_custom_serving_image(
            project=cfg.PROJECT_ID,
            gs_serving_dependencies_path=gs_serving_dependencies_path,
            serving_image_uri=cfg.SERVE_IMAGE_URI,
        ).set_display_name("Build custom serving image")
        # ===================================================================
        # create model resource
        # ===================================================================
        # upload model to vertex ai
        model_upload_task = (
            aip_components.ModelUploadOp(
                project=cfg.PROJECT_ID,
                display_name=cfg.MODEL_DISPLAY_NAME,
                serving_container_image_uri=cfg.SERVE_IMAGE_URI,
                serving_container_predict_route=cfg.SERVING_PREDICT_ROUTE,
                serving_container_health_route=cfg.SERVING_HEALTH_ROUTE,
                serving_container_ports=cfg.SERVING_CONTAINER_PORT,
                serving_container_environment_variables=create_mar_task.outputs[
                    "mar_env_var"
                ],
                artifact_uri=create_mar_task.outputs["mar_export_uri"],
            )
            .set_display_name("Upload model")
            .after(build_custom_serving_image_task)
        )
        # ===================================================================
        # create Vertex AI Endpoint
        # ===================================================================
        # create endpoint to deploy one or more models
        # An endpoint provides a service URL where the prediction requests are sent
        endpoint_create_task = (
            aip_components.EndpointCreateOp(
                project=cfg.PROJECT_ID,
                display_name=cfg.MODEL_NAME + "-endpoint",
            )
            .set_display_name("Create endpoint")
            .after(create_mar_task)
        )
        # ===================================================================
        # deploy model to Vertex AI Endpoint
        # ===================================================================
        # deploy models to endpoint to associates physical resources with the model
        # so it can serve online predictions
        model_deploy_task = aip_components.ModelDeployOp(
            endpoint=endpoint_create_task.outputs["endpoint"],
            model=model_upload_task.outputs["model"],
            deployed_model_display_name=cfg.MODEL_NAME,
            dedicated_resources_machine_type=cfg.SERVING_MACHINE_TYPE,
            dedicated_resources_min_replica_count=cfg.SERVING_MIN_REPLICA_COUNT,
            dedicated_resources_max_replica_count=cfg.SERVING_MAX_REPLICA_COUNT,
            traffic_split=cfg.SERVING_TRAFFIC_SPLIT,
        ).set_display_name("Deploy model to endpoint")
        # ===================================================================
        # test model deployment
        # ===================================================================
        # test model deployment by making online prediction requests
        test_instances = [
            "Jaw dropping visual affects and action! One of the best I have seen to date.",
            "Take away the CGI and the A-list cast and you end up with film with less punch.",
        ]
        predict_test_instances_task = make_prediction_request(
            project=cfg.PROJECT_ID,
            bucket=cfg.BUCKET,
            endpoint=model_deploy_task.outputs["gcp_resources"],
            instances=test_instances,
        ).set_display_name("Test model deployment making online predictions")
        predict_test_instances_task
```
Let’s unpack this code and understand a few things:
- A component’s inputs can be set from the pipeline's inputs (passed as arguments) or they can depend on the output of other components within this pipeline. For example, `ModelUploadOp` depends on custom serving container image URI from `build_custom_serving_image` task along with the pipeline’s inputs such as project id.
- `kfp.dsl.Condition` is a control structure with a group of tasks which runs only when the condition is met. In this pipeline, model deployment steps run only when the trained model performance exceeds the set threshold. If not, those steps are skipped.
- Each component in the pipeline runs within its own container image. You can specify machine type for each pipeline step such as CPU, GPU and memory limits. By default, each component runs as a Vertex AI CustomJob using an e2-standard-4 machine.
- By default, pipeline execution caching is enabled. Vertex AI Pipelines service checks to see whether an execution of each pipeline step exists in Vertex ML metadata. It uses a combination of pipeline name, step’s inputs, output and component specification. When a matching execution already exists, the step is skipped and thereby reducing costs. Execution caching can be turned off at task level or at pipeline level.
Following is the runtime graph generated for this pipeline

To learn more about building pipelines, refer to the [building Kubeflow pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/build-pipeline#build-pipeline) section, and follow the [pipelines samples and tutorials](https://cloud.google.com/vertex-ai/docs/pipelines/notebooks#general-tutorials).
## Submit Pipeline
#### Compile Pipeline Specification as JSON
After defining the pipeline, it must be compiled for [executing on Vertex AI Pipeline services](https://cloud.google.com/vertex-ai/docs/pipelines/run-pipeline). When the pipeline is compiled, the KFP SDK analyzes the data dependencies between the components to create a directed acyclic graph. The compiled pipeline is in JSON format with all information required to run the pipeline.
```
PIPELINE_JSON_SPEC_PATH = "./pipelines/pytorch_text_classifier_pipeline_spec.json"
compiler.Compiler().compile(
pipeline_func=pytorch_text_classifier_pipeline, package_path=PIPELINE_JSON_SPEC_PATH
)
```
#### Submit Pipeline for Execution on Vertex AI Pipelines
Pipeline is submitted to Vertex AI Pipelines by defining a PipelineJob using Vertex AI SDK for Python client, passing necessary pipeline inputs.
```
# initialize Vertex AI SDK
aiplatform.init(project=PROJECT_ID, location=REGION)
# define pipeline parameters
# NOTE: These parameters can be included in the pipeline config file as needed
PIPELINE_JOB_ID = f"pipeline-{APP_NAME}-{get_timestamp()}"
TRAIN_APP_CODE_PATH = f"{BUCKET_NAME}/{APP_NAME}/train/"
SERVE_DEPENDENCIES_PATH = f"{BUCKET_NAME}/{APP_NAME}/serve/"
pipeline_params = {
"pipeline_job_id": PIPELINE_JOB_ID,
"gs_train_script_path": TRAIN_APP_CODE_PATH,
"gs_serving_dependencies_path": SERVE_DEPENDENCIES_PATH,
"eval_acc_threshold": 0.87,
"is_hp_tuning_enabled": "n",
}
# define pipeline job
pipeline_job = pipeline_jobs.PipelineJob(
display_name=cfg.PIPELINE_NAME,
job_id=PIPELINE_JOB_ID,
template_path=PIPELINE_JSON_SPEC_PATH,
pipeline_root=PIPELINE_ROOT,
parameter_values=pipeline_params,
enable_caching=True,
)
```
**When the pipeline is submitted, the logs show a link to view the pipeline run on Google Cloud Console or access the run by opening [Pipelines dashboard on Vertex AI](https://console.cloud.google.com/vertex-ai/pipelines)**
```
# submit pipeline job for execution
response = pipeline_job.run(sync=True)
response
```
## Monitoring the Pipeline
You can monitor the progress of a pipeline execution by navigating to the [Vertex AI Pipelines dashboard](https://console.cloud.google.com/vertex-ai/pipelines).
```
INFO:google.cloud.aiplatform.pipeline_jobs:Creating PipelineJob
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob created. Resource name: projects/<project-id>/locations/<region>/pipelineJobs/pipeline-finetuned-bert-classifier-20220119061941
INFO:google.cloud.aiplatform.pipeline_jobs:To use this PipelineJob in another session:
INFO:google.cloud.aiplatform.pipeline_jobs:pipeline_job = aiplatform.PipelineJob.get('projects/<project-id>/locations/<region>/pipelineJobs/pipeline-finetuned-bert-classifier-20220119061941')
INFO:google.cloud.aiplatform.pipeline_jobs:View Pipeline Job:
https://console.cloud.google.com/vertex-ai/locations/region/pipelines/runs/pipeline-finetuned-bert-classifier-20220119061941?project=<project-id>
```
#### Component Execution Logs
Since every step in the pipeline runs in its own container or as a remote job (such as Dataflow, Dataproc job), you can view the step logs by clicking on "View Logs" button on a step.

#### Artifacts and Lineage
In the pipeline graph, you can notice the small boxes after each step. Those are artifacts generated from the step. For example, "Create MAR file" step generates MAR file as an artifact. Click on the artifact to know more details about it.

You can track lineage of an artifact describing its relationship with the steps in the pipeline. Vertex AI Pipelines automatically tracks the metadata and lineage. This lineage aids in establishing model governance and reproducibility. Click on "View Lineage" button on an artifact and it shows you the lineage graph as below.

#### Comparing Pipeline runs with the Vertex AI SDK
When running pipeline executions for different experiments, you may want to compare the metrics across the pipeline runs. You can [compare pipeline runs](https://cloud.google.com/vertex-ai/docs/pipelines/visualize-pipeline#compare_pipeline_runs_using) from the Vertex AI Pipelines dashboard.
Alternatively, you can use `aiplatform.get_pipeline_df()` method from Vertex AI SDK for Python that fetches pipeline execution metadata for a pipeline and returns a Pandas dataframe.
```
# underscores are not supported in the pipeline name, so
# replace underscores with hyphen
df_pipeline = aiplatform.get_pipeline_df(pipeline=cfg.PIPELINE_NAME.replace("_", "-"))
df_pipeline
```
## Cleaning up
### Cleaning up training and deployment resources
To clean up all Google Cloud resources used in this notebook, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Training Jobs
- Model
- Endpoint
- Cloud Storage Bucket
- Container Images
- Pipeline runs
Set flags for the resource type to be deleted
```
delete_custom_job = False
delete_hp_tuning_job = False
delete_endpoint = False
delete_model = False
delete_bucket = False
delete_image = False
delete_pipeline_job = False
```
Define clients for jobs, models and endpoints
```
# API Endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex AI location root path for your dataset, model and endpoint resources
PARENT = f"projects/{PROJECT_ID}/locations/{REGION}"
client_options = {"api_endpoint": API_ENDPOINT}
# Initialize Vertex SDK
aiplatform.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
# functions to create client
def create_job_client():
    """Return a JobServiceClient bound to the regional API endpoint."""
    return aip.JobServiceClient(client_options=client_options)
def create_model_client():
    """Return a ModelServiceClient bound to the regional API endpoint."""
    return aip.ModelServiceClient(client_options=client_options)
def create_endpoint_client():
    """Return an EndpointServiceClient bound to the regional API endpoint."""
    return aip.EndpointServiceClient(client_options=client_options)
def create_pipeline_client():
    """Return a PipelineServiceClient bound to the regional API endpoint."""
    return aip.PipelineServiceClient(client_options=client_options)
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["pipeline"] = create_pipeline_client()
```
Define functions to list the jobs, models and endpoints starting with APP_NAME defined earlier in the notebook
```
def list_custom_jobs():
    """Return (resource name, display name) pairs of custom jobs prefixed with APP_NAME."""
    matches = []
    for job in clients["job"].list_custom_jobs(parent=PARENT):
        info = MessageToDict(job._pb)
        if info["displayName"].startswith(APP_NAME):
            matches.append((info["name"], info["displayName"]))
    return matches
def list_hp_tuning_jobs():
    """Return (resource name, display name) pairs of HP-tuning jobs prefixed with APP_NAME."""
    matches = []
    for job in clients["job"].list_hyperparameter_tuning_jobs(parent=PARENT):
        info = MessageToDict(job._pb)
        if info["displayName"].startswith(APP_NAME):
            matches.append((info["name"], info["displayName"]))
    return matches
def list_models():
    """Return (resource name, display name) pairs of models prefixed with APP_NAME."""
    matches = []
    for model in clients["model"].list_models(parent=PARENT):
        info = MessageToDict(model._pb)
        if info["displayName"].startswith(APP_NAME):
            matches.append((info["name"], info["displayName"]))
    return matches
def list_endpoints():
    """Return (resource name, display name) pairs of endpoints prefixed with APP_NAME."""
    matches = []
    for endpoint in clients["endpoint"].list_endpoints(parent=PARENT):
        info = MessageToDict(endpoint._pb)
        if info["displayName"].startswith(APP_NAME):
            matches.append((info["name"], info["displayName"]))
    return matches
def list_pipelines():
    """Return resource names of pipeline runs whose display name equals cfg.PIPELINE_NAME, ordered by end time."""
    request = aip.ListPipelineJobsRequest(
        parent=PARENT, filter=f'display_name="{cfg.PIPELINE_NAME}"', order_by="end_time"
    )
    response = clients["pipeline"].list_pipeline_jobs(request=request)
    return [MessageToDict(row._pb)["name"] for row in response]
```
### Deleting custom training jobs
```
# Delete the custom training using the Vertex AI fully qualified identifier for the custom training
try:
if delete_custom_job:
custom_jobs = list_custom_jobs()
for job_id, job_name in custom_jobs:
print(f"Deleting job {job_id} [{job_name}]")
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
```
### Deleting hyperparameter tuning jobs
```
# Delete the hyperparameter tuning jobs using the Vertex AI fully qualified identifier for the hyperparameter tuning job
try:
if delete_hp_tuning_job:
hp_tuning_jobs = list_hp_tuning_jobs()
for job_id, job_name in hp_tuning_jobs:
print(f"Deleting job {job_id} [{job_name}]")
clients["job"].delete_hyperparameter_tuning_job(name=job_id)
except Exception as e:
print(e)
```
### Undeploy models and Delete endpoints
```
# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
if delete_endpoint:
endpoints = list_endpoints()
for endpoint_id, endpoint_name in endpoints:
endpoint = aiplatform.Endpoint(endpoint_id)
# undeploy models from the endpoint
print(f"Undeploying all deployed models from the endpoint {endpoint_name}")
endpoint.undeploy_all(sync=True)
# deleting endpoint
print(f"Deleting endpoint {endpoint_id} [{endpoint_name}]")
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
```
### Deleting models
```
# Delete the model using the Vertex AI fully qualified identifier for the model
try:
if delete_model:
models = list_models()
for model_id, model_name in models:
print(f"Deleting model {model_id} [{model_name}]")
clients["model"].delete_model(name=model_id)
except Exception as e:
print(e)
```
### Deleting pipeline runs
```
# Delete the pipeline execution using the Vertex AI fully qualified identifier for the pipeline job
try:
    if delete_pipeline_job:
        pipelines = list_pipelines()
        # NOTE(review): only the first (most recently ended) matching run is
        # deleted here; drop the [:1] slice to delete every matching run.
        for pipeline_name in pipelines[:1]:
            print(f"Deleting pipeline run {pipeline_name}")
            if delete_custom_job:
                print("\t Deleting underlying custom jobs")
                pipeline_job = clients["pipeline"].get_pipeline_job(name=pipeline_name)
                pipeline_job = MessageToDict(pipeline_job._pb)
                task_details = pipeline_job["jobDetail"]["taskDetails"]
                # BUG FIX: this loop previously iterated over the undefined
                # name `tasks` (NameError); it must walk `task_details`.
                for task in task_details:
                    if "containerDetail" in task["executorDetail"]:
                        custom_job_id = task["executorDetail"]["containerDetail"][
                            "mainJob"
                        ]
                        print(
                            f"\t Deleting custom job {custom_job_id} for task {task['taskName']}"
                        )
                        clients["job"].delete_custom_job(name=custom_job_id)
            clients["pipeline"].delete_pipeline_job(name=pipeline_name)
except Exception as e:
    print(e)
```
### Delete contents from the staging bucket
---
***NOTE: Everything in this Cloud Storage bucket will be DELETED. Please run it with caution.***
---
```
if delete_bucket and "BUCKET_NAME" in globals():
print(f"Deleting all contents from the bucket {BUCKET_NAME}")
shell_output = ! gsutil du -as $BUCKET_NAME
print(
f"Size of the bucket {BUCKET_NAME} before deleting = {shell_output[0].split()[0]} bytes"
)
# uncomment below line to delete contents of the bucket
# ! gsutil rm -r $BUCKET_NAME
shell_output = ! gsutil du -as $BUCKET_NAME
if float(shell_output[0].split()[0]) > 0:
print(
"PLEASE UNCOMMENT LINE TO DELETE BUCKET. CONTENT FROM THE BUCKET NOT DELETED"
)
print(
f"Size of the bucket {BUCKET_NAME} after deleting = {shell_output[0].split()[0]} bytes"
)
```
### Delete images from Container Registry
Deletes all the container images created in this tutorial with prefix defined by variable APP_NAME from the registry. All associated tags are also deleted.
```
gcr_images = !gcloud container images list --repository=gcr.io/$PROJECT_ID --filter="name~"$APP_NAME
if delete_image:
for image in gcr_images:
if image != "NAME": # skip header line
print(f"Deleting image {image} including all tags")
!gcloud container images delete $image --force-delete-tags --quiet
```
### Cleaning up Notebook Environment
After you are done experimenting, you can either STOP or DELETE the AI Notebook instance to prevent any charges. If you want to save your work, you can choose to stop the instance instead.
```
# Stopping Notebook instance
gcloud notebooks instances stop example-instance --location=us-central1-a
# Deleting Notebook instance
gcloud notebooks instances delete example-instance --location=us-central1-a
```
| github_jupyter |
# Energy Meter Examples
## Monsoon Power Monitor
*NOTE*: the **monsoon.py** tool is required to collect data from the power monitor.
Instructions on how to install it can be found here:
https://github.com/ARM-software/lisa/wiki/Energy-Meters-Requirements#monsoon-power-monitor.
```
import logging
from conf import LisaLogging
LisaLogging.setup()
```
#### Import required modules
```
# Generate plots inline
%matplotlib inline
import os
# Support to access the remote target
import devlib
from env import TestEnv
# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Ramp
```
## Target Configuration
The target configuration is used to describe and configure your test environment.
You can find more details in **examples/utils/testenv_example.ipynb**.
```
# Let's assume the monsoon binary is installed in the following path
MONSOON_BIN = os.path.join(os.getenv('LISA_HOME'), 'tools', 'scripts', 'monsoon.py')
# Setup target configuration
my_conf = {
# Target platform and board
"platform" : 'android',
"board" : 'wahoo',
# Android tools
"ANDROID_HOME" : "/home/derkling/Code/lisa/tools/android-sdk-linux",
# Folder where all the results will be collected
"results_dir" : "EnergyMeter_Monsoon",
# Define devlib modules to load
"exclude_modules" : [ 'hwmon' ],
# Energy Meters Configuration for ARM Energy Probe
"emeter" : {
"instrument" : "monsoon",
"conf" : {
'monsoon_bin' : MONSOON_BIN,
},
},
# Tools required by the experiments
"tools" : [ 'trace-cmd', 'rt-app' ],
# Comment this line to calibrate RTApp in your own platform
"rtapp-calib" : {"0": 360, "1": 142, "2": 138, "3": 352, "4": 352, "5": 353},
}
# Once powered the Monsoon Power Monitor does not enable the output voltage.
# Since the devlib's API expects that the device is powered and available for
# an ADB connection, let's manually power on the device before initializing the TestEnv
# Power on the device
!$MONSOON_BIN --device /dev/ttyACM1 --voltage 4.2
# Enable USB passthrough to be able to connect the device
!$MONSOON_BIN --usbpassthrough on
# Initialize a test environment using:
te = TestEnv(my_conf, wipe=False, force_new=True)
target = te.target
# If your device support charge via USB, let's disable it in order
# to read the overall power consumption from the main output channel
# For example, this is the API for a Pixel phone:
te.target.write_value('/sys/class/power_supply/battery/charging_enabled', 0)
```
## Workload Execution and Power Consumptions Samping
Detailed information on RTApp can be found in **examples/wlgen/rtapp_example.ipynb**.
Each **EnergyMeter** derived class has two main methods: **reset** and **report**.
- The **reset** method will reset the energy meter and start sampling from channels specified in the target configuration. <br>
- The **report** method will stop capture and will retrieve the energy consumption data. This returns an EnergyReport composed of the measured channels energy and the report file. Each of the samples can also be obtained, as you can see below.
```
# Create and RTApp RAMP task
rtapp = RTA(te.target, 'ramp', calibration=te.calibration())
rtapp.conf(kind='profile',
params={
'ramp' : Ramp(
start_pct = 60,
end_pct = 20,
delta_pct = 5,
time_s = 0.5).get()
})
# EnergyMeter Start
te.emeter.reset()
rtapp.run(out_dir=te.res_dir)
# EnergyMeter Stop and samples collection
nrg_report = te.emeter.report(te.res_dir)
logging.info("Collected data:")
!tree $te.res_dir
```
## Power Measurements Data
```
logging.info("Measured channels energy:")
logging.info("%s", nrg_report.channels)
logging.info("Generated energy file:")
logging.info(" %s", nrg_report.report_file)
!cat $nrg_report.report_file
logging.info("Samples collected for the Output and Battery channels (only first 10)")
samples_file = os.path.join(te.res_dir, 'samples.csv')
!head $samples_file
logging.info("DataFrame of collected samples (only first 5)")
nrg_report.data_frame.head()
logging.info("Plot of collected power samples")
axes = nrg_report.data_frame[('output', 'power')].plot(
figsize=(16,8), drawstyle='steps-post');
axes.set_title('Power samples');
axes.set_xlabel('Time [s]');
axes.set_ylabel('Output power [W]');
logging.info("Plot of collected power samples")
nrg_report.data_frame.describe(percentiles=[0.90, 0.95, 0.99]).T
logging.info("Power distribution")
axes = nrg_report.data_frame[('output', 'power')].plot(
kind='hist', bins=32,
figsize=(16,8));
axes.set_title('Power Histogram');
axes.set_xlabel('Output power [W] buckets');
axes.set_ylabel('Samples per bucket');
```
| github_jupyter |
<a href="https://colab.research.google.com/github/conquerv0/Pynaissance/blob/master/1.%20Basic%20Framework/Data_Visualization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Data Visualization Guide
This notebook features data visualization techniques in areas such as static 2D, 3D plotting and interactive 2D plotting using packages `matplotlib, ploly`.
__I. Static 2D Plotting__
```
# Import necessary packages and configuration.
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
# Optional for displaying inline
%matplotlib inline
# Sample data source for plotting
np.random.seed(1000)
y = np.random.standard_normal(20)
x = np.arange(len(y))
# Plotting
plt.plot(x, y)
plt.plot(y.cumsum())
# Some modification to plotting: Turning off the grid and create equal scalling for the two axes.
plt.plot(y.cumsum())
plt.grid(False)
plt.axis('equal')
```
Some options for `plt.axis()`
Parameter | Description
------------ | -------------
Empty | Returns current axis limit
off | Turns axis lines and labels off
equal | Leads to equal scalling
scaled | Produces equal scaling via dimension changes
tight | Makes all data visible(tighten limits)
image | Makes all data visible(with data limits)
[xmin, xmax, ymin, ymax] | Sets limits to given list of values
```
plt.plot(y.cumsum())
plt.xlim(-1, 20)
plt.ylim(np.min(y.cumsum())-1,
np.max(y.cumsum()) + 1)
# Add labelling in the plot for Readability
plt.xlabel('Index')
plt.ylabel('Value')
plt.title('Simple Plot')
plt.legend(loc=0)
```
Creating mutilple plots on one line
```
y = np.random.standard_normal((20, 2)).cumsum(axis=0)
plt.figure(figsize=(10, 6))
plt.subplot(121)
plt.plot(y[:, 0], lw=1.5, label='1st')
plt.plot(y[:, 0], 'r')
plt.xlabel('index')
plt.ylabel('value')
plt.title('1st Data Set')
# Second plot
plt.subplot(122)
plt.bar(np.arange(len(y)), y[:,1], width=0.5, color='b', label='2nd')
plt.legend(loc=0)
plt.xlabel('index')
plt.title('2nd Data Set')
```
__Other Plotting Style__
```
# Regular Scatter plot
y = np.random.standard_normal((1000, 2))
plt.figure(figsize=(10, 6))
plt.scatter(y[:, 0], y[:, 1], marker='o')
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Scatter Plot')
# Integrate Color map to Scatter plot
c = np.random.randint(0, 10, len(y))
plt.figure(figsize=(10, 6))
plt.scatter(y[:, 0], y[:, 1], c=c, cmap='coolwarm', marker='o') # Define the dot to be marked as a bigger dot
plt.colorbar()
plt.xlabel('1st')
plt.ylabel('2nd')
plt.title('Scatter Plot with Color Map')
# Histogram
plt.figure(figsize=(10, 6))
plt.hist(y, label=['1st', '2nd'], bins=30)
plt.legend(loc=0)
plt.xlabel('value')
plt.ylabel('frequency')
plt.title('Histogram')
# Boxplot
fig, ax = plt.subplots(figsize=(10, 6))
plt.boxplot(y)
plt.setp(ax, xticklabels=['1st', '2nd'])
plt.xlabel('data set')
plt.ylabel('value')
plt.title('Boxplot')
# Plotting of mathematical function
def func(x):
    """Demo integrand for the shaded-area plot: f(x) = 0.5 * exp(x) + 1."""
    return 0.5 * np.exp(x) + 1
a, b = 0.5, 1.5
x = np.linspace(0, 2)
y = func(x)
Ix = np.linspace(a, b) # Integral limits of x value
Iy = func(Ix) # Integral limits of y value
verts = [(a, 0)] + list(zip(Ix, Iy)) + [(b, 0)]
#
from matplotlib.patches import Polygon
fig, ax = plt.subplots(figsize = (10, 6))
plt.plot(x, y, 'b', linewidth=2)
plt.ylim(bottom=0)
poly = Polygon(verts, facecolor='0.7', edgecolor='0.5')
ax.add_patch(poly)
plt.text(0.5 * (a+b), 1, r'$\int_a^b f(x)\mathrm{d}x$',
horizontalalignment='center', fontsize=20) # Labelling for plot
plt.figtext(0.9, 0.075, '$x$')
plt.figtext(0.075, 0.9, '$f(x)$')
ax.set_xticks((a, b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([func(a), func(b)])
ax.set_yticklabels(('$f(a)$', '$f(b)$'))
```
__II. Static 3D Plotting__
Using `np.meshgrid()` function to generate a two-dimensional coordinates system out of two one-dimensional ndarray.
```
# Set a call option data values with
# Strike values = [50, 150]
# Time-to-Maturity = [0.5, 2.5]
strike = np.linspace(50, 150, 24)
ttm = np.linspace(0.5, 2.5, 24)
strike, ttm = np.meshgrid(strike, ttm)
strike[:2].round(2)
# Calculate implied volatility
iv = (strike - 100) ** 2 / (100 * strike) / ttm
iv[:5, :3]
```
Plotting a 3D figure using the generated Call options data with `Axes3D`
```
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10, 6))
ax = fig.gca(projection = '3d')
surf = ax.plot_surface(strike, ttm, iv, rstride=2, cstride=2,
cmap = plt.cm.coolwarm, linewidth = 0.5, antialiased=True)
ax.set_xlabel('Strike Price')
ax.set_ylabel('Time-to-Maturity')
ax.set_zlabel('Implied Volatility')
fig.colorbar(surf, shrink = 0.5, aspect =5)
```
| github_jupyter |
# Doc2Vec to wikipedia articles
We replicate the experiments of **Document Embedding with Paragraph Vectors** (http://arxiv.org/abs/1507.07998).
In this paper, the authors reported only DBOW results on Wikipedia data, so we replicate the experiments using not only DBOW but also DM.
## Basic Setup
Let's import Doc2Vec module.
```
from gensim.corpora.wikicorpus import WikiCorpus
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from pprint import pprint
import multiprocessing
```
## Preparing the corpus
First, download the dump of all Wikipedia articles from [here](http://download.wikimedia.org/enwiki/) (you want the file enwiki-latest-pages-articles.xml.bz2, or enwiki-YYYYMMDD-pages-articles.xml.bz2 for date-specific dumps).
Second, convert the articles to a WikiCorpus. WikiCorpus constructs a corpus from a Wikipedia (or other MediaWiki-based) database dump.
For more details on WikiCorpus, you should access [Corpus from a Wikipedia dump](https://radimrehurek.com/gensim/corpora/wikicorpus.html).
```
wiki = WikiCorpus("enwiki-latest-pages-articles.xml.bz2")
#wiki = WikiCorpus("enwiki-YYYYMMDD-pages-articles.xml.bz2")
```
Define **TaggedWikiDocument** class to convert WikiCorpus into suitable form for Doc2Vec.
```
class TaggedWikiDocument(object):
    """Adapt a WikiCorpus so iteration yields TaggedDocument objects.

    Each article's tokens become the document words and its title becomes
    the single document tag, which is the input format Doc2Vec expects.
    """
    def __init__(self, wiki):
        self.wiki = wiki
        # ask the corpus to yield (page_id, title) metadata with each article
        self.wiki.metadata = True
    def __iter__(self):
        for content, (page_id, title) in self.wiki.get_texts():
            tokens = [token.decode("utf-8") for token in content]
            yield TaggedDocument(tokens, [title])
documents = TaggedWikiDocument(wiki)
```
## Preprocessing
To match the vocabulary size used in the original paper, we first calculate the optimal **min_count** parameter.
```
pre = Doc2Vec(min_count=0)
pre.scan_vocab(documents)
for num in range(0, 20):
print('min_count: {}, size of vocab: '.format(num), pre.scale_vocab(min_count=num, dry_run=True)['memory']['vocab']/700)
```
In the original paper, they set the vocabulary size 915,715. It seems similar size of vocabulary if we set min_count = 19. (size of vocab = 898,725)
## Training the Doc2Vec Model
To train Doc2Vec model by several method, DBOW and DM, we define the list of models.
```
cores = multiprocessing.cpu_count()
models = [
# PV-DBOW
Doc2Vec(dm=0, dbow_words=1, size=200, window=8, min_count=19, iter=10, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=200, window=8, min_count=19, iter =10, workers=cores),
]
models[0].build_vocab(documents)
print(str(models[0]))
models[1].reset_from(models[0])
print(str(models[1]))
```
Now we’re ready to train Doc2Vec of the English Wikipedia.
```
for model in models:
%%time model.train(documents)
```
## Similarity interface
After that, let's test both models! The DBOW model shows results similar to the original paper. First, we calculate the cosine similarity of "Machine learning" using the Paragraph Vector. Word Vectors and Document Vectors are stored separately; we have to add .docvecs after the model name to extract Document Vectors from a Doc2Vec model.
```
for model in models:
print(str(model))
pprint(model.docvecs.most_similar(positive=["Machine learning"], topn=20))
```
DBOW model interpret the word 'Machine Learning' as a part of Computer Science field, and DM model as Data Science related field.
Second, we calculate the cosine similarity of "Lady Gaga" using the Paragraph Vector.
```
for model in models:
print(str(model))
pprint(model.docvecs.most_similar(positive=["Lady Gaga"], topn=10))
```
DBOW model reveal the similar singer in the U.S., and DM model understand that many of Lady Gaga's songs are similar with the word "Lady Gaga".
Third, we calculate the cosine similarity of "Lady Gaga" - "American" + "Japanese" using the Document Vector and Word Vectors. "American" and "Japanese" are Word Vectors, not Paragraph Vectors. Word Vectors are already converted to lowercase by WikiCorpus.
```
for model in models:
print(str(model))
vec = [model.docvecs["Lady Gaga"] - model["american"] + model["japanese"]]
pprint([m for m in model.docvecs.most_similar(vec, topn=11) if m[0] != "Lady Gaga"])
```
As a result, the DBOW model finds artists similar to Lady Gaga in Japan, such as 'Perfume', one of the most famous idol groups in Japan. On the other hand, the DM model's results don't include any Japanese artists in the top 10 similar documents; they are almost the same as the results without the vector arithmetic.
This results demonstrate that DBOW employed in the original paper is outstanding for calculating the similarity between Document Vector and Word Vector.
| github_jupyter |
<img src='images/pic1.jpg'/>
```
import warnings
warnings.filterwarnings("ignore")

# --- standard library ---
import csv
import datetime as dt
import os
import pickle
import re
import sqlite3
from datetime import datetime

# --- third-party ---
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn import metrics
from sklearn import svm
from sklearn.externals import joblib  # NOTE: removed in sklearn>=0.23; use `import joblib` on newer versions
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import GaussianNB
from skmultilearn.adapt import MLkNN  # BUG FIX: the module exports `MLkNN`, not `mlknn` (original line raised ImportError)
from skmultilearn.problem_transform import BinaryRelevance
from skmultilearn.problem_transform import ClassifierChain
from skmultilearn.problem_transform import LabelPowerset
from sqlalchemy import create_engine  # database connection
from wordcloud import WordCloud
```
# Stack Overflow: Tag Prediction
<h1>1. Business Problem </h1>
<h2> 1.1 Description </h2>
<p style='font-size:18px'><b> Description </b></p>
<p>
Stack Overflow is the largest, most trusted online community for developers to learn, share their programming knowledge, and build their careers.<br />
<br />
Stack Overflow is something which every programmer use one way or another. Each month, over 50 million developers come to Stack Overflow to learn, share their knowledge, and build their careers. It features questions and answers on a wide range of topics in computer programming. The website serves as a platform for users to ask and answer questions, and, through membership and active participation, to vote questions and answers up or down and edit questions and answers in a fashion similar to a wiki or Digg. As of April 2014 Stack Overflow has over 4,000,000 registered users, and it exceeded 10,000,000 questions in late August 2015. Based on the type of tags assigned to questions, the top eight most discussed topics on the site are: Java, JavaScript, C#, PHP, Android, jQuery, Python and HTML.<br />
<br />
</p>
<p style='font-size:18px'><b> Problem Statement </b></p>
Suggest the tags based on the content that was there in the question posted on Stackoverflow.
<h2> 1.2 Real World / Business Objectives and Constraints </h2>
1. Predict as many tags as possible with high precision and recall.
2. Incorrect tags could impact customer experience on StackOverflow.
3. No strict latency constraints.
<h1>2. Machine Learning problem </h1>
<h2> 2.1 Data </h2>
<h3> 2.1.1 Data Overview </h3>
Refer: https://www.kaggle.com/c/facebook-recruiting-iii-keyword-extraction/data
<br>
All of the data is in 2 files: Train and Test.<br />
<pre>
<b>Train.csv</b> contains 4 columns: Id,Title,Body,Tags.<br />
<b>Test.csv</b> contains the same columns but without the Tags, which you are to predict.<br />
<b>Size of Train.csv</b> - 6.75GB<br />
<b>Size of Test.csv</b> - 2GB<br />
<b>Number of rows in Train.csv</b> = 6034195<br />
</pre>
The questions are randomized and contains a mix of verbose text sites as well as sites related to math and programming. The number of questions from each site may vary, and no filtering has been performed on the questions (such as closed questions).<br />
<br />
__Data Field Explanation__
Dataset contains 6,034,195 rows. The columns in the table are:<br />
<pre>
<b>Id</b> - Unique identifier for each question<br />
<b>Title</b> - The question's title<br />
<b>Body</b> - The body of the question<br />
<b>Tags</b> - The tags associated with the question in a space-separated format (all lowercase, should not contain tabs '\t' or ampersands '&')<br />
</pre>
<br />
<h3>2.1.2 Example Data point </h3>
<pre>
<b>Title</b>: Implementing Boundary Value Analysis of Software Testing in a C++ program?
<b>Body </b>: <pre><code>
#include<
iostream>\n
#include<
stdlib.h>\n\n
using namespace std;\n\n
int main()\n
{\n
int n,a[n],x,c,u[n],m[n],e[n][4];\n
cout<<"Enter the number of variables";\n cin>>n;\n\n
cout<<"Enter the Lower, and Upper Limits of the variables";\n
for(int y=1; y<n+1; y++)\n
{\n
cin>>m[y];\n
cin>>u[y];\n
}\n
for(x=1; x<n+1; x++)\n
{\n
a[x] = (m[x] + u[x])/2;\n
}\n
c=(n*4)-4;\n
for(int a1=1; a1<n+1; a1++)\n
{\n\n
e[a1][0] = m[a1];\n
e[a1][1] = m[a1]+1;\n
e[a1][2] = u[a1]-1;\n
e[a1][3] = u[a1];\n
}\n
for(int i=1; i<n+1; i++)\n
{\n
for(int l=1; l<=i; l++)\n
{\n
if(l!=1)\n
{\n
cout<<a[l]<<"\\t";\n
}\n
}\n
for(int j=0; j<4; j++)\n
{\n
cout<<e[i][j];\n
for(int k=0; k<n-(i+1); k++)\n
{\n
cout<<a[k]<<"\\t";\n
}\n
cout<<"\\n";\n
}\n
} \n\n
system("PAUSE");\n
return 0; \n
}\n
</code></pre>\n\n
<p>The answer should come in the form of a table like</p>\n\n
<pre><code>
1 50 50\n
2 50 50\n
99 50 50\n
100 50 50\n
50 1 50\n
50 2 50\n
50 99 50\n
50 100 50\n
50 50 1\n
50 50 2\n
50 50 99\n
50 50 100\n
</code></pre>\n\n
<p>if the no of inputs is 3 and their ranges are\n
1,100\n
1,100\n
1,100\n
(could be varied too)</p>\n\n
<p>The output is not coming,can anyone correct the code or tell me what\'s wrong?</p>\n'
<b>Tags </b>: 'c++ c'
</pre>
<h1> 3. Exploratory Data Analysis </h1>
<h2> 3.1 Data Loading and Cleaning </h2>
<h3>3.1.1 Using Pandas with SQLite to Load the data</h3>
```
import zipfile
# Open Train.csv directly out of the zip archive without extracting it.
archive = zipfile.ZipFile('Train.zip', 'r')
csvfile = archive.open('Train.csv')
#Creating db file from csv
#Learn SQL: https://www.w3schools.com/sql/default.asp
# Stream the ~6.75GB CSV into SQLite in 180k-row chunks so the whole file
# never has to fit in memory.  Skipped entirely if train.db already exists.
if not os.path.isfile('train.db'):
    start = datetime.now()
    disk_engine = create_engine('sqlite:///train.db')
    start = dt.datetime.now()  # NOTE(review): overwrites the `start` set two lines up — one of the two is redundant
    chunksize = 180000
    j = 0
    index_start = 1
    # NOTE(review): names= is passed without header=0, so if Train.csv has a
    # header row it is ingested as a data row — confirm against the file.
    for df in pd.read_csv(csvfile, names=['Id', 'Title', 'Body', 'Tags'], chunksize=chunksize, iterator=True, encoding='utf-8', ):
        # Keep a globally increasing index across chunks.
        df.index += index_start
        j+=1
        print('{} rows'.format(j*chunksize))
        df.to_sql('data', disk_engine, if_exists='append')
        index_start = df.index[-1] + 1
    print("Time taken to run this cell :", datetime.now() - start)
```
<h3> 3.1.2 Counting the number of rows </h3>
```
# Sanity check: count how many rows were ingested into train.db.
if os.path.isfile('train.db'):
    start = datetime.now()
    con = sqlite3.connect('train.db')
    num_rows = pd.read_sql_query("""SELECT count(*) FROM data""", con)
    #Always remember to close the database
    print("Number of rows in the database :","\n",num_rows['count(*)'].values[0])
    con.close()
    print("Time taken to count the number of rows :", datetime.now() - start)
else:
    print("Please download the train.db file from drive or run the above cell to genarate train.db file")
```
<h3>3.1.3 Checking for duplicates </h3>
```
#Learn SQl: https://www.w3schools.com/sql/default.asp
# Collapse exact duplicate questions (same Title+Body+Tags) with GROUP BY,
# keeping the duplicate count per question in cnt_dup.
if os.path.isfile('train.db'):
    start = datetime.now()
    con = sqlite3.connect('train.db')
    df_no_dup = pd.read_sql_query('SELECT Title, Body, Tags, COUNT(*) as cnt_dup FROM data GROUP BY Title, Body, Tags', con)
    con.close()
    print("Time taken to run this cell :", datetime.now() - start)
else:
    print("Please download the train.db file from drive or run the first to genarate train.db file")
df_no_dup.head()
# we can observe that there are duplicates
print("number of duplicate questions :", num_rows['count(*)'].values[0]- df_no_dup.shape[0], "(",(1-((df_no_dup.shape[0])/(num_rows['count(*)'].values[0])))*100,"% )")
# number of times each question appeared in our database
df_no_dup.cnt_dup.value_counts()
print(df_no_dup.head())
start = datetime.now()
# Collect non-null tag strings and the number of space-separated tags in each.
aa_count=[]
hh=[]
for j in range(len(df_no_dup)):
    tex=df_no_dup['Tags'][j]
    #print(tex)
    if tex is not None:
        #print("heyram")
        #start=datetime.now()
        hh.append(tex)
        text=len(tex.split(" ") )
        #print(text)
        aa_count.append(text)
print(len(aa_count))
aaa=pd.DataFrame(aa_count,columns=['tag_count'])
hhh=pd.DataFrame(hh,columns=['Tags'])
# NOTE(review): this rebuild keeps only Tags and tag_count, so the Title and
# Body columns of df_no_dup appear to be discarded here; the later
# pd.DataFrame(df_no_dup, columns=['Title','Body','Tags']) below would then get
# NaN Title/Body — confirm against a run of the notebook.
df_no_dup=pd.concat([hhh,aaa],axis=1)
# adding a new feature number of tags per question
print("Time taken to run this cell :", datetime.now() - start)
df_no_dup.head()
np.where(pd.isnull(df_no_dup))
df_no_dup=df_no_dup.dropna()
start = datetime.now()
# NOTE(review): tag_count is recomputed here with apply(); this duplicates the
# loop above and overwrites its result.
df_no_dup["tag_count"] = df_no_dup["Tags"].apply(lambda text: len(text.split(" ")))
# adding a new feature number of tags per question
print("Time taken to run this cell :", datetime.now() - start)
df_no_dup.head()
# distribution of number of tags per question
df_no_dup.tag_count.value_counts()
#Creating a new database with no duplicates
if not os.path.isfile('train_no_dup.db'):
    disk_dup = create_engine("sqlite:///train_no_dup.db")
    no_dup = pd.DataFrame(df_no_dup, columns=['Title', 'Body', 'Tags'])
    no_dup.to_sql('no_dup_train',disk_dup)
#This method seems more appropriate to work with this much data.
#creating the connection with database file.
if os.path.isfile('train_no_dup.db'):
    start = datetime.now()
    con = sqlite3.connect('train_no_dup.db')
    tag_data = pd.read_sql_query("""SELECT Tags FROM no_dup_train""", con)
    #Always remember to close the database
    con.close()
    # Let's now drop unwanted column.
    tag_data.drop(tag_data.index[0], inplace=True)
    #Printing first 5 columns from our data frame
    tag_data.head()
    print("Time taken to run this cell :", datetime.now() - start)
else:
    print("Please download the train.db file from drive or run the above cells to genarate train.db file")
```
<h2> 3.2 Analysis of Tags </h2>
<h3> 3.2.1 Total number of unique tags </h3>
```
tag_data=tag_data.dropna()
# Taking only 0.5 million data points
#tag_data=tag_data[0:10000]
print(tag_data.head())
print(len(tag_data))
# Importing & Initializing the "CountVectorizer" object, which
#is scikit-learn's bag of words tool.
#by default 'split()' will tokenize each tag using space.
vectorizer = CountVectorizer(tokenizer = lambda x: x.split())
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of strings.
tag_dtm = vectorizer.fit_transform(tag_data['Tags'])
print("Number of data points :", tag_dtm.shape[0])
print("Number of unique tags :", tag_dtm.shape[1])
#'get_feature_names()' gives us the vocabulary.
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favour
# of get_feature_names_out() — confirm the pinned sklearn version.
tags = vectorizer.get_feature_names()
#Lets look at the tags we have.
print("Some of the tags we have :", tags[:10])
```
<h3> 3.2.3 Number of times a tag appeared </h3>
```
# https://stackoverflow.com/questions/15115765/how-to-access-sparse-matrix-elements
#Lets now store the document term matrix in a dictionary.
# Column-wise sum of the binary matrix = how often each tag occurs; .A1
# flattens the 1 x n_tags result into a plain 1-D array.
freqs = tag_dtm.sum(axis=0).A1
result = dict(zip(tags, freqs))
#print(result)
#Saving this dictionary to csv files.
if not os.path.isfile('tag_counts_dict_dtm.csv'):
    with open('tag_counts_dict_dtm.csv', 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in result.items():
            writer.writerow([key, value])
tag_df = pd.read_csv("tag_counts_dict_dtm.csv", names=['Tags', 'Counts'])
tag_df.head()
# Sort tags by frequency, most common first.
tag_df_sorted = tag_df.sort_values(['Counts'], ascending=False)
tag_counts = tag_df_sorted['Counts'].values
# Full distribution of tag usage (heavily skewed, long tail).
plt.plot(tag_counts)
plt.title("Distribution of number of times tag appeared questions")
plt.grid()
plt.xlabel("Tag number")
plt.ylabel("Number of times tag appeared")
plt.show()
# Zoom into progressively smaller prefixes of the sorted counts.
plt.plot(tag_counts[0:10000])
plt.title('first 10k tags: Distribution of number of times tag appeared questions')
plt.grid()
plt.xlabel("Tag number")
plt.ylabel("Number of times tag appeared")
plt.show()
print(len(tag_counts[0:10000:25]), tag_counts[0:10000:25])
plt.plot(tag_counts[0:1000])
plt.title('first 1k tags: Distribution of number of times tag appeared questions')
plt.grid()
plt.xlabel("Tag number")
plt.ylabel("Number of times tag appeared")
plt.show()
print(len(tag_counts[0:1000:5]), tag_counts[0:1000:5])
plt.plot(tag_counts[0:500])
plt.title('first 500 tags: Distribution of number of times tag appeared questions')
plt.grid()
plt.xlabel("Tag number")
plt.ylabel("Number of times tag appeared")
plt.show()
print(len(tag_counts[0:500:5]), tag_counts[0:500:5])
plt.plot(tag_counts[0:100], c='b')
plt.scatter(x=list(range(0,100,5)), y=tag_counts[0:100:5], c='orange', label="quantiles with 0.05 intervals")
# quantiles with 0.25 difference
plt.scatter(x=list(range(0,100,25)), y=tag_counts[0:100:25], c='m', label = "quantiles with 0.25 intervals")
for x,y in zip(list(range(0,100,25)), tag_counts[0:100:25]):
    # NOTE(review): annotate's `s=` keyword was removed in matplotlib 3.3 —
    # newer versions require `text=` (or positional); confirm pinned version.
    plt.annotate(s="({} , {})".format(x,y), xy=(x,y), xytext=(x-0.05, y+500))
plt.title('first 100 tags: Distribution of number of times tag appeared questions')
plt.grid()
plt.xlabel("Tag number")
plt.ylabel("Number of times tag appeared")
plt.legend()
plt.show()
print(len(tag_counts[0:100:5]), tag_counts[0:100:5])
# Store tags greater than 10K in one list
lst_tags_gt_10k = tag_df[tag_df.Counts>10000].Tags
#Print the length of the list
print ('{} Tags are used more than 10000 times'.format(len(lst_tags_gt_10k)))
# Store tags greater than 100K in one list
lst_tags_gt_100k = tag_df[tag_df.Counts>100000].Tags
#Print the length of the list.
print ('{} Tags are used more than 100000 times'.format(len(lst_tags_gt_100k)))
```
<b>Observations:</b><br />
1. There are total 153 tags which are used more than 10000 times.
2. 14 tags are used more than 100000 times.
3. Most frequent tag (i.e. c#) is used 331505 times.
4. Since some tags occur much more frequently than others, the micro-averaged F1-score is the appropriate metric for this problem.
<h3> 3.2.4 Tags Per Question </h3>
```
#Storing the count of tag in each question in list 'tag_count'
# Row-wise sum of the binary document-term matrix = number of tags per question.
tag_quest_count = tag_dtm.sum(axis=1).tolist()
#Converting each value in the 'tag_quest_count' to integer.
# tolist() on the column matrix yields [[v], [v], ...]; this flattens and casts.
tag_quest_count=[int(j) for i in tag_quest_count for j in i]
print ('We have total {} datapoints.'.format(len(tag_quest_count)))
print(tag_quest_count[:5])
print( "Maximum number of tags per question: %d"%max(tag_quest_count))
print( "Minimum number of tags per question: %d"%min(tag_quest_count))
print( "Avg. number of tags per question: %f"% ((sum(tag_quest_count)*1.0)/len(tag_quest_count)))
# Histogram of tags-per-question (most questions carry 2-3 tags).
sns.countplot(tag_quest_count, palette='gist_rainbow')
plt.title("Number of tags in the questions ")
plt.xlabel("Number of Tags")
plt.ylabel("Number of questions")
plt.show()
```
<b>Observations:</b><br />
1. Maximum number of tags per question: 5
2. Minimum number of tags per question: 1
3. Avg. number of tags per question: 2.899
4. Most of the questions are having 2 or 3 tags
<h3> 3.2.5 The top 20 tags </h3>
```
# Bar chart of the most frequent tags.
# CONSISTENCY FIX: the section header and title say "top 20" but the original
# plotted 30 bars, and passed the full (500-entry) Tags column to plt.xticks
# (a length mismatch that newer matplotlib rejects).  Plot 20 consistently.
i=np.arange(20)
tag_df_sorted.head(20).plot(kind='bar')
plt.title('Frequency of top 20 tags')
plt.xticks(i, tag_df_sorted['Tags'][:20])
plt.xlabel('Tags')
plt.ylabel('Counts')
plt.show()
```
<b>Observations:</b><br />
1. Majority of the most frequent tags are programming language.
2. C# is the top most frequent programming language.
3. Android, IOS, Linux and windows are among the top most frequent operating systems.
<h3> 3.3 Cleaning and preprocessing of Questions </h3>
<h3> 3.3.1 Preprocessing </h3>
<ol>
<li> Sample 0.5M data points </li>
<li> Separate out code-snippets from Body </li>
<li> Remove special characters from question title and description (not in code)</li>
<li> Remove stop words (Except 'C') </li>
<li> Remove HTML Tags </li>
<li> Convert all the characters into small letters </li>
<li> Use SnowballStemmer to stem the words </li>
</ol>
```
# Fetch the NLTK stop-word corpus required by stopwords.words() below.
import nltk
nltk.download('stopwords')
def striphtml(data):
    """Replace every HTML tag in *data* with a single space and return the result.

    *data* is coerced with str() first, so non-string input is accepted.
    """
    tag_pattern = re.compile('<.*?>')
    return re.sub(tag_pattern, ' ', str(data))
# English stop-word set and Snowball stemmer used by the preprocessing loop below.
stop_words = set(stopwords.words('english'))
stemmer = SnowballStemmer("english")
#http://www.sqlitetutorial.net/sqlite-python/create-tables/
def create_connection(db_file):
    """ create a database connection to the SQLite database
        specified by db_file
    :param db_file: database file
    :return: Connection object or None
    """
    try:
        conn = sqlite3.connect(db_file)
        return conn
    except sqlite3.Error as e:  # BUG FIX: bare `Error` was an undefined name (NameError on failure)
        print(e)
    return None
def create_table(conn, create_table_sql):
    """ create a table from the create_table_sql statement
    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE statement
    :return:
    """
    try:
        c = conn.cursor()
        c.execute(create_table_sql)
    except sqlite3.Error as e:  # BUG FIX: bare `Error` was an undefined name (NameError on failure)
        print(e)
def checkTableExists(dbcon):
    """Print the name of the first table in the database (if any) and return
    the total number of tables.

    :param dbcon: open sqlite3 Connection
    :return: number of tables in the database
    """
    cursr = dbcon.cursor()
    query = "select name from sqlite_master where type='table'"  # renamed: was shadowing builtin `str`
    table_names = cursr.execute(query)
    print("Tables in the database:")
    tables = table_names.fetchall()
    if tables:  # BUG FIX: original indexed tables[0][0] unconditionally -> IndexError on an empty DB
        print(tables[0][0])
    return(len(tables))
def create_database_table(database, query):
    """Open *database*, ensure the table described by *query* exists, and
    print the database's tables.

    :param database: path of the SQLite database file
    :param query: a CREATE TABLE statement
    """
    conn = create_connection(database)
    if conn is not None:
        create_table(conn, query)
        checkTableExists(conn)
        conn.close()  # BUG FIX: close only on success — closing an unconditionally would crash when conn is None
    else:
        print("Error! cannot create the database connection.")
# Schema for the preprocessed-questions table: cleaned question text, the
# extracted code snippets, the tags, pre/post-cleaning lengths and a code flag.
sql_create_table = """CREATE TABLE IF NOT EXISTS QuestionsProcessed (question text NOT NULL, code text, tags text, words_pre integer, words_post integer, is_code integer);"""
create_database_table("Processed.db", sql_create_table)
```
__ we create a new data base to store the sampled and preprocessed questions __
```
# Fetch the Punkt tokenizer models required by word_tokenize in the next cells.
nltk.download('punkt')
print("\n")
```
<h1>4. Machine Learning Models </h1>
<h2> 4.1 Converting tags for multilabel problems </h2>
<table>
<tr>
<th>X</th><th>y1</th><th>y2</th><th>y3</th><th>y4</th>
</tr>
<tr>
<td>x1</td><td>0</td><td>1</td><td>1</td><td>0</td>
</tr>
<tr>
<td>x1</td><td>1</td><td>0</td><td>0</td><td>0</td>
</tr>
<tr>
<td>x1</td><td>0</td><td>1</td><td>0</td><td>0</td>
</tr>
</table>
<h2> 4.5 Modeling with less data points (0.5M data points) and more weight to title and 500 tags only. </h2>
```
# Second preprocessing pass: same schema as Processed.db, but the title will
# be weighted 3x (see section 4.5.1).
sql_create_table = """CREATE TABLE IF NOT EXISTS QuestionsProcessed (question text NOT NULL, code text, tags text, words_pre integer, words_post integer, is_code integer);"""
create_database_table("Titlemoreweight.db", sql_create_table)
# http://www.sqlitetutorial.net/sqlite-delete/
# https://stackoverflow.com/questions/2279706/select-random-row-from-a-sqlite-table
read_db = 'train_no_dup.db'
write_db = 'Titlemoreweight.db'
train_datasize = 400000
# NOTE: `reader`/`writer` cursors and both connections stay open here on
# purpose — the preprocessing loop in the next cell consumes them.
if os.path.isfile(read_db):
    conn_r = create_connection(read_db)
    if conn_r is not None:
        reader =conn_r.cursor()
        # for selecting first 0.5M rows
        reader.execute("SELECT Title, Body, Tags From no_dup_train LIMIT 500001;")
        # for selecting random points
        #reader.execute("SELECT Title, Body, Tags From no_dup_train ORDER BY RANDOM() LIMIT 500001;")
if os.path.isfile(write_db):
    conn_w = create_connection(write_db)
    if conn_w is not None:
        tables = checkTableExists(conn_w)
        writer =conn_w.cursor()
        # Re-runs of this cell start from an empty destination table.
        if tables != 0:
            writer.execute("DELETE FROM QuestionsProcessed WHERE 1")
            print("Cleared All the rows")
```
<h3> 4.5.1 Preprocessing of questions </h3>
<ol>
<li> Separate Code from Body </li>
<li> Remove special characters from question title and description (not in code)</li>
<li> <b> Give more weightage to title : Add title three times to the question </b> </li>
<li> Remove stop words (Except 'C') </li>
<li> Remove HTML Tags </li>
<li> Convert all the characters into small letters </li>
<li> Use SnowballStemmer to stem the words </li>
</ol>
```
#http://www.bernzilla.com/2008/05/13/selecting-a-random-row-from-an-sqlite-table/
# Clean each selected question and insert it into Titlemoreweight.db:
# code snippets are split out, HTML stripped, the title repeated 3x for extra
# weight, then the text is lower-cased, tokenized, stop-word-filtered and stemmed.
start = datetime.now()
preprocessed_data_list=[]
# NOTE(review): this discards the first selected row, so 500000 of the 500001
# LIMIT-ed rows are processed — confirm that skipping one row is intended.
reader.fetchone()
questions_with_code=0
len_pre=0
len_post=0
questions_proccesed = 0
for row in reader:
    is_code = 0
    title, question, tags = row[0], row[1], str(row[2])
    if '<code>' in question:
        questions_with_code+=1
        is_code = 1
    # Combined Title+Body length before cleaning (stored as words_pre).
    x = len(question)+len(title)
    len_pre+=x
    code = str(re.findall(r'<code>(.*?)</code>', question, flags=re.DOTALL))
    question=re.sub('<code>(.*?)</code>', '', question, flags=re.MULTILINE|re.DOTALL)
    question=striphtml(question.encode('utf-8'))
    title=title.encode('utf-8')
    # adding title three time to the data to increase its weight
    # add tags string to the training data
    # NOTE(review): str() on a bytes object yields "b'...'", so the stored text
    # carries b-prefixes and escape sequences; .decode('utf-8') was likely
    # intended — confirm before changing, downstream artifacts depend on it.
    question=str(title)+" "+str(title)+" "+str(title)+" "+question
    # if questions_proccesed<=train_datasize:
    #     question=str(title)+" "+str(title)+" "+str(title)+" "+question+" "+str(tags)
    # else:
    #     question=str(title)+" "+str(title)+" "+str(title)+" "+question
    # Keep only alphanumerics plus #, +, . and - (so c#, c++, .net survive).
    question=re.sub(r'[^A-Za-z0-9#+.\-]+',' ',question)
    words=word_tokenize(str(question.lower()))
    #Removing all single letter and and stopwords from question exceptt for the letter 'c'
    question=' '.join(str(stemmer.stem(j)) for j in words if j not in stop_words and (len(j)!=1 or j=='c'))
    len_post+=len(question)
    tup = (question,code,tags,x,len(question),is_code)
    questions_proccesed += 1
    writer.execute("insert into QuestionsProcessed(question,code,tags,words_pre,words_post,is_code) values (?,?,?,?,?,?)",tup)
    if (questions_proccesed%100000==0):
        print("number of questions completed=",questions_proccesed)
no_dup_avg_len_pre=(len_pre*1.0)/questions_proccesed
no_dup_avg_len_post=(len_post*1.0)/questions_proccesed
print( "Avg. length of questions(Title+Body) before processing: %d"%no_dup_avg_len_pre)
print( "Avg. length of questions(Title+Body) after processing: %d"%no_dup_avg_len_post)
print ("Percent of questions containing code: %d"%((questions_with_code*100.0)/questions_proccesed))
print("Time taken to run this cell :", datetime.now() - start)
# never forget to close the conections or else we will end up with database locks
conn_r.commit()
conn_w.commit()
conn_r.close()
conn_w.close()
```
__ Sample questions after preprocessing of data __
```
# Print a small sample of the cleaned questions for eyeballing.
if os.path.isfile(write_db):
    conn_r = create_connection(write_db)
    if conn_r is not None:
        reader =conn_r.cursor()
        reader.execute("SELECT question From QuestionsProcessed LIMIT 10")
        print("Questions after preprocessed")
        print('='*100)
        reader.fetchone()  # skips the first of the 10 fetched rows
        for row in reader:
            print(row)
            print('-'*100)
        conn_r.commit()
        conn_r.close()
```
__ Saving Preprocessed data to a Database __
```
#Taking 0.5 Million entries to a dataframe.
write_db = 'Titlemoreweight.db'
if os.path.isfile(write_db):
    conn_r = create_connection(write_db)
    if conn_r is not None:
        preprocessed_data = pd.read_sql_query("""SELECT question, Tags FROM QuestionsProcessed""", conn_r)
        conn_r.commit()
        conn_r.close()
preprocessed_data.head()
print("number of data points in sample :", preprocessed_data.shape[0])
print("number of dimensions :", preprocessed_data.shape[1])
```
__ Converting string Tags to multilable output variables __
```
# Binary bag-of-words over the tag strings: one column per tag, 1 if present.
vectorizer = CountVectorizer(tokenizer = lambda x: x.split(), binary='true')
# NOTE(review): the SELECT above spells the column `Tags`; confirm the frame
# really exposes a lowercase 'tags' key here, otherwise this raises KeyError.
multilabel_y = vectorizer.fit_transform(preprocessed_data['tags'])
```
__ Selecting 500 Tags __
```
def tags_to_choose(n):
    """Return the label matrix restricted to the *n* most frequent tags.

    Relies on the module-level `multilabel_y` binary label matrix.
    """
    tag_freqs = multilabel_y.sum(axis=0).tolist()[0]
    by_frequency = sorted(range(len(tag_freqs)), key=lambda idx: tag_freqs[idx], reverse=True)
    return multilabel_y[:, by_frequency[:n]]
def questions_explained_fn(n):
    """Count the questions left with zero labels when only the top-*n* tags are kept."""
    kept_labels = tags_to_choose(n)
    labels_per_question = kept_labels.sum(axis=1)
    return np.count_nonzero(labels_per_question == 0)
# How much of the corpus is still labelled when only the top-n tags are kept?
questions_explained = []
total_tags=multilabel_y.shape[1]
total_qs=preprocessed_data.shape[0]
for i in range(500, total_tags, 100):
    questions_explained.append(np.round(((total_qs-questions_explained_fn(i))/total_qs)*100,3))
fig, ax = plt.subplots()
ax.plot(questions_explained)
xlabel = list(500+np.array(range(-50,450,50))*50)
ax.set_xticklabels(xlabel)
plt.xlabel("Number of tags")
plt.ylabel("Number Questions covered partially")
plt.grid()
plt.show()
# you can choose any number of tags based on your computing power, minimun is 500(it covers 90% of the tags)
# index 50 corresponds to 500 + 50*100 = 5500 tags.
print("with ",5500,"tags we are covering ",questions_explained[50],"% of questions")
print("with ",500,"tags we are covering ",questions_explained[0],"% of questions")
# we will be taking 500 tags
multilabel_yx = tags_to_choose(500)
print("number of questions that are not covered :", questions_explained_fn(500),"out of ", total_qs)
from sklearn.externals import joblib
joblib.dump(preprocessed_data, 'preprocessed_data.pkl')
# Chronological train/test split at train_datasize rows.
x_train=preprocessed_data.head(train_datasize)
# CONSISTENCY FIX: use train_datasize (== 400000) instead of repeating the
# constant, so X and y splits cannot drift apart if the size changes.
x_test=preprocessed_data.tail(preprocessed_data.shape[0] - train_datasize)
y_train = multilabel_yx[0:train_datasize,:]
y_test = multilabel_yx[train_datasize:preprocessed_data.shape[0],:]
print("Number of data points in train data :", y_train.shape)
print("Number of data points in test data :", y_test.shape)
```
<h3> 4.5.2 Featurizing data with TfIdf vectorizer </h3>
```
# TF-IDF featurization of the cleaned question text, with up to 4-grams.
# (A leftover debug `print("a")` was removed.)
start = datetime.now()
vectorizer = TfidfVectorizer(min_df=0.00009, max_features=200000, smooth_idf=True, norm="l2", \
                             tokenizer = lambda x: x.split(), sublinear_tf=False,
                             ngram_range=(1,4))
x_train_multilabel = vectorizer.fit_transform(x_train['question'])
x_test_multilabel = vectorizer.transform(x_test['question'])
print("Time taken to run this cell :", datetime.now() - start)
print("Dimensions of train data X:",x_train_multilabel.shape, "Y :",y_train.shape)
print("Dimensions of test data X:",x_test_multilabel.shape,"Y:",y_test.shape)
```
<h3> 4.5.3 OneVsRest Classifier with SGDClassifier using TFIDF </h3>
```
# Baseline: OneVsRest logistic regression (SGD, l1) on the TF-IDF features.
start = datetime.now()
classifier = OneVsRestClassifier(SGDClassifier(loss='log',
                                               alpha=0.00001,
                                               penalty='l1'), n_jobs=-1)
classifier.fit(x_train_multilabel, y_train)
predictions = classifier.predict (x_test_multilabel)
print("Accuracy :",metrics.accuracy_score(y_test, predictions))
print("Hamming loss ",metrics.hamming_loss(y_test,predictions))
# Micro averaging pools all label decisions (suits the skewed tag counts);
# macro averages each tag's score equally.
precision = precision_score(y_test, predictions, average='micro')
recall = recall_score(y_test, predictions, average='micro')
f1 = f1_score(y_test, predictions, average='micro')
print("Micro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
precision = precision_score(y_test, predictions, average='macro')
recall = recall_score(y_test, predictions, average='macro')
f1 = f1_score(y_test, predictions, average='macro')
print("Macro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
print (metrics.classification_report(y_test, predictions))
print("Time taken to run this cell :", datetime.now() - start)
joblib.dump(classifier, 'lr_with_more_title_weight.pkl')
```
##### ASSIGNMENT
<ol>
<li> bag of words upto 4 grams and compute the micro f1 score with Logistic regression(OvR) </li>
<li> Perform hyperparam tuning on alpha (or lambda) for Logistic regression to improve the performance using GridSearch </li>
<li> OneVsRestClassifier with Linear-SVM (SGDClassifier with loss-hinge)</li>
</ol>
## Featurizing Using Bag of Words
```
# Candidate alpha (regularization strength) values for the grid searches below.
alpha=[10**-3,10**-2,10**-1]
start = datetime.now()
# Bag-of-words featurization with up to 4-grams (assignment task 1).
vectorizer = CountVectorizer(min_df=0.00009, max_features=200000, \
                             tokenizer = lambda x: x.split(), ngram_range=(1,4))
x_train_multilabel = vectorizer.fit_transform(x_train['question'])
x_test_multilabel = vectorizer.transform(x_test['question'])
print("Time taken to run this cell :", datetime.now() - start)
print("Dimensions of train data X:",x_train_multilabel.shape, "Y :",y_train.shape)
print("Dimensions of test data X:",x_test_multilabel.shape,"Y:",y_test.shape)
```
##### Dump and load train and test data into joblib
```
# Persist the BOW features and label matrices so the notebook can resume here.
joblib.dump(x_train_multilabel, 'x_train_BOW.pkl')
joblib.dump(x_test_multilabel, 'x_test_BOW.pkl')
joblib.dump(y_train, 'y_train.pkl')
joblib.dump(y_test, 'y_test.pkl')
# Reload immediately (a no-op on a fresh run; useful after a kernel restart).
x_train_multilabel = joblib.load('x_train_BOW.pkl')
y_train = joblib.load('y_train.pkl')
x_test_multilabel = joblib.load('x_test_BOW.pkl')
y_test = joblib.load('y_test.pkl')
```
# OneVsRestClassifier with Logistic regression
#### (alpha tuning using Gridsearch)
## OneVsRestClassifier with SGDClassifier( penalty=l2, loss=log )==> {Logistic regression}
```
start = datetime.now()
import warnings
warnings.filterwarnings('ignore')
# hp1={'estimator__C':alpha}
# Grid-search alpha for OvR SGD logistic regression (penalty=l2), recording
# the micro-F1 train score at each candidate alpha.
cv_scores = []
for i in alpha:
    print(i)
    hp1={'estimator__alpha':[i],
         'estimator__loss':['log'],
         'estimator__penalty':['l2']}
    print(hp1)
    classifier = OneVsRestClassifier(SGDClassifier())
    model11 =GridSearchCV(classifier,hp1,
                          cv=3, scoring='f1_micro',n_jobs=-1)
    print("Gridsearchcv")
    best_model1=model11.fit(x_train_multilabel, y_train)
    print('fit model')
    Train_model_score=best_model1.score(x_train_multilabel,
                                        y_train)
    #print("best_model1")
    cv_scores.append(Train_model_score.mean())
# Copy the scores once after the loop (the original rebuilt this list on every iteration).
fscore = list(cv_scores)
# determining best alpha
optimal_alpha21 = alpha[fscore.index(max(fscore))]
# BUG FIX: '%d' truncated fractional alphas such as 0.001 down to 0; '%s' prints the real value.
print('\n The optimal value of alpha with penalty=l2 and loss= log is %s.' % optimal_alpha21)
# Plot train F1 against alpha.
fig4 = plt.figure( facecolor='c', edgecolor='k')
plt.plot(alpha, fscore,color='green', marker='o', linestyle='dashed',
         linewidth=2, markersize=12)
for xy in zip(alpha, np.round(fscore,3)):
    plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
plt.xlabel('Hyper parameter Alpha')
plt.ylabel('F1_Score value ')
plt.show()
print("Time taken to run this cell :", datetime.now() - start)
print(optimal_alpha21)
start = datetime.now()
# Refit a single OvR logistic-regression model at the alpha found above,
# persist it, and evaluate on the held-out split.
best_model1 = OneVsRestClassifier(SGDClassifier(loss='log', alpha=optimal_alpha21,
                                                penalty='l2'), n_jobs=-1)
best_model1.fit(x_train_multilabel, y_train)
joblib.dump(best_model1, 'best_model1_LR.pkl')
best_model1=joblib.load('best_model1_LR.pkl')
predictions = best_model1.predict (x_test_multilabel)
print("Accuracy :",metrics.accuracy_score(y_test, predictions))
print("Hamming loss ",metrics.hamming_loss(y_test,predictions))
precision = precision_score(y_test, predictions, average='micro')
recall = recall_score(y_test, predictions, average='micro')
f1 = f1_score(y_test, predictions, average='micro')
print("Micro-average quality numbers")  # typo "averasge" fixed
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
precision = precision_score(y_test, predictions, average='macro')
recall = recall_score(y_test, predictions, average='macro')
f1 = f1_score(y_test, predictions, average='macro')
print("Macro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
print (metrics.classification_report(y_test, predictions)) #printing classification report for all 500 labels
print("Time taken to run this cell :", datetime.now() - start)
```
## OneVsRestClassifier with Logistic regression( penalty=l1 )
```
start = datetime.now()
import warnings
warnings.filterwarnings('ignore')
# hp1={'estimator__C':alpha}
# Grid-search alpha for OvR SGD logistic regression (penalty=l1), recording
# the micro-F1 train score at each candidate alpha.
cv_scores = []
for i in alpha:
    print(i)
    hp1={'estimator__alpha':[i],
         'estimator__loss':['log'],
         'estimator__penalty':['l1']}
    print(hp1)
    classifier = OneVsRestClassifier(SGDClassifier())
    model11 =GridSearchCV(classifier,hp1,
                          cv=3, scoring='f1_micro',n_jobs=-1)
    print("Gridsearchcv")
    best_model1=model11.fit(x_train_multilabel, y_train)
    print('fit model')
    Train_model_score=best_model1.score(x_train_multilabel,
                                        y_train)
    #print("best_model1")
    cv_scores.append(Train_model_score.mean())
# Copy the scores once after the loop (the original rebuilt this list on every iteration).
fscore = list(cv_scores)
# determining best alpha
optimal_alpha22 = alpha[fscore.index(max(fscore))]
# BUG FIX: '%d' truncated fractional alphas such as 0.001 down to 0; '%s' prints the real value.
print('\n The optimal value of alpha with penalty=l1 and loss= log is %s.' % optimal_alpha22)
# Plot train F1 against alpha.
fig4 = plt.figure( facecolor='c', edgecolor='k')
plt.plot(alpha, fscore,color='green', marker='o', linestyle='dashed',
         linewidth=2, markersize=12)
for xy in zip(alpha, np.round(fscore,3)):
    plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
plt.xlabel('Hyper parameter Alpha')
plt.ylabel('F1_Score value ')
plt.show()
print("Time taken to run this cell :", datetime.now() - start)
start = datetime.now()
# Refit OvR logistic regression (l1) at the tuned alpha and persist it;
# evaluation happens in the next cell.
best_model2 = OneVsRestClassifier(SGDClassifier(loss='log', alpha=optimal_alpha22,
                                                penalty='l1'), n_jobs=-1)
best_model2.fit(x_train_multilabel, y_train)
joblib.dump(best_model2, 'best_model2_LR.pkl')
best_model2=joblib.load('best_model2_LR.pkl')
```
## Logistic regression with l1 penalty
```
start = datetime.now()
#classifier = OneVsRestClassifier(LogisticRegression(penalty='l1'), n_jobs=-1)
#classifier.fit(x_train_multilabel, y_train)
# Evaluate the tuned l1 logistic-regression model on the held-out split.
predictions = best_model2.predict(x_test_multilabel)
print("Accuracy :",metrics.accuracy_score(y_test, predictions))
print("Hamming loss ",metrics.hamming_loss(y_test,predictions))
# Micro averaging pools all label decisions; macro averages per-tag scores.
precision = precision_score(y_test, predictions, average='micro')
recall = recall_score(y_test, predictions, average='micro')
f1 = f1_score(y_test, predictions, average='micro')
print("Micro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
precision = precision_score(y_test, predictions, average='macro')
recall = recall_score(y_test, predictions, average='macro')
f1 = f1_score(y_test, predictions, average='macro')
print("Macro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
print (metrics.classification_report(y_test, predictions))
print("Time taken to run this cell :", datetime.now() - start)
```
## OneVsRestClassifier with Linear-SVM (SGDClassifier with loss-hinge)
```
# Sweep alpha for a linear SVM: OneVsRest(SGDClassifier) with hinge loss and
# L1 penalty, each candidate fitted via a one-point 3-fold GridSearchCV grid.
start = datetime.now()
import warnings
warnings.filterwarnings('ignore')
# hp1={'estimator__C':alpha}
cv_scores = []
for i in alpha:
    print(i)
    # Single-value "grid" per alpha so progress can be printed per candidate.
    hp1={'estimator__alpha':[i],
    'estimator__loss':['hinge'],
    'estimator__penalty':['l1']}
    print(hp1)
    classifier = OneVsRestClassifier(SGDClassifier())
    model11 =GridSearchCV(classifier,hp1,
    cv=3, scoring='f1_micro',n_jobs=-1)
    print("Gridsearchcv")
    best_model1=model11.fit(x_train_multilabel, y_train)
    print('fit model')
    # NOTE(review): this is the TRAINING-set micro-F1 of the refitted best
    # estimator, not the cross-validated score (best_model1.best_score_) —
    # confirm this is intended before comparing alphas on it.
    Train_model_score=best_model1.score(x_train_multilabel,
    y_train)
    #print("best_model1")
    # .mean() assumes score() returns a numpy scalar — TODO confirm.
    cv_scores.append(Train_model_score.mean())
fscore = [x for x in cv_scores]
# determining best alpha
optimal_alpha23 = alpha[fscore.index(max(fscore))]
print('\n The optimal value of alpha with penalty=l1 and loss= log is %d.' % optimal_alpha23)
# Plots
fig4 = plt.figure( facecolor='c', edgecolor='k')
plt.plot(alpha, fscore,color='green', marker='o', linestyle='dashed',
linewidth=2, markersize=12)
for xy in zip(alpha, np.round(fscore,3)):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
plt.xlabel('Hyper parameter Alpha')
plt.ylabel('F1_Score value ')
plt.show()
print("Time taken to run this cell :", datetime.now() - start)
```
## OneVsRestClassifier with SGDClassifier for optimal alpha with hinge loss
```
# Refit the linear SVM (hinge loss, L1 penalty) with the tuned alpha,
# persist it, reload it, and evaluate on the test split.
start = datetime.now()
classifier2 = OneVsRestClassifier(SGDClassifier(loss='hinge',
                                                alpha=optimal_alpha23,
                                                penalty='l1'))
classifier2 = classifier2.fit(x_train_multilabel, y_train)
joblib.dump(classifier2, 'classifier2.pkl')
classifier2 = joblib.load('classifier2.pkl')

predictions = classifier2.predict(x_test_multilabel)
print("Accuracy :", metrics.accuracy_score(y_test, predictions))
print("Hamming loss ", metrics.hamming_loss(y_test, predictions))
precision = precision_score(y_test, predictions, average='micro')
recall = recall_score(y_test, predictions, average='micro')
f1 = f1_score(y_test, predictions, average='micro')
# Fixed typo in the printed heading ("averasge" -> "average").
print("Micro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
precision = precision_score(y_test, predictions, average='macro')
recall = recall_score(y_test, predictions, average='macro')
f1 = f1_score(y_test, predictions, average='macro')
print("Macro-average quality numbers")
print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1))
print(metrics.classification_report(y_test, predictions))  # report over all labels
print("Time taken to run this cell :", datetime.now() - start)
```
# Observation
```
from prettytable import PrettyTable

# Summary table of every experiment, ordered by micro-F1 (best first).
summary_rows = [
    ["1", 'OneVsRest+SGD Classifier', "Tf-idf", "l1", 0.0001, "log", 0.4488],
    ["2", 'OneVsRest+SGD(log)=LR', "Bag-of-words", "l2", 0.001, "log", 0.4268],
    ["3", 'OneVsRest+SGD(log)=LR', "Bag-of-words", "l1", 0.001, "log", 0.4104],
    ["4", 'OneVsRest+SGD Classifier', "Bag-of-words", "l1", 0.001, "Hinge", 0.4028],
]
x = PrettyTable()
x.field_names = ["Sr.No", "MODEL", "FEATURIZATION", "PENALTY", "ALPHA", 'LOSS', 'MICRO_F1_SCORE']
for row in summary_rows:
    x.add_row(row)
print(x)
```
* The objective's result is shown as above.
* Model {bag of words up to 4-grams, micro F1 score computed with Logistic regression (OvR)} reaches 42.68% on tag prediction, which is lower than the result obtained with model {TF-IDF with alpha=0.0001, n_grams=(1,3)}.
* The performance of model with various alpha value is shown in graph.
| github_jupyter |
# Interact with decay data in ENDF-6 format (MF=8, MT=457)
First import `sandy` and other packages importing for formatting and postprocessing that are used in this notebook.
```
import os
import yaml
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import sandy
%load_ext autoreload
%autoreload 2
```
## Read decay data in ENDF-6 format
As always, any nuclear data evaluation can be imported into a `Endf6` instance
```
tape = sandy.Endf6.from_file("dec-027_Co_060.endf")
```
The radioactive decay data text (in ENDF-6 format) for all sections and isotopes in `tape` can be
parsed and extracted in a hierarchical format (nested `dict`) into a `DecayData` instance, say, variable `rdd`.
```
# Parse the decay data (MF=8, MT=457) into a nested-dict DecayData instance.
rdd = sandy.DecayData.from_endf6(tape)
rdd.data
```
A better rendering of the `rdd` data content can be obtained with the `yaml` python package.
```
print(yaml.dump(rdd.data))
```
The description of the `rdd.data` structure is explained below, where `zam` and `zap` are
equal to `Z * 10000 + A * 10 + M` for the parent and daughter isotopes, respectively, with
`Z`, `A`, and `M` being the charge, nucleons and metastate numbers.
```yaml
zam :
half_life : value in s
decay_constant : value in 1/s
stable : True/False
decay_energy :
alpha: value in eV
beta: value in eV
gamma: value in eV
total: value in eV
decay_energy_uncertainty :
alpha: value in eV
beta: value in eV
gamma: value in eV
total: value in eV
decay_modes :
rtyp :
decay_products :
zap : yield
zap_2 : yield_2
...
branching_ratio : value
rtyp_2 : ...
zam_2 : ...
```
`rtyp` is a string storing a sequence of integers, each of which defining the
several decay events covered by a given decay mode.
The list of individual decay events is accessible via variable `sandy.decay_modes`.
```
print(yaml.dump(sandy.decay_modes))
```
## Extract structured information into dataframes
We can extract the decay chains information (decay constant, branching ratio, yield, ...) into a `pandas.DataFrame`.
```
# Flatten the decay data into a DataFrame of chains (constant, BR, yields, ...).
chains = rdd.get_decay_chains()
chains
```
Or get the B-matrix, the matrix of branching ratios used to produce...
```
# Branching-ratio (B) matrix derived from the decay chains.
bm = rdd.get_bmatrix()
bm
```
...the Q-matrix, to get cumulative yields `Y` from independent yields `C`, according to `Y = Q C`.
```
# Q-matrix: maps independent yields to cumulative yields (Y = Q C).
qm = rdd.get_qmatrix()
qm
```
Another thing you can get is the transition matrix `T` used to solve the depletion equation
for radioactive decay, `dN/dt = T N`.
```
# Transition matrix for the radioactive-decay depletion equation.
tm = rdd.get_transition_matrix()
tm
```
## Convert decay data into HDF5 format
We can write the content of `rdd` into a hdf5 file, say `"decay.hdf5"`,
under a group namespace to identify the library, e.g. `"endfb_71"`.
```
# Round-trip the decay data through HDF5: write under a library namespace,
# read it back, and check the decay chains survive unchanged.
h5filename = "decay.hdf5"
libname = "endfb_71"
# Use the variables defined above rather than repeating the string literals,
# so the filename/namespace cannot drift out of sync with the read/cleanup.
rdd.to_hdf5(h5filename, libname)
rdd2 = sandy.DecayData.from_hdf5(h5filename, libname)
assert rdd2.get_decay_chains().equals(chains)
os.remove(h5filename)
```
| github_jupyter |
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# Enumerate every input file so the dataset paths used below can be verified.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import sklearn as sk
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.model_selection as ms
import sklearn.preprocessing as p
import math
# Report the TensorFlow version (bare expression -> notebook display).
tf.version.VERSION
# Load the labelled Kaggle MNIST training set (one row per 28x28 image).
mnist = pd.read_csv("../input/digit-recognizer/train.csv")
mnist.shape
mnist.columns
# Features are the 784 pixel columns; target is the 'label' digit column.
x = mnist.drop('label', axis=1)
x.head()
y = mnist['label']
y.head()
# One-hot encode the digits for the 10-way softmax output layer.
y = tf.keras.utils.to_categorical(y, num_classes=10)
y[0]
# Hold out 15% of the training data for validation.
X_train, X_val, y_train, y_val = ms.train_test_split(x, y, test_size=0.15)
# Standardise pixel values (fit on train only) and reshape to NHWC for the CNN.
scaler = p.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_train = X_train.reshape(-1, 28, 28, 1)
X_val = scaler.transform(X_val)
X_val = X_val.reshape(-1, 28, 28, 1)
X_train.shape, X_val.shape
y_train.shape, y_val.shape
X_train.min()
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
# NOTE(review): these are standalone-keras imports while the labels above used
# tf.keras — confirm both resolve to the same Keras installation.
model = Sequential()
# Convolutional Layer: 32 feature maps from 4x4 kernels over the 28x28x1 input
model.add(Conv2D(filters=32, kernel_size=(4,4), input_shape=(28, 28, 1), activation='relu'))
# pooling layer (2x2 max-pool halves the spatial resolution)
model.add(MaxPool2D(pool_size=(2,2)))
# transform both layers to dense layer, thus, we need to flatten
# 2d to 1 d
model.add(Flatten())
# Dense Layer
model.add(Dense(128, activation='relu'))
# output layer: one softmax probability per digit class
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics = ['accuracy'])
model.summary()
# Train for 20 epochs on the standardised training images.
model.fit(X_train, y_train, epochs=20)
from sklearn.metrics import classification_report, confusion_matrix

# Load the unlabelled test set and apply the SAME scaling fitted on the
# training data, then reshape to NHWC for the CNN.
X_pred = pd.read_csv('../input/digit-recognizer/test.csv')
X_pred = scaler.transform(X_pred)
X_pred = X_pred.reshape(-1, 28, 28, 1)

# Predict class probabilities and collapse to the most likely digit.
# (Fixed: the original referenced `predictions.shape` before any such variable
# existed — a NameError — and built an unused `y_pred` DataFrame; both removed.)
results = model.predict(X_pred)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")

# Kaggle submission format: 1-based ImageId alongside the predicted Label.
# Derive the range from the data instead of hard-coding 28001.
submission = pd.concat([pd.Series(range(1, X_pred.shape[0] + 1), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_submission.csv", index=False)
submission.to_csv("cnn_submission.csv", index=False)
import pandas as pd
# WARNING: the code below does not model anything — it appears to reconstruct
# the Kaggle test labels by matching competition rows against the full
# published MNIST dataset ("mnist-in-csv"), i.e. deliberate label leakage.
m_test = pd.read_csv("../input/digit-recognizer/test.csv")
m_train = pd.read_csv("../input/digit-recognizer/train.csv")
cols = m_test.columns
cols
# Tag each row with its origin so it can be traced back after concatenation.
m_test['dataset'] = 'test'
m_train['dataset'] = 'train'
m_test.shape
m_train.shape
m_test['dataset']
# All competition rows (train features + test), original row ids kept in 'index'.
dataset = pd.concat([m_train.drop('label', axis=1), m_test]).reset_index()
dataset.shape
# Full published MNIST with ground-truth labels.
csv_test = pd.read_csv("../input/mnist-in-csv/mnist_test.csv")
csv_train = pd.read_csv("../input/mnist-in-csv/mnist_train.csv")
sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
csv_train.head()
mnist = pd.concat([csv_train, csv_test]).reset_index(drop=True)
mnist
labels = mnist['label'].values
labels
mnist.drop('label', axis=1, inplace=True)
# Align column names with the competition pixel columns (captured before the
# 'dataset' tag was added).
mnist.columns = cols
# Sort both frames by all pixel columns; identical images then occupy the same
# sorted position, pairing each competition row with a ground-truth label —
# assumes a one-to-one match between the two datasets (TODO confirm).
id_mnist = mnist.sort_values(by=list(mnist.columns)).index
id_mnist
dataset_from = dataset.sort_values(by=list(mnist.columns))['dataset'].values
origin_id = dataset.sort_values(by=list(mnist.columns))['index'].values
for i in range(len(id_mnist)):
    if dataset_from[i] == 'test':
        sample_submission.loc[origin_id[i], 'Label'] = labels[id_mnist[i]]
sample_submission
sample_submission.to_csv("samp_fake_it.csv",index=False)
```
| github_jupyter |
# NLP (Natural Language Processing) with Python
This is the notebook that goes along with the NLP video lecture!
In this lecture we will discuss a higher level overview of the basics of Natural Language Processing, which basically consists of combining machine learning techniques with text, and using math and statistics to get that text in a format that the machine learning algorithms can understand!
Once you've completed this lecture you'll have a project using some Yelp Text Data!
**Requirements: You will need to have NLTK installed, along with downloading the corpus for stopwords. To download everything with a conda installation, run the cell below. Or reference the full video lecture**
```
# Launch NLTK's interactive downloader (run once to fetch the stopwords corpus).
import nltk
nltk.download_shell()
```
## Part 1: Get the Data
We'll be using a dataset from the [UCI datasets](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection)! This dataset is already located in the folder for this section.
The file we are using contains a collection of more than 5 thousand SMS phone messages. You can check out the **readme** file for more info.
*Recall that __rstrip()__ is used to return a copy of the string with the trailing characters removed.*
<br>
<br>
Let's go ahead and use __rstrip()__ plus a list comprehension to get a list of all the lines of text messages:
```
# Read the raw SMS corpus, one message per line, stripping trailing whitespace.
# Fixed: use a context manager so the file handle is closed deterministically
# (the original open() was never closed).
with open('smsspamcollection/SMSSpamCollection') as sms_file:
    messages = [line.rstrip() for line in sms_file]
print(len(messages))
messages[10]
```
<font color=#2948ff>A collection of texts is also sometimes called __"corpus"__. Let's print the first ten messages and number them using **enumerate**:</font>
```
# Print the first ten messages with their index for a quick eyeball check.
for message_number, message in enumerate(messages[:11]):
    print(message_number, message)
    print('\n')
```
Due to the spacing we can tell that this is a **[TSV](http://en.wikipedia.org/wiki/Tab-separated_values) ("tab separated values") file**, where the first column is a label saying whether the given message is a normal message (commonly known as <font color=#a80077>"ham"</font>) or <font color=#a80077>"spam"</font>. The second column is the message itself. (Note our numbers aren't part of the file, they are just from the **enumerate** call).
Using these labeled ham and spam examples, we'll **train a machine learning model to learn to discriminate between ham/spam automatically**. Then, with a trained model, we'll be able to **classify arbitrary unlabeled messages** as ham or spam.
From the official SciKit Learn documentation, we can visualize our process:
Instead of parsing TSV manually using Python, we can just take advantage of pandas! Let's go ahead and import it!
```
import pandas as pd
```
We'll use **read_csv** and make note of the **sep** argument, we can also specify the desired column names by passing in a list of *names*.
```
# Parse the TSV: column 1 is the ham/spam label, column 2 the message text.
messages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t', names=['label', 'message'])
messages.head()
```
## Part 2: Exploratory Data Analysis
Let's check out some of the stats with some plots and the built-in methods in pandas!
```
messages.describe()
```
<font color=#a80077>Let's use **groupby** to use describe by label, this way we can begin to think about the features that separate ham and spam!</font>
```
messages.groupby('label').describe()
```
As we continue our analysis we want to start thinking about the features we are going to be using. This goes along with the general idea of [feature engineering](https://en.wikipedia.org/wiki/Feature_engineering). The better your domain knowledge on the data, the better your ability to engineer more features from it. Feature engineering is a very large part of spam detection in general. I encourage you to read up on the topic!
Let's make a new column to detect how long the text messages are:
```
# Engineer a simple feature: the character length of each message.
messages['length'] = messages['message'].apply(len)
messages.head()
```
### Data Visualization
Let's visualize this! Let's do the imports:
```
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set_style('whitegrid')
# Distribution of message lengths across the whole corpus.
plt.figure(figsize=(12,6))
messages['length'].plot(kind='hist', bins=150, colormap='magma')
```
Play around with the bin size! Looks like text length may be a good feature to think about! Let's try to explain why the x-axis goes all the way to 1000ish, this must mean that there is some really long message!
```
# Length summary — the max value reveals an unusually long outlier message.
messages['length'].describe()
# messages.length.describe() can also be used here
```
Woah! 910 characters, let's use masking to find this message:
```
# Boolean-mask for the 910-character outlier and show its full text.
messages[messages['length'] == 910]
messages[messages['length'] == 910]['message'].iloc[0]
```
Looks like we have some sort of Romeo sending texts! But let's focus back on the idea of trying to see if message length is a distinguishing feature between ham and spam:
```
messages.hist(column='length', by='label', bins=60, figsize=(12,6))
```
Very interesting! Through just basic EDA we've been able to discover a trend that spam messages tend to have more characters. (Sorry Romeo!)
Now let's begin to process the data so we can eventually use it with SciKit Learn!
## Part 3: Text Pre-processing
Our main issue with our data is that it is all in text format (strings). The classification algorithms that we've learned about so far will need some sort of numerical feature vector in order to perform the classification task. __<font color=#a80077>There are actually many methods to convert a corpus to a vector format. The simplest is the [bag-of-words](http://en.wikipedia.org/wiki/Bag-of-words_model) approach, where each unique word in a text will be represented by one number.</font>__
In this section we'll convert the raw messages (sequence of characters) into vectors (sequences of numbers).
1. __As a first step, let's write a function that will split a message into its individual words and return a list.__
<br>
2. __We'll also remove very common words, ('the', 'a', etc..).__ To do this we will take advantage of the NLTK library. It's pretty much the standard library in Python for processing text and has a lot of useful features. We'll only use some of the basic ones here.
Let's create a function that will process the string in the message column, then we can just use **apply()** in pandas do process all the text in the DataFrame.
First removing punctuation. We can just take advantage of Python's built-in **string** library to get a quick list of all the possible punctuation:
```
import string

example = 'Sample message! Notice: it has punctuation.'

# Drop every punctuation character and stitch the remainder back together.
# (Fixed: the original joined the resulting string a second time — joining an
# already-joined string is a redundant no-op and has been removed.)
nopunc = ''.join(char for char in example if char not in string.punctuation)
nopunc
```
Now let's see how to remove stopwords. We can import a list of english stopwords from NLTK (check the documentation for more languages and info).
```
from nltk.corpus import stopwords
# Some English stop words
stopwords.words('english')[:10]
# back to the example, we are going to remove the stopwords in nopunc
nopunc.split()
# Keep only tokens that are not English stop words (case-insensitive check).
nopunc_clean = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
nopunc_clean
```
Now let's put both of these together in a function to apply it to our DataFrame later on:
```
def text_process(mess):
    """
    Takes in a string of text, then performs the following:
    1. Remove all punctuation
    2. Remove all stopwords
    3. Returns a list of the cleaned text
    """
    # Strip punctuation in a single pass over the characters.
    nopunc = ''.join(char for char in mess if char not in string.punctuation)
    # Build the stop-word set once per call: the original re-ran
    # stopwords.words('english') (a full list scan) for every single token,
    # making each call quadratic in practice. Results are unchanged.
    stop_words = set(stopwords.words('english'))
    # Tokenise on whitespace and drop stop words (case-insensitive).
    return [word for word in nopunc.split() if word.lower() not in stop_words]
```
Here is the original DataFrame again:
```
messages.head()
```
Now let's "tokenize" these messages. <font color=#a80077>__Tokenization__ is just the term used to describe the process of converting the normal text strings in to a list of tokens (words that we actually want).</font>
Let's see an example output on on column:
**Note:**
We may get some warnings or errors for symbols we didn't account for or that weren't in Unicode (like a British pound symbol)
```
# Sanity-check: tokenize the first five messages with text_process.
messages['message'].head().apply(text_process)
# Original dataframe (unmodified by the apply above)
messages.head()
```
### Continuing Normalization
__There are a lot of ways to continue normalizing this text. Such as [Stemming](https://en.wikipedia.org/wiki/Stemming) or distinguishing by [part of speech](http://www.nltk.org/book/ch05.html).__
NLTK has lots of built-in tools and great documentation on a lot of these methods. __*Sometimes they don't work well for text-messages due to the way a lot of people tend to use abbreviations or shorthand*__, For example:
'Nah dawg, IDK! Wut time u headin to da club?'
versus
'No dog, I don't know! What time are you heading to the club?'
Some text normalization methods will have trouble with this type of shorthand and so I'll leave you to explore those more advanced methods through the __[NLTK book online](http://www.nltk.org/book/).__
For now we will just focus on using what we have to convert our list of words to an actual vector that SciKit-Learn can use.
## Part 4: Vectorization
Currently, we have the messages as lists of tokens (also known as [lemmas](http://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html)) and now we need to convert each of those messages into a vector the SciKit Learn's algorithm models can work with.
Now we'll convert each message, represented as a list of tokens (lemmas) above, into a vector that machine learning models can understand.
__We'll do that in three steps using the bag-of-words model:__
1. __Count how many times a word occurs in each message <font color=#a80077>(Term frequency)</font>__
2. __Weigh the counts, so that frequent tokens get lower weight <font color=#a80077>(Inverse document frequency)</font>__
3. __Normalize the vectors to unit length, to abstract from the original text length <font color=#a80077>(L2 norm)</font>__
Let's begin the first step:
Each vector will have as many dimensions as there are unique words in the SMS corpus. We will first use SciKit Learn's **CountVectorizer**. This model will convert a collection of text documents to a matrix of token counts.
We can imagine this as a 2-Dimensional matrix. Where the 1-dimension is the entire vocabulary (1 row per word) and the other dimension are the actual documents, in this case a column per text message.
For example:
<table border = “1“>
<tr>
<th></th> <th>Message 1</th> <th>Message 2</th> <th>...</th> <th>Message N</th>
</tr>
<tr>
<td><b>Word 1 Count</b></td><td>0</td><td>1</td><td>...</td><td>0</td>
</tr>
<tr>
<td><b>Word 2 Count</b></td><td>0</td><td>0</td><td>...</td><td>0</td>
</tr>
<tr>
<td><b>...</b></td> <td>1</td><td>2</td><td>...</td><td>0</td>
</tr>
<tr>
<td><b>Word N Count</b></td> <td>0</td><td>1</td><td>...</td><td>1</td>
</tr>
</table>
Since there are so many messages, we can expect a lot of zero counts for the presence of that word in that document. Because of this, SciKit Learn will output a [Sparse Matrix](https://en.wikipedia.org/wiki/Sparse_matrix).
__<font color=#a80077>Shape Matrix or sparse array is a matrix where most of the elements are zero. If most of the elements are nonzero, then the matrix is considered dense</font>__
```
from sklearn.feature_extraction.text import CountVectorizer
```
There are a lot of arguments and parameters that can be passed to the CountVectorizer. In this case we will just specify the **analyzer** to be our own previously defined function:
```
# Learn the vocabulary over the full corpus using our custom analyzer.
bow_transformer = CountVectorizer(analyzer=text_process).fit(messages['message'])
# print total number of vocab words
print(len(bow_transformer.vocabulary_))
```
Let's take one text message and get its bag-of-words counts as a vector, putting to use our new `bow_transformer`:
```
# Take the fourth message as a worked example.
message4 = messages['message'][3]
print(message4)
```
Now let's see its vector representation:
```
# Sparse (1 x vocabulary-size) count vector for the example message.
bow4 = bow_transformer.transform([message4])
print(bow4)
print('\n')
print(bow4.shape)
```
__This means that there are seven unique words in message number 4 (after removing common stop words). Two of them appear twice, the rest only once.__ Let's go ahead and check and confirm which ones appear twice:
```
# Map the two repeated token indices back to their words.
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favour
# of get_feature_names_out() — update if running a recent sklearn.
print(bow_transformer.get_feature_names()[4068])
print(bow_transformer.get_feature_names()[9554])
```
Now we can use **.transform** on our Bag-of-Words (bow) transformed object and transform the entire DataFrame of messages. Let's go ahead and check out how the bag-of-words counts for the entire SMS corpus is a large, sparse matrix:
```
# Vectorise the whole corpus into one large sparse document-term matrix.
messages_bow = bow_transformer.transform(messages['message'])
print('Shape of Sparse Matrix: ', messages_bow.shape)
# A sparse matrix is one where most of the elements are zero.
print('Amount of Non-Zero occurences: ', messages_bow.nnz)
# .nnz == non zero occurences
# Percentage of stored (non-zero) cells out of all matrix cells.
sparsity = (100.0 * messages_bow.nnz / (messages_bow.shape[0] * messages_bow.shape[1]))
# print('sparsity: {}'.format(sparsity))
print('sparsity: {}'.format(round(sparsity)))
```
After the counting, the term weighting and normalization can be done with [TF-IDF](http://en.wikipedia.org/wiki/Tf%E2%80%93idf), using scikit-learn's `TfidfTransformer`.
____
### So what is TF-IDF?
__<font color=#a80077>TF-IDF stands for *term frequency-inverse document frequency.* The tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is *offset* by the frequency of the word in the corpus. Variations of the tf-idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document's relevance given a user query.</font>__
__One of the simplest ranking functions is computed by summing the tf-idf for each query term;__ many more sophisticated ranking functions are variants of this simple model.
__Typically, the tf-idf weight is composed by two terms:__
__<font color=#a80077>The first computes the normalized Term Frequency (TF),</font> aka the number of times a word appears in a document, divided by the total number of words in that document;__
__<font color=#a80077>The second term is the Inverse Document Frequency (IDF),</font> computed as the logarithm of the number of the documents in the corpus divided by the number of documents where the specific term appears.__
<font color=#a80077>**TF: Term Frequency**</font>, which measures how frequently a term occurs in a document. Since every document is different in length, it is possible that a term would appear much more times in long documents than shorter ones. Thus, the term frequency is often divided by the document length (aka the total number of terms in the document) as a way of normalization:
<font color=#2C7744>*TF(t) = (Number of times term t appears in a document) / (Total number of terms in the document)*</font>
<font color=#a80077>**IDF: Inverse Document Frequency**</font>, which measures how important a term is. While computing TF, all terms are considered equally important. However it is known that certain terms, such as "is", "of", and "that", may appear a lot of times but have little importance. Thus we need to weigh down the frequent terms while scale up the rare ones, by computing the following:
<font color=#2C7744>*IDF(t) = log_e(Total number of documents / Number of documents with term t in it)*</font>
See below for a simple example.
**Example:**
Consider a document containing 100 words wherein the word cat appears 3 times.
The term frequency (i.e., tf) for cat is then (3 / 100) = 0.03. Now, assume we have 10 million documents and the word cat appears in one thousand of these. Then, the inverse document frequency (i.e., idf) is calculated as log(10,000,000 / 1,000) = 4. Thus, the Tf-idf weight is the product of these quantities: 0.03 * 4 = 0.12.
____
Let's go ahead and see how we can do this in SciKit Learn:
```
from sklearn.feature_extraction.text import TfidfTransformer
# Fit IDF weights on the corpus count matrix, then weight the example vector.
tfidf_transformer = TfidfTransformer().fit(messages_bow)
tfidf4 = tfidf_transformer.transform(bow4)
print(tfidf4)
```
We'll go ahead and check what is the IDF (inverse document frequency) of the word `"u"` and of word `"university"`?
```
# Compare the IDF of a very common token ("u") with a rare one ("university").
print(tfidf_transformer.idf_[bow_transformer.vocabulary_['u']])
print(tfidf_transformer.idf_[bow_transformer.vocabulary_['university']])
```
To transform the entire bag-of-words corpus into TF-IDF corpus at once:
```
# TF-IDF-weight the entire bag-of-words corpus in one shot.
messages_tfidf = tfidf_transformer.transform(messages_bow)
print(messages_tfidf.shape)
```
__There are many ways the data can be preprocessed and vectorized. These steps involve feature engineering and building a "pipeline".__ I encourage you to check out SciKit Learn's documentation on dealing with text data as well as the expansive collection of available papers and books on the general topic of NLP.
## Part 5: Training a model
With messages represented as vectors, we can finally train our spam/ham classifier. Now we can actually use almost any sort of classification algorithms. For a [variety of reasons](http://www.inf.ed.ac.uk/teaching/courses/inf2b/learnnotes/inf2b-learn-note07-2up.pdf), the __Naive Bayes classifier algorithm__ is a good choice.
We'll be using scikit-learn here, choosing the [Naive Bayes](http://en.wikipedia.org/wiki/Naive_Bayes_classifier) classifier to start with:
```
from sklearn.naive_bayes import MultinomialNB
# Fit a multinomial Naive Bayes spam/ham classifier on the TF-IDF vectors.
spam_detect_model = MultinomialNB().fit(messages_tfidf, messages['label'])
```
Let's try classifying our single random message and checking how we do:
```
# Spot-check the single example message against its true label.
print('predicted: ', spam_detect_model.predict(tfidf4)[0])
print('expected: ', messages.label[3])
```
Fantastic! We've developed a model that can attempt to predict spam vs ham classification!
## Part 6: Model Evaluation
Now we want to determine how well our model will do overall on the entire dataset. Let's begin by getting all the predictions:
```
# Predictions over the full corpus — note this is the TRAINING data; see the
# caveat below about evaluating on the data the model was fitted on.
all_predictions = spam_detect_model.predict(messages_tfidf)
print(all_predictions)
```
__We can use SciKit Learn's built-in classification report, which returns [precision, recall,](https://en.wikipedia.org/wiki/Precision_and_recall) [f1-score](https://en.wikipedia.org/wiki/F1_score), and a column for support (meaning how many cases supported that classification).__ Check out the links for more detailed info on each of these metrics and the figure below:
<img src='https://upload.wikimedia.org/wikipedia/commons/thumb/2/26/Precisionrecall.svg/700px-Precisionrecall.svg.png' width=400 />
```
from sklearn.metrics import classification_report
# Precision/recall/F1 per class, computed on the training data itself.
print(classification_report(messages['label'], all_predictions))
```
There are quite a few possible metrics for evaluating model performance. Which one is the most important depends on the task and the business effects of decisions based off of the model. For example, the cost of mis-predicting "spam" as "ham" is probably much lower than mis-predicting "ham" as "spam".
In the above "evaluation", we evaluated accuracy on the same data we used for training. <font color=#DC281E>**You should never actually evaluate on the same dataset you train on!**</font>
Such evaluation tells us nothing about the true predictive power of our model. If we simply remembered each example during training, the accuracy on training data would trivially be 100%, even though we wouldn't be able to classify any new messages.
A proper way is to split the data into a training/test set, where the model only ever sees the **training data** during its model fitting and parameter tuning. The **test data** is never used in any way. This is then our final evaluation on test data is representative of true predictive performance.
## Train Test Split
```
from sklearn.model_selection import train_test_split
# Hold out 30% of the raw messages for a proper test evaluation.
msg_train, msg_test, label_train, label_test = train_test_split(messages['message'], messages['label'], test_size=0.3)
print(len(msg_train), len(msg_test), len(msg_train) + len(msg_test))
```
The test size is 30% of the entire dataset (1672 messages out of total 5572), and the training is the rest (3900 out of 5572). Note: we requested this 30/70 split explicitly via `test_size=0.3` (scikit-learn's own default is 25/75).
## Creating a Data Pipeline
Let's run our model again and then predict off the test set. __We will use SciKit Learn's [pipeline](http://scikit-learn.org/stable/modules/pipeline.html) capabilities to store a pipeline of workflow.__ This will allow us to set up all the transformations that we will do to the data for future use. Let's see an example of how it works:
```
from sklearn.pipeline import Pipeline
# Bundle the whole preprocessing + model workflow: raw text goes in,
# predictions come out, with no manual transform steps in between.
pipeline = Pipeline([
    # strings to token integer counts
    ('bow', CountVectorizer(analyzer=text_process)),
    # integer counts to weighted TF-IDF scores
    ('tfidf', TfidfTransformer()),
    # train on TF-IDF vectors w/ Naive Bayes classifier
    ('classifier', MultinomialNB()),
])
```
Now we can directly pass message text data and the pipeline will do our pre-processing for us! We can treat it as a model/estimator API:
```
# Fit on the training split only; evaluate on the untouched test split.
pipeline.fit(msg_train, label_train)
predictions = pipeline.predict(msg_test)
print(classification_report(label_test, predictions))
```
Now we have a classification report for our model on a true testing set! You can try out different classification models. There is a lot more to Natural Language Processing than what we've covered here, and its vast expanse of topic could fill up several college courses! I encourage you to check out the resources below for more information on NLP!
## More Resources
Check out the links below for more info on Natural Language Processing:
[NLTK Book Online](http://www.nltk.org/book/)
[Kaggle Walkthrough](https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-1-for-beginners-bag-of-words)
[SciKit Learn's Tutorial](http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html)
# Good Job!
| github_jupyter |
```
import panel as pn
pn.extension()
```
The ``FloatSlider`` widget allows selecting a numeric floating-point value within set bounds using a slider.
For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Links.ipynb).
#### Parameters:
For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
##### Core
* **``start``** (float): The range's lower bound
* **``end``** (float): The range's upper bound
* **``step``** (float): The interval between values
* **``value``** (float): The selected value as a float type
* **``value_throttled``** (float): The selected value as a float type throttled until mouseup
##### Display
* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value
* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')
* **``disabled``** (boolean): Whether the widget is editable
* **``format``** (str, bokeh.models.TickFormatter): Formatter to apply to the slider value
* **``name``** (str): The title of the widget
* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.
* **``tooltips``** (boolean): Whether to display tooltips on the slider handle
___
```
float_slider = pn.widgets.FloatSlider(name='Float Slider', start=0, end=3.141, step=0.01, value=1.57)
float_slider
```
The ``FloatSlider`` value is returned as a float and can be accessed and set like any other widget:
```
float_slider.value
```
A custom format string or bokeh TickFormatter may be used to format the slider values:
```
from bokeh.models.formatters import PrintfTickFormatter
str_format = pn.widgets.FloatSlider(name='Distance', format='1[.]00')
tick_format = pn.widgets.FloatSlider(name='Distance', format=PrintfTickFormatter(format='%.3f m'))
pn.Column(str_format, tick_format)
```
### Controls
The `FloatSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
```
pn.Row(float_slider.controls(jslink=True), float_slider)
```
| github_jupyter |
In this notebook, I
* replicate some of the simulations in the papers, and
* add some variations of my own.
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/facebookresearch/mc/blob/master/notebooks/simulations_py.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
```
# install the mc package
!pip install -q git+https://github.com/facebookresearch/mc
#@title imports {form-width: "20%"}
import pandas as pd
import numpy as np
import mc
#@title function mc {form-width: "20%"}
def one_run(
    N=400,  # sample size for main (primary) data
    n=100,  # sample size for calibration (validation) data
    pi=[0.2, 0.8],  # true membership proportions
    p=[[[0.9, 0.2], [0.1, 0.8]], [[0.9, 0.2], [0.1, 0.8]]],  # misclassification matrix
    mu=[0.8, 0.4],  # true mean of y
    seed=123,  # seed for random data generation
    verbose=False,  # NOTE(review): currently unused; kept for interface compatibility
):
    """Simulate one misclassified dataset and return estimates of `mu`.

    Draws N primary ("P") and n validation ("V") observations with a true
    group `i`, an outcome `y` whose mean depends on `i`, and an observed
    (possibly misclassified) group `j`; then computes the method-of-moments
    and restricted-MLE estimators from the `mc` package.

    Returns a DataFrame with one row per group: the `mu` truth column plus
    the estimator columns produced by `mc`. Any estimator value outside
    [0, 1] invalidates its whole column (set to NaN), except `mak_li_var`,
    which is a variance and therefore not range-checked.
    """
    # N = 400; n = 100; pi = [0.2, 0.8];
    # p =[[[0.9, 0.2], [0.1, 0.8]], [[0.9, 0.2], [0.1, 0.8]]]
    # mu = [0.8, 0.4]; seed=123
    np.random.seed(seed)
    pi = np.array(pi)
    p = np.array(p)
    mu = np.array(mu)
    i = np.random.binomial(n=1, p=pi[1], size=N + n)  # true group
    y = np.random.binomial(n=1, p=mu[i])  # y value depends on true group info i
    j = np.random.binomial(n=1, p=p[y, 1, i])  # observed group
    df = pd.DataFrame({
        "sample": ["P"] * N + ["V"] * n,
        "i": i, "j": j, "y": y, "y_squared": y ** 2,
    })
    # start calculation
    df_P = df.query("sample == 'P'")
    df_V = df.query("sample == 'V'")
    n_jd_P = df_P.groupby("j").size().to_numpy()
    y_sum_jd_P = df_P.groupby("j")["y"].sum().to_numpy()
    n_ji_V = pd.crosstab(df_V["j"], df_V["i"]).to_numpy()
    # aggfunc="sum": passing the np.sum callable is deprecated in recent pandas
    y_sum_ji_V = df_V.pivot_table(
        index="j", columns="i", values="y", aggfunc="sum", fill_value=0).to_numpy()
    y2_sum_ji_V = df_V.pivot_table(
        index="j", columns="i", values="y_squared",
        aggfunc="sum", fill_value=0).to_numpy()
    # get estimates
    mom = mc.mc_mom(n_jd_P, y_sum_jd_P, n_ji_V, y_sum_ji_V)
    rmle = mc.mc_rmle(n_jd_P, y_sum_jd_P, n_ji_V, y_sum_ji_V, y2_sum_ji_V)
    out = pd.concat(
        (pd.DataFrame({"mu": mu}), mom, rmle),
        axis=1
    )
    # Discard clearly-invalid estimates: a mean of a 0/1 outcome must lie in [0, 1].
    for col in out.columns:
        if col not in ("mu", "mak_li_var"):
            if any(~out[col].between(0., 1.)):
                out[col] = np.nan
    return out
#@title function simulation {form-width: "20%"}
def simulation(n_reps=1000, verbose=True, *args, **kwargs):
    """Replicate `one_run` across `n_reps` seeds and summarize the estimators.

    Extra positional/keyword arguments are forwarded to `one_run`; replication
    k uses seed k + 8101352. Returns a dict with the stacked raw results,
    per-parameter bias and MSE, the share of invalidated (NaN) estimates, the
    empirical variance of the `mak_li` estimator, and the fraction of runs in
    which each estimator orders the two group means correctly.
    """
    res = pd.concat(
        [one_run(seed=rep + 8101352, *args, **kwargs) for rep in range(n_reps)]
    )
    # Share of NaN cells per column (estimates invalidated inside one_run).
    pct_bad = res.isna().mean()
    est_cols = [c for c in res.columns if c not in ("mu", "mak_li_var")]
    # Estimation error per run; rows 0/1 of each run's index identify mu1/mu2.
    err = res[est_cols].sub(res["mu"], axis=0)
    by_param = err.groupby(level=0)
    bias = by_param.mean()
    mse = by_param.apply(lambda d: (d ** 2).mean())
    estimated_var = res.groupby(level=0)["mak_li_var"].mean().to_numpy()
    empirical_var = res.groupby(level=0)["mak_li"].var().to_numpy()
    # mu1 > mu2 in truth, so a negative row-1 diff means the ordering is right.
    right_direction = res[est_cols].diff().loc[1].apply(
        lambda x: (x[~np.isnan(x)] < 0).mean()
    )
    if verbose:
        print(res.head(2))
        print(f"\nbias: \n {bias}")
        print(f"MSE: \n {mse}")
        print(f"\n\n % with bad estimates:\n {pct_bad}")
        print(f"\n\nestimated mak_li_var: {estimated_var}")
        print(f"\n\nempirical mak_li_var: {empirical_var}")
        print(f"\n\nright_direction:\n {right_direction}")
    return {
        "res": res, "err": err, "bias": bias, "mse": mse,
        "pct_bad": pct_bad, "empirical_var": empirical_var,
        "right_direction": right_direction
    }
# simulation()
```
## simulations in paper
```
#@title p's that define simulation setup {form-width: "20%"}
p_a = [[[0.9, 0.2], [0.1, 0.8]], [[0.9, 0.2], [0.1, 0.8]]]
p_b = [[[0.8, 0.3], [0.2, 0.7]], [[0.8, 0.3], [0.2, 0.7]]]
p_c = [[[0.93, 0.23], [0.07, 0.77]], [[0.87, 0.17], [0.13, 0.83]]]
p_d = [[[0.95, 0.25], [0.05, 0.75]], [[0.85, 0.15], [0.15, 0.85]]]
#@title a {form-width: "20%"}
a = simulation(p=p_a)
#@title b {form-width: "20%"}
b = simulation(p=p_b)
#@title c {form-width: "20%"}
c = simulation(p=p_c)
#@title d {form-width: "20%"}
d = simulation(p=p_d)
```
## simulations with larger primary data
```
#@title a2 {form-width: "20%"}
big_N = 40_000
a2 = simulation(
N=big_N,
p=p_a
)
#@title b2 {form-width: "20%"}
b2 = simulation(
N=big_N,
p=p_a
)
#@title c2 {form-width: "20%"}
c2 = simulation(
N=big_N,
p=p_c
)
#@title d2 {form-width: "20%"}
d2 = simulation(
N=big_N,
p=p_d
)
```
## Collected results
```
#@title 1000 X bias and mse {form-width: "20%"}
# Build a tidy summary table: bias and MSE (scaled by 1000) for every
# simulation setup, one row per (parameter, metric, setup) combination.
setups = np.repeat(("a", "b", "c", "d", "a2", "b2", "c2", "d2"), 2)
setups = np.tile(setups, 2)
multiplier = 1000
# Integer division: np.repeat requires an integer repeat count (a float raises).
metrics = np.repeat((f"bias X {multiplier}", f"mse X {multiplier}"), len(setups) // 2)
biases = pd.concat([
    r["bias"] * multiplier
    for r in (a, b, c, d, a2, b2, c2, d2)
])
mses = pd.concat([
    r["mse"] * multiplier
    for r in (a, b, c, d, a2, b2, c2, d2)
])
summary = pd.concat([biases, mses])  # renamed from `all`, which shadowed the builtin
summary["setup"] = setups
summary["metric"] = metrics
summary["parameter"] = summary.index.map({0: "mu1", 1: "mu2"})
summary = (
    summary.sort_values(["parameter", "metric", "setup",])
    [["parameter", "metric", "setup", "naive", "validation", "no_y_V", "with_y_V", "mak_li"]]
    .round(2)
)
summary
```
## smaller effect size
```
s1 = simulation(mu=[0.5, 0.4])
s2 = simulation(mu=[0.5, 0.45])
s3 = simulation(mu=[0.5, 0.48])
```
| github_jupyter |
```
import pymongo
import os
import pandas as pd
import yaml
from collections import Counter
from datetime import datetime
import sys
SRC = os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), "src")
sys.path.append(SRC)
from content_api.details_utils import extract_from_details, cs_extract_text, cs_extract_links
### Get dirs
DATA_DIR = os.getenv("DATA_DIR")
config = os.path.join(SRC, "config")
black_list_path = os.path.join(config, "document_types_excluded_from_the_topic_taxonomy.yml")
### Get database running locally
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
print(myclient.list_database_names())
mydb = myclient["content_store"]
mycol = mydb["content_items"]
# Load the document-type blacklist. safe_load parses plain YAML without
# constructing arbitrary Python objects; yaml.load without an explicit
# Loader is deprecated and unsafe on files you do not fully control.
with open(black_list_path, 'r') as stream:
    blacklisted_content_page = sorted(yaml.safe_load(stream)['document_types'])
blacklisted_content_page[0:5]
keep_columns = \
['_id',
# 'access_limited',
# 'analytics_identifier',
'content_id',
'content_purpose_document_supertype',
'content_purpose_subgroup',
'content_purpose_supergroup',
# 'created_at',
'description',
'details',
'document_type',
'email_document_supertype',
# 'expanded_links',
'first_published_at',
# 'format',
'government_document_supertype',
# 'links',
'locale',
'navigation_document_supertype',
# 'need_ids',
# 'payload_version',
'phase',
'public_updated_at',
'publishing_app',
# 'publishing_request_id',
'publishing_scheduled_at',
# 'redirects',
'rendering_app',
# 'routes',
# 'scheduled_publishing_delay_seconds',
# 'schema_name',
'search_user_need_document_supertype',
'title',
'updated_at',
'user_journey_document_supertype'
# 'withdrawn_notice'
]
links_keep = \
[
'organisations',
'primary_publishing_organisation',
'taxons',
# 'finder',
# 'available_translations',
'mainstream_browse_pages',
# 'parent',
'part_of_step_navs',
'ordered_related_items',
# 'meets_user_needs',
'topics',
'ordered_related_items_overrides',
'pages_part_of_step_nav',
'pages_related_to_step_nav',
'related_to_step_navs',
# 'children',
'document_collections',
# 'lead_organisations',
# 'world_locations',
# 'worldwide_organisations',
# 'supporting_organisations',
# 'worldwide_priorities',
# 'original_primary_publishing_organisation',
'documents',
'policy_areas',
# 'topical_events',
# 'suggested_ordered_related_items',
'related_policies',
# 'ministers',
# 'people',
# 'roles',
# 'field_of_operation'
]
keep_keys = \
[
# 'analytics_identifier',
# 'api_path',
'base_path',
'content_id',
# 'description',
# 'document_type',
# 'locale',
# 'schema_name',
# 'title',
# 'withdrawn',
# 'details',
# 'links'
]
def handle_expanded_links(content_links, row_dict, link_keys=None, item_keys=None):
    """Copy selected expanded-links sections into `row_dict`, in place.

    Parameters
    ----------
    content_links : dict
        Mapping of link-section name -> list of linked-item dicts
        (a content item's ``expanded_links`` value).
    row_dict : dict
        Output row; for each kept section a key is added whose value is a
        list of trimmed item dicts.
    link_keys : iterable of str, optional
        Link sections to keep. Defaults to the module-level ``links_keep``.
    item_keys : iterable of str, optional
        Fields to retain from each linked item. Defaults to the module-level
        ``keep_keys``.
    """
    if link_keys is None:
        link_keys = links_keep
    if item_keys is None:
        item_keys = keep_keys
    for key, value in content_links.items():
        if key in link_keys:
            # Keep only the whitelisted fields of each linked item;
            # missing fields are simply omitted.
            row_dict[key] = [
                {k: item[k] for k in item_keys if k in item}
                for item in value
            ]
mydoc = mycol.find({ "$and": [
{ "document_type": {"$not" : { "$in": blacklisted_content_page}}},
{ "phase": "live"}]})
print("Started:",datetime.now().strftime("%H:%M:%S"))
rowlist = []
for i,item in enumerate(mydoc):
if i < 50000:
row = {key:value for key,value in item.items() if key in keep_columns}
# row['body'] = extract_from_details(item['details'], "text")
# row['embedded_links'] = extract_from_details(item['details'], "links")
if "expanded_links" in item.keys():
handle_expanded_links(item["expanded_links"], row)
rowlist.append(row)
else:
break
if i % 10000==0:
print(i,datetime.now().strftime("%H:%M:%S"))
print("Ended:",datetime.now().strftime("%H:%M:%S"))
df = pd.DataFrame(rowlist)
df.shape
df.iloc[0].details
df.details.iloc[0]['body'][0].keys()
target = "parts"
for det in df.details.values:
if target in det.keys():
for item in det[target]:
print(item.keys())
if "body" in item.keys():
print(item['body'],"\n")
# print(det[target])
# print([item['body'] for item in det[target]])
# print("".join([d['content'] for d in det[target]\
# if d['content_type'] == "text/html" ]))
break
Counter([d for det in df.details.values for d in det.keys()])
for item in df.details.iloc[0]['headers']:
print(item)
df.details.iloc[0]['metadata']
cs_extract_text(df.details.iloc[1000])
cs_extract_links(df.details.iloc[1010])
target = "transaction_start_link"
for i,det in enumerate(df.details.values):
if target in det.keys():
print(i)
# for item in det[target]:
# print(item)
# if "body" in item.keys():
# print(item['body'],"\n")
# print(det[target])
# print([item['body'] for item in det[target]])
# print("".join([d['content'] for d in det[target]\
# if d['content_type'] == "text/html" ]))
break
dets = df.iloc[10554].details
dets['transaction_start_link']
cs_extract_links(dets)
cs_extract_text(dets)
df['body'] = df.details.map(cs_extract_text)
df['body'].iloc[0]
```
| github_jupyter |
Blankenbach Benchmark Case 2a
======
Temperature dependent convection
----
This is a benchmark case of two-dimensional, incompressible, bottom heated, temperature dependent convection. This example is based on case 2a in Blankenbach *et al.* 1989 for a single Rayleigh number ($Ra = 10^7$).
Here a temperature field that is already in equilibrium is loaded and a single Stokes solve is used to get the velocity and pressure fields. A few advection time steps are carried out as a demonstration of the new viscosity function.
**This lesson introduces the concepts of:**
1. material rheologies with functional dependencies
**Keywords:** Stokes system, advective diffusive systems, analysis tools, tools for post analysis, rheologies
**References**
1. B. Blankenbach, F. Busse, U. Christensen, L. Cserepes, D. Gunkel, U. Hansen, H. Harder, G. Jarvis, M. Koch, G. Marquart, D. Moore, P. Olson, H. Schmeling and T. Schnaubelt. A benchmark comparison for mantle convection codes. Geophysical Journal International, 98, 1, 23–38, 1989
http://onlinelibrary.wiley.com/doi/10.1111/j.1365-246X.1989.tb05511.x/abstract
```
import numpy as np
import underworld as uw
import math
from underworld import function as fn
import glucifer
```
Setup parameters
-----
Set simulation parameters for test.
```
Temp_Min = 0.0
Temp_Max = 1.0
res = 128
```
**Set physical values in SI units**
```
alpha = 2.5e-5
rho = 4e3
g = 10
dT = 1e3
h = 1e6
kappa = 1e-6
eta = 2.5e19
```
**Set viscosity function constants as per Case 2a**
```
Ra = 1e7
eta0 = 1.0e3
```
**Input file path**
Set input directory path
```
inputPath = 'input/1_04_BlankenbachBenchmark_Case2a/'
outputPath = 'output/'
# Make output directory if necessary.
if uw.rank()==0:
import os
if not os.path.exists(outputPath):
os.makedirs(outputPath)
```
Create mesh and finite element variables
------
```
mesh = uw.mesh.FeMesh_Cartesian( elementType = ("Q1/dQ0"),
elementRes = (res, res),
minCoord = (0., 0.),
maxCoord = (1., 1.))
velocityField = mesh.add_variable( nodeDofCount=2 )
pressureField = mesh.subMesh.add_variable( nodeDofCount=1 )
temperatureField = mesh.add_variable( nodeDofCount=1 )
temperatureDotField = mesh.add_variable( nodeDofCount=1 )
```
Initial conditions
-------
Load an equilibrium case with 128$\times$128 resolution and $Ra = 10^7$. This can be changed as per **1_03_BlankenbachBenchmark** if required.
```
meshOld = uw.mesh.FeMesh_Cartesian( elementType = ("Q1/dQ0"),
elementRes = (128, 128),
minCoord = (0., 0.),
maxCoord = (1., 1.),
partitioned = False )
temperatureFieldOld = meshOld.add_variable( nodeDofCount=1 )
temperatureFieldOld.load( inputPath + 'tempfield_case2_128_Ra1e7_10000.h5' )
temperatureField.data[:] = temperatureFieldOld.evaluate( mesh )
temperatureDotField.data[:] = 0.
velocityField.data[:] = [0.,0.]
pressureField.data[:] = 0.
```
**Plot initial temperature**
```
figtemp = glucifer.Figure()
figtemp.append( glucifer.objects.Surface(mesh, temperatureField) )
figtemp.show()
```
**Boundary conditions**
```
for index in mesh.specialSets["MinJ_VertexSet"]:
temperatureField.data[index] = Temp_Max
for index in mesh.specialSets["MaxJ_VertexSet"]:
temperatureField.data[index] = Temp_Min
iWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
jWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
freeslipBC = uw.conditions.DirichletCondition( variable = velocityField,
indexSetsPerDof = ( iWalls, jWalls) )
tempBC = uw.conditions.DirichletCondition( variable = temperatureField,
indexSetsPerDof = ( jWalls, ) )
```
Set up material parameters and functions
-----
Setup the viscosity to be a function of the temperature. Recall that these functions and values are preserved for the entire simulation time.
```
b = math.log(eta0)
T = temperatureField
fn_viscosity = eta0 * fn.math.exp( -1.0 * b * T )
densityFn = Ra*temperatureField
gravity = ( 0.0, 1.0 )
buoyancyFn = gravity*densityFn
```
**Plot the initial viscosity**
Plot the viscosity, which is a function of temperature, using the initial temperature conditions set above.
```
figEta = glucifer.Figure()
figEta.append( glucifer.objects.Surface(mesh, fn_viscosity) )
figEta.show()
```
System setup
-----
Since we are using a previously constructed temperature field, we will use a single Stokes solve to get consistent velocity and pressure fields.
**Setup a Stokes system**
```
stokes = uw.systems.Stokes( velocityField = velocityField,
pressureField = pressureField,
conditions = [freeslipBC,],
fn_viscosity = fn_viscosity,
fn_bodyforce = buoyancyFn )
```
**Set up and solve the Stokes system**
```
solver = uw.systems.Solver(stokes)
solver.solve()
```
**Create an advective diffusive system**
```
advDiff = uw.systems.AdvectionDiffusion( temperatureField, temperatureDotField, velocityField, fn_diffusivity = 1.,
conditions = [tempBC,], )
```
Analysis tools
-----
**Nusselt number**
```
nuTop = uw.utils.Integral( fn=temperatureField.fn_gradient[1], mesh=mesh, integrationType='Surface',
surfaceIndexSet=mesh.specialSets["MaxJ_VertexSet"])
Nu = -nuTop.evaluate()[0]
if(uw.rank()==0):
print('Initial Nusselt number = {0:.3f}'.format(Nu))
```
**RMS velocity**
```
v2sum_integral = uw.utils.Integral( mesh=mesh, fn=fn.math.dot( velocityField, velocityField ) )
volume_integral = uw.utils.Integral( mesh=mesh, fn=1. )
Vrms = math.sqrt( v2sum_integral.evaluate()[0] )/volume_integral.evaluate()[0]
if(uw.rank()==0):
print('Initial Vrms = {0:.3f}'.format(Vrms))
```
**Temperature gradients at corners**
Uses global evaluate function which can evaluate across multiple processors, but may slow the model down for very large runs.
```
def calcQs(field=None):
    """Return the vertical temperature gradient at the four domain corners.

    Evaluates d(field)/dy at (0,1), (1,1), (1,0), (0,0) — in that order —
    via `evaluate_global`, which works across multiple processes.

    Parameters
    ----------
    field : optional
        Mesh variable to differentiate; defaults to the module-level
        `temperatureField` (backward compatible with the original no-arg call).
    """
    if field is None:
        field = temperatureField
    grad_y = field.fn_gradient[1]
    corners = ((0., 1.), (1., 1.), (1., 0.), (0., 0.))
    q1, q2, q3, q4 = (grad_y.evaluate_global(c) for c in corners)
    return q1, q2, q3, q4
q1, q2, q3, q4 = calcQs()
if(uw.rank()==0):
print('Initial T gradients = {0:.3f}, {1:.3f}, {2:.3f}, {3:.3f}'.format(q1[0][0], q2[0][0], q3[0][0], q4[0][0]))
```
Main simulation loop
-----
Run a few advection and Stokes solver steps to make sure we are in, or close to, equilibrium.
```
time = 0.
step = 0
step_end = 4
# define an update function
def update():
    """Advance the advection-diffusion system by one stable timestep.

    Reads the module-level `advDiff`, `time` and `step`; does not modify
    them — it returns the new (time, step) pair for the caller to rebind.
    """
    # Determining the maximum timestep for advancing the a-d system.
    dt = advDiff.get_max_dt()
    # Advect using this timestep size.
    advDiff.integrate(dt)
    return time+dt, step+1
while step < step_end:
# solve Stokes and advection systems
solver.solve()
# Calculate the RMS velocity and Nusselt number.
Vrms = math.sqrt( v2sum_integral.evaluate()[0] )/volume_integral.evaluate()[0]
Nu = -nuTop.evaluate()[0]
q1, q2, q3, q4 = calcQs()
if(uw.rank()==0):
print('Step {0:2d}: Vrms = {1:.3f}; Nu = {2:.3f}; q1 = {3:.3f}; q2 = {4:.3f}; q3 = {5:.3f}; q4 = {6:.3f}'
.format(step, Vrms, Nu, q1[0][0], q2[0][0], q3[0][0], q4[0][0]))
# update
time, step = update()
```
Comparison of benchmark values
-----
Compare values from Underworld against those from Blankenbach *et al.* 1989 for case 2a in the table below.
```
if(uw.rank()==0):
print('Nu = {0:.3f}'.format(Nu))
print('Vrms = {0:.3f}'.format(Vrms))
print('q1 = {0:.3f}'.format(q1[0][0]))
print('q2 = {0:.3f}'.format(q2[0][0]))
print('q3 = {0:.3f}'.format(q3[0][0]))
print('q4 = {0:.3f}'.format(q4[0][0]))
np.savetxt(outputPath+'summary.txt', [Nu, Vrms, q1, q2, q3, q4])
```
| $Ra$ | $Nu$ | $v_{rms}$| $q_1$ | $q_2$ | $q_3$ | $q_4$ |
|:------:|:--------:|:-------:|:--------:|:-------:|:-------:|:--------:|
| 10$^7$ | 10.0660 | 480.4 | 17.53136 | 1.00851 | 26.8085 | 0.497380 |
| github_jupyter |
#Demo: Interpolator
*This script provides a few examples on using the Interpolator class.
Last updated: April 14, 2015.
Copyright (C) 2014 Randall Romero-Aguilar
Licensed under the MIT license, see LICENSE.txt*
*Interpolator* is a subclass of *Basis*.
* A *Basis* object contains data to compute the interpolation matrix $\Phi(x)$ at arbitrary values of $x$ (within the interpolation box).
* An *Interpolator* is used to interpolate a given function $f(x)$, when the value of $f$ is known at the basis nodes. It adds methods to the *Basis* class to compute interpolation coefficients $c$ and to interpolate $f$ at arbitrary $x$.
Evaluation of these objects is straightforward:
* If B is a Basis, then B(x, k) computes the interpolating matrix $D^{(k)}\Phi(x)$, the k-derivative of $\Phi$
* If V is an Interpolator, then V(x, k) interpolates $D^{(k)}f(x)$, the k-derivative of $f$.
```
%matplotlib notebook
import numpy as np
from compecon import Basis, Interpolator
import matplotlib.pyplot as plt
import seaborn as sns
import time
np.set_printoptions(precision=3, suppress=True)
#sns.set(style='whitegrid')
```
##EXAMPLE 1:
Using BasisChebyshev to interpolate a 1-D function with a Chebyshev basis
**PROBLEM:** Interpolate the function $y = f(x) = 1 + sin(2x)^2$ on the domain $[0,\pi]$, using 5 Gaussian nodes.
There are two ways to create an *Interpolator* object.
* Defining the basis first, then the interpolator with known function values at nodes
```
f = lambda x: (1 + np.sin(2*x)**2)
B = Basis(5,0,np.pi)
V = Interpolator(B, y=f(B.nodes))
```
We are going to make the same plot several times, so we define it as a function
```
xx = np.linspace(0,np.pi,120)
def plot(P):
    """Plot the true function and the interpolant P on the refined grid `xx`.

    Uses the module-level grid `xx` and target function `f`. `P` is an
    Interpolator: `P(xx)` evaluates the interpolant at `xx`, and `P()` with
    no argument returns its values at the basis nodes.
    """
    plt.figure()
    plt.plot(xx,f(xx))    # true function
    plt.plot(xx,P(xx))    # interpolated approximation
    plt.scatter(P.nodes,P(),color='red') #add nodes
```
Plot the interpolation at a refined grid. Notice how the interpolation is exact at the nodes.
```
plot(V)
```
* The second option is to create the *Interpolator* just as a regular *Basis*, adding the known function values at the next step. The difference is that we end up with only one object.
```
S = Interpolator(5,0,np.pi)
S.y = f(S.nodes)
plot(S)
```
* When we have a callable object (like the lambda f) we can pass it directly to the constructor, which will evaluate it at the nodes:
```
U = Interpolator(5,0,np.pi, y = f)
plot(U)
```
### Let's time it
Interpolate the first derivative of $S$ at the $xx$ values, repeat $10^4$ times.
```
t0 = time.time()
for k in range(10000):
S(xx, 1)
time.time() - t0
```
In MATLAB, it takes around 6 seconds!!
| github_jupyter |
```
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
MAX_EVALUE = 1e-2
```
# Data Overview
This notebook provides an overview of source datasets - training, testing and 3k bacteria.
# Training data
## ClusterFinder BGCs (positives)
** Used for: Model training **
CSV file with protein domains in genomic order. Contigs (samples) are defined by the `contig_id` column.
```
domains = pd.read_csv('../data/training/positive/CF_bgcs.csv')
domains = domains[domains['evalue'] <= MAX_EVALUE]
domains.head()
num_contigs = len(domains['contig_id'].unique())
num_contigs
contig_proteins = domains.groupby("contig_id")['protein_id'].nunique()
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
ax = contig_proteins.hist(bins=40, ax=axes[0])
ax.set_xlabel('# proteins in BGC sample')
ax.set_ylabel('Frequency')
ax = contig_proteins.hist(bins=100, ax=axes[1], cumulative=True)
ax.set_xlabel('# proteins in BGC sample')
ax.set_ylabel('Cumulative frequency')
plt.tight_layout()
contig_domains = domains.groupby("contig_id")['pfam_id'].size()
ax = contig_domains.hist(bins=20)
ax.set_xlabel('# domains in BGC sample')
ax.set_ylabel('Frequency')
```
## MIBiG BGCs (positives)
** Used for: LCO validation, 10-fold Cross-validation **
CSV file with protein domains in genomic order. Contigs (samples) are defined by the `contig_id` column.
```
domains = pd.read_csv('../data/training/positive/mibig_bgcs_all.csv')
domains = domains[domains['evalue'] <= MAX_EVALUE]
domains.head()
num_contigs = len(domains['contig_id'].unique())
num_contigs
contig_proteins = domains.groupby("contig_id")['protein_id'].nunique()
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
ax = contig_proteins.hist(bins=40, ax=axes[0])
ax.set_xlabel('# proteins in BGC sample')
ax.set_ylabel('Frequency')
ax = contig_proteins.hist(bins=100, ax=axes[1], cumulative=True)
ax.set_xlabel('# proteins in BGC sample')
ax.set_ylabel('Cumulative frequency')
plt.tight_layout()
contig_domains = domains.groupby("contig_id")['pfam_id'].size()
ax = contig_domains.hist(bins=20)
ax.set_xlabel('# domains in BGC sample')
ax.set_ylabel('Frequency')
contig_domains.describe(percentiles=np.arange(0, 1, 0.05))
properties = pd.read_csv('../data/mibig/mibig_properties.csv')
properties.head()
properties['classes'].value_counts().plot.barh(figsize=(5, 10))
classes_split = properties['classes'].apply(lambda c: c.split(';'))
class_counts = pd.Series([c for classes in classes_split for c in classes]).value_counts()
class_counts.plot.barh()
print(class_counts)
```
# GeneSwap negatives
** Used for: Model training, LCO validation **
```
domains = pd.read_csv('../data/training/negative/geneswap_negatives.csv')
domains = domains[domains['evalue'] <= MAX_EVALUE]
domains.head()
num_contigs = len(domains['contig_id'].unique())
num_contigs
contig_proteins = domains.groupby("contig_id")['protein_id'].nunique()
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
ax = contig_proteins.hist(bins=40, ax=axes[0])
ax.set_xlabel('# proteins in negative sample')
ax.set_ylabel('Frequency')
ax = contig_proteins.hist(bins=100, ax=axes[1], cumulative=True)
ax.set_xlabel('# proteins in negative sample')
ax.set_ylabel('Cumulative frequency')
plt.tight_layout()
contig_domains = domains.groupby("contig_id")['pfam_id'].size()
ax = contig_domains.hist(bins=20)
ax.set_xlabel('# domains in negative sample')
ax.set_ylabel('Frequency')
```
# Validation and testing data
## ClusterFinder labelled contigs
** Used for: Model validation - ROC curves **
10 labelled genomes (13 contigs) with non-BGC and BGC regions (stored in `in_cluster` column for each domain)
```
contigs = pd.read_csv('../data/clusterfinder/labelled/CF_labelled_contig_summary.csv', sep=';')
contigs
domains = pd.read_csv('../data/clusterfinder/labelled/CF_labelled_contigs_domains.csv')
domains = domains[domains['evalue'] <= MAX_EVALUE]
domains.head()
def count_y_clusters(y):
    """Count contiguous runs of 1s in a binary label sequence.

    A run is counted at every rising edge, i.e. each position whose value is
    1 while the previous value was 0 (the sequence is treated as preceded by
    a 0, so a leading 1 starts a run).
    """
    values = list(y)
    rising_edges = zip([0] + values, values)
    return sum(1 for before, current in rising_edges if current == 1 and before == 0)
```
### non-BGC and BGC regions
```
for contig_id, contig_domains in domains.groupby('contig_id'):
in_cluster = contig_domains.reset_index()['in_cluster']
num_bgcs = count_y_clusters(in_cluster)
title = '{} ({} BGCs)'.format(contig_id, num_bgcs)
ax = in_cluster.plot(figsize=(15, 1), title=title, color='grey', lw=1)
in_cluster.plot(kind='area', ax=ax, color='grey', alpha=0.2)
plt.show()
```
## ClusterFinder 75 BGCs in genomic context
** Used for: Model validation - TPR evaluation **
6 labelled genomes with annotated BGC regions. Remaining regions are not known.
75 BGCs are annotated (10 are duplicates found twice, so only 65 are unique)
```
bgc75_locations = pd.read_csv('../data/clusterfinder/74validation/74validation_locations.csv')
bgc75_locations.head()
fig, axes = plt.subplots(len(bgc75_locations['Accession'].unique()), figsize=(8, 8))
i = 0
for contig_id, contig_bgcs in bgc75_locations.groupby('Accession'):
num_bgcs = len(contig_bgcs)
title = '{} ({} BGCs)'.format(contig_id, num_bgcs)
axes[i].set_title(title)
axes[i].set_ylim([0, 1.2])
for b, bgc in contig_bgcs.iterrows():
axes[i].plot([bgc['BGC_start'], bgc['BGC_stop']], [1, 1], color='grey')
axes[i].fill_between([bgc['BGC_start'], bgc['BGC_stop']], [1, 1], color='grey', alpha=0.3)
i += 1
plt.tight_layout()
bgc75_domains = pd.read_csv('../data/clusterfinder/74validation/74validation_domains.csv')
bgc75_domains = bgc75_domains[bgc75_domains['evalue'] <= MAX_EVALUE]
bgc75_domains.head()
```
# 3k reference genomes
3376 bacterial genomes, preprocessed using Prodigal & Pfam Hmmscan.
## Reference genomes species
```
bac_species = pd.read_csv('../data/bacteria/species.tsv', sep='\t').set_index('contig_id')
bac_species['family'] = bac_species['species'].apply(lambda species: species.split('_')[0])
bac_species['subspecies'] = bac_species['species'].apply(lambda species: ' '.join(species.split('_')[:2]))
bac_species.head()
bac_families_top = bac_species['family'].value_counts()[:20]
print('Unique families:', len(bac_species['family'].unique()))
bac_families_top
bac_species_top = bac_species['subspecies'].value_counts()[:20]
print('Unique species:', len(bac_species['subspecies'].unique()))
bac_species_top
```
## Reference genomes domains
** Used for: Pfam2vec corpus generation, Novel BGC candidate prediction **
Domain CSV files, one for each bacteria.
```
bac_domains = pd.read_csv('../data/bacteria/domains/AE000511.1.domains.csv', nrows=10)
bac_domains.head()
```
## Reference genomes pfam corpus
** Used for: Pfam2vec training **
Corpus of 23,425,967 pfams domains (words) used to train the pfam2vec embedding using the word2vec algorithm.
Corpus contains pfam domains from one bacteria per line, separated by space.
```
corpus = pd.read_csv('../data/bacteria/corpus/corpus-1e-02.txt', nrows=10, header=None)
corpus.head()
corpus_counts = pd.read_csv('../data/bacteria/corpus/corpus-1e-02.counts.csv').set_index('pfam_id')
corpus_counts[:10][::-1].plot.barh()
```
The pfam counts have a very long-tail distribution with a median of only 101 occurences.
```
corpus_counts.plot.hist(bins=100)
print(corpus_counts.describe())
```
| github_jupyter |
# Homework: Understanding Performance using a LinkedIn Dataset
This homework focuses on understanding performance using a LinkedIn dataset. It is the same dataset that was used in the module entitled "Modeling Data and Knowledge".
```
!pip install pandas
!pip install numpy
!pip install matplotlib
!pip install pymongo[tls,srv]
!pip install lxml
import pandas as pd
import numpy as np
import json
import sqlite3
from lxml import etree
import urllib
import zipfile
import time
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError, OperationFailure
from sklearn.utils import shuffle
```
# Step 1: Acquire and load the data
We will pull a zipfile with the LinkedIn dataset from an url / Google Drive so that it can be efficiently parsed locally. The detailed steps are covered by "Modeling Data and Knowledge" Module, and you should refer to the instructor notes of that module if you haven't done so.
The cell below will download/open the file, and may take a while.
```
url = 'https://raw.githubusercontent.com/chenleshang/OpenDS4All/master/Module3/homework3filewrapper.py'
urllib.request.urlretrieve(url,filename='homework3filewrapper.py')
# url = 'https://upenn-bigdataanalytics.s3.amazonaws.com/linkedin.zip'
# filehandle, _ = urllib.request.urlretrieve(url,filename='local.zip')
```
The next cell creates a pointer to the (abbreviated) LinkedIn dataset, and imports a script that will be used to prepare the dataset to manipulate in this homework.
```
def fetch_file(fname):
zip_file_object = zipfile.ZipFile(filehandle, 'r')
for file in zip_file_object.namelist():
file = zip_file_object.open(file)
if file.name == fname: return file
return None
# linked_in = fetch_file('test_data_10000.json')
from homework3filewrapper import *
```
The next cell replays the data preparation for the LinkedIn dataset done in the module "Modeling Data and Knowledge". After this, you should have eleven dataframes with the following names. The first nine are as in the lecture notebook; the last two are constructed using queries over the first nine, and their meanings are given below.
1. `people_df`
2. `names_df`: Stores the first and last name of each person indexed by ID.
3. `education_df`
4. `groups_df`
5. `skills_df`
6. `experience_df`
7. `honors_df`
8. `also_view_df`
9. `events_df`
10. `recs_df`: 20 pairs of people with the most shared/common skills in descending order. We will use this to make a recommendation for a potential employer and position to each person.
11. `last_job_df`: Person name, and the title and org corresponding to the person's last (most recent) employment experience (a three column dataframe).
The number of rows that are extracted from the dataset can be changed using LIMIT. Here, we are limiting it to 10,000; you can set it to something much smaller (e.g. 1,000) while debugging your code.
The data is also being stored in an SQLite database so that you can see the effect of indexing on the performance of queries.
```
# If using a file on Google Drive, mount it in Colab first.
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# Use open() for a local file, or fetch_file() to pull it out of a remote zip archive.
# Load the LinkedIn sample into the eleven dataframes described above and mirror
# it into an SQLite database (linkedin.db) so the same queries can be run both
# ways. LIMIT caps how many records are parsed; lower it while debugging.
people_df, names_df, education_df, groups_df, skills_df, experience_df, honors_df, also_view_df, events_df, recs_df, last_job_df =\
data_loading(file=open('/content/drive/My Drive/Colab Notebooks/test_data_10000.json'), dbname='linkedin.db', filetype='localobj', LIMIT=10000)
# Open a connection to the same database for the SQL experiments in Step 3.
conn = sqlite3.connect('linkedin.db')
# Sanity Check 1.1 - please do not modify or delete this cell!
recs_df
# Sanity Check 1.2 - please do not modify or delete this cell!
names_df
# Sanity Check 1.3 - please do not modify or delete this cell!
last_job_df
```
# Step 2: Compare Evaluation Orders using DataFrames
We will now explore the effect of various optimizations, including reordering execution steps and (in the case of database operations) creating indices.
We'll start with the code from our lecture notebooks, which does joins between dataframes. The next cell creates two functions, merge and merge_map, which we explore in terms of efficiency. **You do not need to modify this cell.**
```
# Join using nested loops
def merge(S,T,l_on,r_on):
    """Nested-loop join of dataframes S and T on S[l_on] == T[r_on].

    Compares every S row against every T row (O(|S|*|T|) comparisons) and
    prints the comparison count so the cost of each query plan can be seen.
    The r_on column is dropped from the output (it duplicates l_on).
    """
    matched_rows = []
    count = 0
    # Reset to positional indexes so .loc[i] works for any input index.
    S_ = S.reset_index().drop(columns=['index'])
    T_ = T.reset_index().drop(columns=['index'])
    for s_index in range(0, len(S)):
        for t_index in range(0, len(T)):
            count = count + 1
            if S_.loc[s_index, l_on] == T_.loc[t_index, r_on]:
                # DataFrame.append was removed in pandas 2.0 (and was quadratic);
                # collect the joined rows and build the result once at the end.
                matched_rows.append(pd.concat([S_.loc[s_index], T_.loc[t_index].drop(labels=r_on)]))
    print('Merge compared %d tuples'%count)
    if not matched_rows:
        return pd.DataFrame()
    return pd.DataFrame(matched_rows).reset_index(drop=True)
# Join using a *map*, which is a kind of in-memory index
# from keys to (single) values
def merge_map(S,T,l_on,r_on):
    """Hash join: index T by its r_on column, then probe once per S row.

    Requires T[r_on] to be unique (each key maps to a single row).
    Prints the number of tuples touched, for comparison with merge().
    """
    matched_rows = []
    T_map = {}
    count = 0
    # Take each value in the r_on field, and
    # make a map entry for it
    T_ = T.reset_index().drop(columns=['index'])
    for t_index in range(0, len(T)):
        # Make sure we aren't overwriting an entry!
        assert (T_.loc[t_index,r_on] not in T_map)
        T_map[T_.loc[t_index,r_on]] = T_.loc[t_index]
        count = count + 1
    # Now find matches: one O(1) dict probe per S row.
    S_ = S.reset_index().drop(columns=['index'])
    for s_index in range(0, len(S)):
        count = count + 1
        if S_.loc[s_index, l_on] in T_map:
            # DataFrame.append was removed in pandas 2.0 (and was quadratic);
            # accumulate rows in a list and materialise the result at the end.
            matched_rows.append(pd.concat([S_.loc[s_index], T_map[S_.loc[s_index, l_on]].drop(labels=r_on)]))
    print('Merge compared %d tuples'%count)
    if not matched_rows:
        return pd.DataFrame()
    return pd.DataFrame(matched_rows).reset_index(drop=True)
```
## Step 2.1: Find a good order of evaluation.
The following function, `recommend_jobs_basic`, takes as input `recs_df`, `names_df` and `last_job_df` and returns the name of each `person_1` and the most recent `title` and `org` of each `person_2`.
We will time how long it takes to execute `recommend_jobs_basic` using the ordering `recs_df`, `names_df` and `last_job_df`.
Your task is to improve this time by changing the join ordering used in `recommend_jobs_basic`.
```
def recommend_jobs_basic(recs_df, names_df, last_job_df):
    """For each recommended pair, attach person_1's name and person_2's
    most recent job (org and title), sorted by family name."""
    # First join: attach person_1's name to each recommendation pair.
    named = merge(recs_df, names_df, 'person_1', 'person')
    named = named[['family_name','given_name','person_1','person_2']]
    # Second join: attach person_2's latest employment (org and title).
    with_jobs = merge(named, last_job_df, 'person_2', 'person')
    with_jobs = with_jobs[['family_name','given_name','person_2','org','title']]
    return with_jobs.sort_values('family_name')
```
```
%%time
recs_new_df = recommend_jobs_basic(recs_df, names_df, last_job_df)
if(len(recs_new_df.columns) != 5):
raise AssertionError('Wrong number of columns in recs_new_df')
```
Modify the function `recommend_jobs_basic` in the cell below. See if it is possible to improve the efficiency by changing the join ordering to reduce the number of comparisons made in the `merge` function.
```
# TODO: modify the order of joins to reduce comparisons
def recommend_jobs_basic_reordered(recs_df, names_df, last_job_df):
# YOUR CODE HERE
%%time
recs_new_df = recommend_jobs_basic_reordered(recs_df, names_df, last_job_df)
if(len(recs_new_df.columns) != 5):
raise AssertionError('Wrong number of columns in recs_new_df')
names_df
recs_df
last_job_df
```
## Step 2.2: Perform selections early using `merge` and `merge_map`
Reimplement `recommend_jobs_basic` using the `merge` and `merge_map` functions instead of Pandas' merge. Try to find the **most efficient** way by also considering the ordering.
```
# TODO: Reimplement recommend jobs using our custom merge and merge_map functions
def recommend_jobs_new(recs_df, names_df, last_job_df):
# YOUR CODE HERE
# Sanity Check 2.1 - please do not modify or delete this cell!
%%time
recs_new_df = recommend_jobs_new(recs_df, names_df, last_job_df)
if(len(recs_new_df.columns) != 5):
raise AssertionError('Wrong number of columns in recs_new_df')
```
# Step 3. Query Optimization in Databases
Relational databases optimize queries by performing selections (and projections) as early as possible, and finding a good join ordering. We will therefore implement the recommend_jobs function using SQLite and see if it is faster.
Dataframes `names_df`, `rec_df` and `last_job_df` are already stored in database `linkedin.db` with table name `names`, `recs` and `lastjob`.
## Step 3.1
In the cell below, implement the `recommend_jobs_basic` function in SQL. Since the query is very fast, we will run the query 100 times to get an accurate idea of the execution time.
```
%%time
for i in range(0, 100):
# YOUR CODE HERE
```
## Step 3.2
Although the execution is pretty fast, we can also create indices to make it even faster. Use the syntax `CREATE INDEX I ON T(C)` to create an index on the three tables `recs`, `names`, and `lastjob`. Replace `I` with the name of the index that you wish to use, `T` with the name of the table, and `C` with the name of the column.
If you need to change the indices, you must drop them first using the following syntax:
`conn.execute('drop index if exists I')`
where I is the name of the index to be dropped.
```
conn.execute('begin transaction')
# YOUR CODE HERE
conn.execute('commit')
```
In the cell below, rerun the query that you defined in Step 3.1 100 times to get a new timing. The database will now use the indices that you created if they are beneficial to the execution.
Is the query faster?
```
%%time
for i in range(0, 100):
# YOUR CODE HERE
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Beyond Hello World, A Computer Vision Example
In the previous exercise you saw how to create a neural network that figured out the problem you were trying to solve. This gave an explicit example of learned behavior. Of course, in that instance, it was a bit of overkill because it would have been easier to write the function Y=2x-1 directly, instead of bothering with using Machine Learning to learn the relationship between X and Y for a fixed set of values, and extending that for all values.
But what about a scenario where writing rules like that is much more difficult -- for example a computer vision problem? Let's take a look at a scenario where we can recognize different items of clothing, trained from a dataset containing 10 different types.
## Start Coding
Let's start with our import of TensorFlow.
(**Note:** You can run the notebook using TensorFlow 2.5.0)
```
#!pip install tensorflow==2.5.0
import tensorflow as tf
print(tf.__version__)
```
The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this:
```
mnist = tf.keras.datasets.fashion_mnist
```
Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels.
```
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
```
What do these values look like? Let's print a training image, and a training label to see... Experiment with different indices in the array. For example, also take a look at index 42... that's a different boot than the one at index 0
```
import numpy as np
np.set_printoptions(linewidth=200)
import matplotlib.pyplot as plt
plt.imshow(training_images[789])
print(training_labels[789])
print(training_images[789])
```
You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this:
```
training_images = training_images / 255.0
test_images = test_images / 255.0
```
Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. After all, when you're done, you're going to want to try it out with data that it hadn't previously seen!
Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them.
```
# A simple feed-forward classifier for 28x28 Fashion-MNIST images:
# Flatten turns each 28x28 image into a 784-long vector, the hidden Dense
# layer learns 128 ReLU features, and the final Dense layer emits one
# softmax probability per clothing class (10 classes).
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
```
**Sequential**: That defines a SEQUENCE of layers in the neural network
**Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set.
**Dense**: Adds a layer of neurons
Each layer of neurons need an **activation function** to tell them what to do. There's lots of options, but just use these for now.
**Relu** effectively means "If X>0 return X, else return 0" -- so what it does is only pass values 0 or greater to the next layer in the network.
**Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding!
The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit ** asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like.
```
# Adam adapts the learning rate per weight; sparse_categorical_crossentropy
# matches integer labels (0-9) directly against the 10 softmax outputs.
model.compile(optimizer = tf.optimizers.Adam(),
              loss = 'sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train for 5 passes over the training set.
model.fit(training_images, training_labels, epochs=5)
```
Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.
But how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try:
```
model.evaluate(test_images, test_labels)
```
For me, that returned an accuracy of about .8838, which means it was about 88% accurate. As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this.
To explore further, try the below exercises:
# Exploration Exercises
```
#import tensorflow as tf
```
### Exercise 1:
For this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent?
```
classifications = model.predict(test_images)
print(classifications[0])
```
**Hint:** try running `print(test_labels[0])` -- and you'll get a `9`. Does that help you understand why this list looks the way it does?
```
print(test_labels[0])
```
### E1Q1: What does this list represent?
1. It's 10 random meaningless values
2. It's the first 10 classifications that the computer made
3. It's the probability that this item is each of the 10 classes
#### Answer:
The correct answer is (3)
The output of the model is a list of 10 numbers. These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnist#labels), i.e. the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities.
For index 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that the image is most likely an ankle boot.
### E1Q2: How do you know that this list tells you that the item is an ankle boot?
1. There's not enough information to answer that question
2. The 10th element on the list is the biggest, and the ankle boot is labelled 9
3. The ankle boot is label 9, and there are 0->9 elements in the list
#### Answer
The correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot
### Exercise 2:
Let's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case?
```
# Exercise 2: same pipeline on the handwritten-digit MNIST dataset,
# with a wider hidden layer (1024 neurons instead of 128).
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
# Scale pixel values from 0-255 down to 0-1.
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(1024, activation=tf.nn.relu), # Try experimenting with this layer
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
# NOTE(review): no metrics are passed here, so fit()/evaluate() report loss only.
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
# Inspect the per-class probabilities for the first test image.
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### E2Q1: Increase to 1024 Neurons -- What's the impact?
1. Training takes longer, but is more accurate
2. Training takes longer, but no impact on accuracy
3. Training takes the same time, but is more accurate
#### Answer
The correct answer is (1) by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly!
### Exercise 3:
### E3Q1: What would happen if you remove the Flatten() layer. Why do you think that's the case?
#### Answer
You get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of writing all the code to handle that ourselves, we add the Flatten() layer at the beginning, and when the arrays are loaded into the model later, they'll automatically be flattened for us.
```
# Exercise 3: Flatten is required because Dense layers expect 1-D input;
# removing it makes the 28x28 input shape incompatible with the model.
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
# Scale pixel values to the 0-1 range.
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), #Try removing this layer
                                    tf.keras.layers.Dense(64, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
# NOTE(review): duplicate evaluate call — harmless, repeats the test-set evaluation.
model.evaluate(test_images,test_labels)
```
### Exercise 4:
Consider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? For example, try training the network with 5.
#### Answer
You get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.
```
# Exercise 4: the output layer has 8 neurons here but MNIST has 10 classes
# (digits 0-9), so training errors out once a label >= 8 is encountered —
# the final layer must have one neuron per class.
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
# Scale pixel values to the 0-1 range.
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(64, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(8, activation=tf.nn.softmax) # Try experimenting with this layer
                                    ])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy',
              metrics = ['accuracy'])
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 5:
Consider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10.
#### Answer
There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary.
```
# Exercise 5: an extra hidden layer between the input and the output layer.
# NOTE(review): the 712-unit layer uses softmax as a *hidden* activation,
# which squashes activations onto a probability simplex — presumably relu
# was intended; left unchanged here.
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
# Scale pixel values to the 0-1 range.
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(712,activation=tf.nn.softmax),
                                    tf.keras.layers.Dense(256, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10,activation=tf.nn.softmax)
                                    ])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy',
              metrics = ['accuracy'])
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 6:
### E6Q1: Consider the impact of training for more or less epochs. Why do you think that would be the case?
- Try 15 epochs -- you'll probably get a model with a much better loss than the one with 5
- Try 30 epochs -- you might see the loss value stops decreasing, and sometimes increases.
This is a side effect of something called 'overfitting' which you can learn about later and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! :)
```
# Exercise 6: vary the epoch count. More epochs keep lowering training loss
# until overfitting sets in, at which point loss may plateau or rise.
mnist = tf.keras.datasets.mnist
(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()
# Scale pixel values to the 0-1 range.
training_images = training_images/255.0
test_images = test_images/255.0
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(),
                                    tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5) # Experiment with the number of epochs
model.evaluate(test_images, test_labels)
# Inspect an arbitrary test example (index 34).
classifications = model.predict(test_images)
print(classifications[34])
print(test_labels[34])
```
### Exercise 7:
Before you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results?
```
# Exercise 7: train on raw 0-255 pixel values (normalization commented out)
# to observe the effect of unnormalized inputs on training.
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
#training_images=training_images/255.0 # Experiment with removing this line
#test_images=test_images/255.0 # Experiment with removing this line
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(512, activation=tf.nn.relu),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5)
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])
```
### Exercise 8:
Earlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action...
```
class myCallback(tf.keras.callbacks.Callback):
  """Stop training as soon as training accuracy reaches the target threshold."""
  def on_epoch_end(self, epoch, logs=None):
    # Keras passes the epoch's metrics in `logs`. Guard against `logs` being
    # absent and against 'accuracy' missing (e.g. metric not compiled), which
    # would make the original `logs.get('accuracy') >= 0.6` raise a TypeError.
    # (Also avoids the mutable-default-argument pitfall of `logs={}`.)
    acc = (logs or {}).get('accuracy')
    if acc is not None and acc >= 0.6: # Experiment with changing this value
      print("\nReached 60% accuracy so cancelling training!")
      self.model.stop_training = True
# Instantiate the callback and train; fit() calls on_epoch_end after every
# epoch and stops early once the accuracy threshold is reached.
callbacks = myCallback()
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# Scale pixel values to the 0-1 range.
training_images=training_images/255.0
test_images=test_images/255.0
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(512, activation=tf.nn.relu),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# 'accuracy' must be in metrics for the callback's logs.get('accuracy') to work.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])
```
| github_jupyter |
# Initialization
```
!pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer, util
import torch
import json
import numpy as np
import pandas as pd
ISSUES_FILE = 'drive/MyDrive/bugs_data/eall.csv'
CUSTOM_MODEL_PATH = 'drive/MyDrive/bugs_data/models/paraphrase-distilroberta-base-v1-eall-40000'
```
# Issues Helper Methods
```
def get_issues(issues_file):
    """Load the issues CSV and add a combined 'full_description' column
    (short_desc and description joined by a newline)."""
    frame = pd.read_csv(issues_file)
    # astype(str) guards against NaN/non-string cells before concatenation.
    short_text = frame['short_desc'].astype(str)
    long_text = frame['description'].astype(str)
    frame['full_description'] = short_text + '\n' + long_text
    return frame
```
# Model Helper Methods
```
def get_base_model():
    """Return the pretrained, general-purpose paraphrase SentenceTransformer."""
    return SentenceTransformer('paraphrase-distilroberta-base-v1')
def get_custom_model(name):
    """Return a SentenceTransformer loaded from *name* (local path or model id),
    e.g. a fine-tuned checkpoint saved by the training section below."""
    return SentenceTransformer(name)
```
# Main
```
issues = get_issues(ISSUES_FILE)
issues = issues.iloc[-10000:].reset_index(drop=True)
len(issues)
model = get_custom_model(CUSTOM_MODEL_PATH)
model
model = get_base_model()
```
Gather the newest duplicates from the issues set
```
issues_new_duplicates = issues.iloc[-1500:].loc[issues['dup_id'] == issues['dup_id']].reset_index(drop=True)
!nvidia-smi
```
Ensure that the newest duplicates gathered are not in the set of issues from which we will try to retrieve top-k similar issues
```
issues_pool = issues[~issues['bug_id'].isin(issues_new_duplicates['bug_id'])].reset_index(drop=True)
```
Calculate embeddings for the issues pool
```
embeddings = model.encode(np.array(issues_pool['full_description']), convert_to_tensor=True)
```
# Top-K Retrieval Methods
```
def get_top_k_similar_issues(query_embedding, embeddings, top_k):
    """Return the top_k most similar corpus entries for a single query embedding.

    Thin wrapper around util.semantic_search; the [0] unwraps the single-query
    result, giving a list of dicts with 'corpus_id' (row index into the pool)
    and 'score', best match first.
    """
    return util.semantic_search(query_embedding, embeddings, top_k=top_k)[0]
def evaluate_recall_at_top_k(model, query_issues, pool_issues, embeddings, top_k):
    """Compute recall@top_k for duplicate-issue retrieval.

    For each query issue: encode its full_description, retrieve the top_k
    most similar pool issues, and count the query as correct if any retrieved
    issue shares its master_id. Prints the running recall after each query
    and returns the final recall.
    """
    count = 0
    correct = 0
    for index, row in query_issues.iterrows():
        count += 1
        query_embedding = model.encode(row['full_description'], convert_to_tensor=True)
        results = get_top_k_similar_issues(query_embedding, embeddings, top_k)
        # any() short-circuits on the first hit; the original kept scanning
        # all top_k results even after a match had been found.
        if any(pool_issues.iloc[result['corpus_id']]['master_id'] == row['master_id']
               for result in results):
            correct += 1
        print(correct / count)
    return correct / count
evaluate_recall_at_top_k(model, issues_new_duplicates, issues_pool, embeddings, 25)
```
# Fine-Tuning with OnlineContrastiveLoss
```
!pip install -U sentence-transformers
from sentence_transformers import SentenceTransformer, InputExample, losses, evaluation
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import torch
PAIRS_FILE = 'drive/MyDrive/bugs_data/uall_pairs_40000.csv'
MODEL_OUTPUT_PATH = 'drive/MyDrive/bugs_data/models/paraphrase-distilroberta-base-v1-uall-40000'
# Load labelled description pairs (duplicate / non-duplicate) and hold out 10%
# for evaluation during training.
pairs = pd.read_csv(PAIRS_FILE)
pairs_train, pairs_test = train_test_split(pairs, test_size=0.1)
# Wrap each training pair in an InputExample with a float 0/1 label.
train_data = []
for index, pair in pairs_train.iterrows():
    train_sample = InputExample(texts=[pair['description_1'], pair['description_2']], label=float(pair['label']))
    train_data.append(train_sample)
# Evaluation inputs: parallel lists of descriptions plus their gold labels.
descriptions_1 = pairs_test['description_1'].to_list()
descriptions_2 = pairs_test['description_2'].to_list()
scores = pairs_test['label'].to_list()
model = SentenceTransformer('paraphrase-distilroberta-base-v1')
model
# OnlineContrastiveLoss pulls duplicate pairs together and pushes
# non-duplicates beyond the margin, measured in cosine distance.
distance_metric = losses.SiameseDistanceMetric.COSINE_DISTANCE
margin = 0.5
evaluator = evaluation.EmbeddingSimilarityEvaluator(descriptions_1, descriptions_2, scores, write_csv=True)
train_dataloader = DataLoader(train_data, shuffle=True, batch_size=64)
train_loss = losses.OnlineContrastiveLoss(model=model, distance_metric=distance_metric, margin=margin)
# Fine-tune for 5 epochs, evaluating every 50 steps and keeping the best checkpoint.
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=5, warmup_steps=100, evaluator=evaluator, evaluation_steps=50, output_path=MODEL_OUTPUT_PATH, save_best_model=True)
model.save(MODEL_OUTPUT_PATH)
```
Evaluate Embeddings
```
import matplotlib.pyplot as plt
model_to_evaluate = SentenceTransformer('paraphrase-distilroberta-base-v1')
def evaluate_embeddings(model, pairs):
    """Scatter-plot cosine-similarity predictions for labelled pairs.

    Encodes both descriptions of every pair, computes their cosine
    similarity, min-max normalises the predictions to [0, 1], and plots
    them coloured by the gold label (red = 0, purple = 1). Saves the
    figure to temp.png as a side effect; returns None.
    """
    # The cosine module is stateless — build it once instead of per row.
    cos_similarity = torch.nn.CosineSimilarity(dim=0, eps=1e-6)
    evaluations_list = []
    for index, row in pairs.iterrows():
        embedding_1 = model.encode(row['description_1'], convert_to_tensor=True)
        embedding_2 = model.encode(row['description_2'], convert_to_tensor=True)
        similarity = cos_similarity(embedding_1, embedding_2)
        evaluations_list.append({
            'prediction': similarity.item(),
            'label': row['label']
        })
    df = pd.DataFrame(evaluations_list)
    # Min-max normalise predictions into [0, 1] for plotting.
    df['prediction'] = (df['prediction'] - df['prediction'].min()) / (df['prediction'].max() - df['prediction'].min())
    colors = {0: '#ff6361', 1: '#58508d'}
    plt.scatter(df.index, df['prediction'], c=df['label'].map(colors), s=5)
    plt.xticks([])
    plt.savefig('temp.png', dpi=300)
    plt.show()
evaluate_embeddings(model_to_evaluate, pairs_test[-2000:])
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
m = 5 # 5, 50, 100, 500, 2000
train_size = 500 # 100, 500, 2000, 10000
desired_num = train_size + 1000
tr_i = 0
tr_j = train_size
tr_k = desired_num
tr_i, tr_j, tr_k
```
# Generate dataset
```
# Fix the global seed so the label draws are reproducible.
np.random.seed(12)
# 5000 samples, each assigned one of 10 classes uniformly at random.
y = np.random.randint(0,10,5000)
# idx[i] is a boolean mask selecting the samples of class i.
idx= []
for i in range(10):
    print(i,sum(y==i))
    idx.append(y==i)
# Each class is a tight 2-d Gaussian blob (covariance 0.01*I) around its own mean.
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
bg_idx = [ np.where(idx[3] == True)[0],
np.where(idx[4] == True)[0],
np.where(idx[5] == True)[0],
np.where(idx[6] == True)[0],
np.where(idx[7] == True)[0],
np.where(idx[8] == True)[0],
np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
np.reshape(a,(2*m,1))
# Build the mosaic dataset: each sample is m 2-d points flattened into a
# length-2m vector. Exactly one point (at slot fg_idx) comes from a
# foreground class (0-2); the other m-1 come from background classes (3-9).
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
    # Seed per sample so the whole dataset is reproducible.
    np.random.seed(j)
    # Choose which foreground class appears, and in which slot.
    fg_class = np.random.randint(0,3)
    fg_idx = np.random.randint(0,m)
    a = []
    for i in range(m):
        if i == fg_idx:
            b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
            a.append(x[b])
            # print("foreground "+str(fg_class)+" present at " + str(fg_idx))
        else:
            bg_class = np.random.randint(3,10)
            b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
            a.append(x[b])
            # print("background "+str(bg_class)+" present at " + str(i))
    a = np.concatenate(a,axis=0)
    mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
    mosaic_label.append(fg_class)
    fore_idx.append(fg_idx)
# Stack into shape (desired_num, 2m): one flattened mosaic per row.
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
for j in range(m):
print(mosaic_list_of_images[0][2*j:2*j+2])
def create_avg_image_from_mosaic_dataset(mosaic_dataset, labels, foreground_index, dataset_number, m):
    """
    Build "averaged" examples by re-weighting the m 2-D blocks of each mosaic.

    mosaic_dataset : sequence of vectors of length 2*m (m stacked 2-D blocks,
                     one of which is the foreground block)
    labels : per-example foreground class labels (returned unchanged)
    foreground_index : per-example block index of the foreground; that block
                       receives the larger weight
    dataset_number : weighting ratio. For value j the foreground block weight
                     is j/m and each background block weight is
                     (m-j)/((m-1)*m), so the m weights always sum to 1.
    m : number of 2-D blocks per mosaic

    Returns (averaged images, labels, foreground_index).
    """
    avg_image_dataset = []
    cnt = 0                 # number of examples averaged (always all of them)
    counter = np.zeros(m)   # how often each block position was preferred
    for i in range(len(mosaic_dataset)):
        img = torch.zeros([2], dtype=torch.float64)
        # Seed kept for reproducibility parity with earlier runs (no
        # randomness is actually consumed in this loop body).
        np.random.seed(int(dataset_number*10000 + i))
        give_pref = foreground_index[i]
        for j in range(m):
            if j == give_pref:
                img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m  # 2 is data dim
            else:
                img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)
        # BUGFIX/simplification: give_pref is assigned foreground_index[i]
        # above, so the original `if give_pref == foreground_index[i]` was
        # always true and its else-branch was dead code; both branches
        # incremented counter identically.
        cnt += 1
        counter[give_pref] += 1
        avg_image_dataset.append(img)
    print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
    print("the averaging are done as ", counter)
    return avg_image_dataset, labels, foreground_index
# Build the averaged training set (dataset 1: foreground weight 1/m) and the
# test set (dataset m: foreground weight m/m = 1, background weight 0).
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
print("=="*40)
# Visualise the training split coloured by foreground class.
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
# Test set is rescaled by 1/m for plotting and then persistently below.
x1 = (test_dataset).numpy() / m
y1 = np.array(labels)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("test dataset4")
test_dataset[0:10]/m
# NOTE: the division below mutates test_dataset in place for all later cells.
test_dataset = test_dataset/m
test_dataset[0:10]
test_dataset.shape
class MosaicDataset(Dataset):
    """Torch dataset wrapping pre-built mosaic examples.

    Each item is a pair ``(image, label)`` where ``image`` is one averaged
    mosaic vector and ``label`` is its foreground class index.
    """

    def __init__(self, mosaic_list_of_images, mosaic_label):
        """Store the image sequence and the parallel label sequence."""
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label

    def __len__(self):
        """Number of examples (one label per example)."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return the ``(image, label)`` pair at position ``idx``."""
        return self.mosaic[idx], self.label[idx]
avg_image_dataset_1[0].shape
avg_image_dataset_1[0]
# Wrap train/test tensors in DataLoaders; only the training split is shuffled.
batch = 200
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(test_dataset, labels )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
# testdata_11 = MosaicDataset(test_dataset, labels )
# testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
    """Two-layer MLP classifier: 2 -> 50 -> 3 with a ReLU in between.

    Weights are Xavier-normal initialised and biases zeroed, so runs are
    reproducible under a fixed torch seed.
    """

    def __init__(self):
        super(Whatnet, self).__init__()
        self.linear1 = nn.Linear(2, 50)
        self.linear2 = nn.Linear(50, 3)
        torch.nn.init.xavier_normal_(self.linear1.weight)
        torch.nn.init.zeros_(self.linear1.bias)
        torch.nn.init.xavier_normal_(self.linear2.weight)
        torch.nn.init.zeros_(self.linear2.bias)

    def forward(self, x):
        hidden = F.relu(self.linear1(x))
        return self.linear2(hidden)
def calculate_loss(dataloader, model, criter):
    """Return the mean per-batch loss of `model` over `dataloader`.

    Puts the model in eval mode and disables gradient tracking; inputs and
    targets are moved to the GPU (the notebook assumes CUDA is available).
    """
    model.eval()
    r_loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs, targets = inputs.to("cuda"), targets.to("cuda")
            outputs = model(inputs)
            r_loss += criter(outputs, targets).item()
            num_batches += 1
    # BUGFIX: the original divided by the leaked enumerate() loop variable,
    # which raised NameError on an empty loader; count batches explicitly.
    return r_loss / num_batches if num_batches else 0.0
def test_all(number, testloader, net):
    """Evaluate `net` on `testloader` and print accuracy diagnostics.

    `number` only labels the printed report; nothing is returned.
    """
    correct = 0
    total = 0
    all_targets = []
    all_preds = []
    with torch.no_grad():
        for images, targets in testloader:
            images, targets = images.to("cuda"), targets.to("cuda")
            all_targets.append(targets.cpu().numpy())
            logits = net(images)
            _, predicted = torch.max(logits.data, 1)
            all_preds.append(predicted.cpu().numpy())
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
    all_preds = np.concatenate(all_preds, axis=0)
    all_targets = np.concatenate(all_targets, axis=0)
    print("unique out: ", np.unique(all_targets), "unique pred: ", np.unique(all_preds))
    print("correct: ", correct, "total ", total)
    print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number, 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list, lr_list):
    """Train a fresh Whatnet for every learning rate in `lr_list`.

    For each LR the model is re-seeded and re-initialised, trained for up to
    1000 epochs with Adam (early-stopped once the full-train loss drops to
    <= 0.05), then evaluated on the train split and on every loader in
    `testloader_list`.

    `ds_number` is currently unused; it is kept so existing callers keep
    working. Returns a list of per-epoch loss curves, one per learning rate.
    """
    final_loss = []
    for LR in lr_list:
        print("--"*20, "Learning Rate used is", LR)
        torch.manual_seed(12)
        net = Whatnet().double()
        net = net.to("cuda")
        criterion_net = nn.CrossEntropyLoss()
        # BUGFIX: actually use the swept learning rate. Previously the
        # optimiser was hard-coded to lr=0.001, so every "LR" run trained
        # with the same learning rate.
        optimizer_net = optim.Adam(net.parameters(), lr=LR)
        loss_curi = []
        epochs = 1000
        # Loss before any training, for the curve's epoch-0 point.
        running_loss = calculate_loss(trainloader,net,criterion_net)
        loss_curi.append(running_loss)
        print('epoch: [%d ] loss: %.3f' %(0,running_loss))
        for epoch in range(epochs):  # loop over the dataset multiple times
            net.train()
            for i, data in enumerate(trainloader, 0):
                # get the inputs
                inputs, labels = data
                inputs, labels = inputs.to("cuda"),labels.to("cuda")
                # zero the parameter gradients
                optimizer_net.zero_grad()
                # forward + backward + optimize
                outputs = net(inputs)
                loss = criterion_net(outputs, labels)
                loss.backward()
                optimizer_net.step()
            # Re-evaluate the loss over the full train set once per epoch
            # (the per-batch running sum in the original was dead code,
            # overwritten here before it was ever read).
            running_loss = calculate_loss(trainloader,net,criterion_net)
            if(epoch%200 == 0):
                print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
            loss_curi.append(running_loss)  # loss per epoch
            if running_loss<=0.05:
                print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
                break
        print('Finished Training')
        # Final train-set accuracy.
        correct = 0
        total = 0
        with torch.no_grad():
            for data in trainloader:
                images, labels = data
                images, labels = images.to("cuda"), labels.to("cuda")
                outputs = net(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
        # Evaluate on every test loader.
        for i, j in enumerate(testloader_list):
            test_all(i+1, j,net)
        print("--"*40)
        final_loss.append(loss_curi)
    return final_loss
# Run the learning-rate sweep on dataset 1 and plot each loss curve.
train_loss_all=[]
testloader_list= [ testloader_1]
lr_list = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5 ]
fin_loss = train_all(trainloader_1, 1, testloader_list, lr_list)
train_loss_all.append(fin_loss)
%matplotlib inline
len(fin_loss)
# One curve per learning rate in lr_list.
for i,j in enumerate(fin_loss):
    plt.plot(j,label ="LR = "+str(lr_list[i]))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
```
| github_jupyter |
```
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import datetime as dt
import numpy as np
from census import Census # This is new...
import requests, io # internet and input tools
import zipfile as zf # zip file tools
import os
#import weightedcalcs as wc
#import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
print("")
print("**********************************************************************************")
print("Downloading and processing BLS file")
print("")
# QCEW 2017 annual single file: all area/industry/ownership aggregations.
url = "https://data.bls.gov/cew/data/files/2017/csv/2017_annual_singlefile.zip"
# This will read in the annual, single file. It's big, but has all we want...
r = requests.get(url)
# convert bytes to zip file (held entirely in memory; nothing hits disk)
bls_sf = zf.ZipFile(io.BytesIO(r.content))
print('Type of zipfile object:', type(bls_sf))
# Only load the columns we need, to keep memory usage down.
clist = ['area_fips', 'own_code', 'industry_code', 'agglvl_code', 'size_code',
         'year', 'disclosure_code', 'annual_avg_estabs',
         'annual_avg_emplvl', 'total_annual_wages','avg_annual_pay']
df = pd.read_csv(bls_sf.open(bls_sf.namelist()[0]), usecols= clist)
```
Then the file below cleans stuff up. The most important is the `NAICS_county_level` which selects the NAICS aggregation and then the county aggregation. Website describing this is here:
[https://data.bls.gov/cew/doc/titles/agglevel/agglevel_titles.htm](https://data.bls.gov/cew/doc/titles/agglevel/agglevel_titles.htm)
```
# agglvl_code 75 = county by 3-digit NAICS (see BLS agglevel titles page).
NAICS_county_level = 75
# This is the code that will select only counties at the 3 digit NAICS level
df_county = df[df.agglvl_code == NAICS_county_level].copy()
df_county = df_county[df_county.own_code == 5]
# Only grab private stuff (own_code 5 = private ownership)
df_county = df_county[(df_county.area_fips.str[0:2] != "72") & (df_county.area_fips.str[0:2] != "78")
                      & (df_county.area_fips.str[0:2] != "02") & (df_county.area_fips.str[0:2] != "15")]
# Drop Puerto Rico, Virgin Islands, Alaska, Hawaii... this may not be doing what I think it is, as it looks like these guys are still there.
# Does not matter, as the analysis is performed with them; drop them when we do the map.
df_county["sup_ind"] = df_county.industry_code.str[1].astype(int)
# sometimes there are super industries floating around that we want to drop.
# not clear if this matters given the conditioning already done above
df_county = df_county[df_county["sup_ind"] > 0]
df_county.area_fips = df_county.area_fips.astype(str)
# National employment by industry; used later to build county trade weights.
df_national = df_county.groupby("industry_code").agg({"annual_avg_emplvl": "sum"})
df_national.reset_index(inplace = True)
df_national.rename({"annual_avg_emplvl":"nat_emplvl"}, axis = 1, inplace = True)
```
Let's compute annual employment.
```
# Sanity check: total private employment should be close to FRED's USPRIV for 2017.
df_county.annual_avg_emplvl.sum()
```
which matches well with FRED (https://fred.stlouisfed.org/series/USPRIV) in 2017 (off by a couple million)
### Read in Trade Data and Merge
```
# Trade data by 3-digit NAICS; keep naics3 as a string so codes retain
# leading zeros and match industry_code.
imports_by_naics = pd.read_csv(".//data//imports_by_naics.csv", dtype= {"naics3": str})
imports_by_naics.set_index(["naics3"], inplace = True)
dftrade_17_naics3 = pd.read_csv(".//data//2017_imports_by_naics.csv", dtype= {"naics3": str})
dftrade_17_naics3.set_index(["naics3"], inplace = True)
dftrade_17_naics3.head()
# Attach 2017 China trade to the national table; industries with no recorded
# trade get 0 so the weights below still sum to one.
df_national = df_national.merge(dftrade_17_naics3["2017_china_trade"],
                                left_on = "industry_code", right_index = True, how = "left")
df_national["2017_china_trade"].replace(np.nan, 0, inplace = True)
df_national["trd_wts"] = (df_national["2017_china_trade"]/df_national["2017_china_trade"].sum())
```
Then check to make sure that the trade weights sum up to one.
```
# Sanity check: trade weights should sum to 1.
df_national.trd_wts.sum()
```
---
### Step 3 Merge trade data with the county data
This is the most time-consuming step (in terms of computation time). So start with the county data set, `groupby` county, then apply a function which will create (i) time-varying exports (which are constructed with the 2017 weights) and (ii) time-varying tariffs (also constructed using the 2017 weights).
The final output is a big dataframe that has county, time, export exposure and tariff exposure.
```
print("")
print("**********************************************************************************")
print("Constructing County-Level Tariffs and Exports")
print("")
# Group once by county FIPS; create_trade_weights is applied per group below.
grp = df_county.groupby("area_fips")
# Let's just look at one of the groups...
#grp.get_group("1001").head()
```
Below are the two key functions that deliver this. Basically it does the following:
- Take a group at county level, merge it with the national level data set, so the resulting `df` has the county and nation.
- Create the weights.
- Then merge it with the exports, this will now be a df with exports varying over time, but with the fixed weights associated with each entry.
- Then aggregate the national exports by NAICS by the county level weights, giving a county level time series of exports.
---
**Updates**
- The tariff measure does the following: fix a county, take employment in industry $i$ and divide by total county employment, then sum up tariffs across industries with the weights being the county level share. The idea here is if all employment in a county is soy, then the "effective" tariff that the county faces is the soy tariff.
In equation terms: here $c$ is county, $s$ is industry, $n$, below is nation.
$\tau_{c,t} = \sum_{s\in S}\frac{L_{c,s}}{L_{c,S}} \tau_{s,t}$
Note that below, I make one further adjustment to make sure that $L_{c,S}$ is for all employment, not just the sum across $L_{c,s}$
- The export measure: What am I doing: take a county's employment in industry $i$ and divide by **national** level employment in industry $i$. Then a "county's" exports is the sum across industries, weighted by the county's share of national employment in each industry. The idea here is, if a county has all national-level employment in an industry, all of that industry's exports will be assigned to that county.
$\mbox{EX}_{c,t} = \frac{1}{L_{c,S,2017}}\sum_{s\in S}\frac{L_{c,s,2017}}{L_{n,s,2017}} \mbox{EX}_{s,t}$
and then I divide by total employment in the county to have a county per worker measure. This is done for exports to China and then export in total. Note that below, I make one further adjustment to make sure that $L_{c,S}$ is for all employment, not just the sum across $L_{c,s}$
```
def create_trade_weights(df):
    """Build one county's time series of trade exposure.

    Takes one county's rows (a groupby group of df_county), merges in the
    national employment table, weights the NAICS-level trade/tariff series by
    the county's employment shares, and returns a DataFrame indexed by time.
    Relies on the module-level ``df_national``, ``imports_by_naics`` and
    ``trade_by_naics``.
    """
    new_df = df.merge(df_national[["nat_emplvl",
                                   "industry_code", "trd_wts"]],
                      how = "outer", left_on = "industry_code", right_on = "industry_code")
    # Merge the nation with the county. Why? We want to make sure all the
    # NAICS codes line up properly (outer join keeps codes the county lacks).
    new_df["emp_wts"] = (new_df.annual_avg_emplvl/new_df.nat_emplvl)
    # create the weights: county share of national employment, by industry
    foo_df = imports_by_naics.merge(new_df[["emp_wts","trd_wts",
                                            "industry_code",
                                            "annual_avg_emplvl"]], left_index = True, right_on = "industry_code")
    # Now each weight is for a NAICS code; we merge it with the export trade
    # data set, so for all naics, all time... This is a big df with all trade
    # data plus this county's weights for each naics code.
    foo_grp = foo_df.groupby("time")
    # group by time.
    foo = foo_grp.apply(trade_by_naics)
    # Then for each time grouping, we aggregate across the naics codes
    # according to the weights above.
    foo = foo.droplevel(1)
    foo["fips"] = df["area_fips"].astype(str).iloc[0]
    # some cleaning of the df
    foo["total_employment"] = new_df.annual_avg_emplvl.sum()
    # get total employment.
    return pd.DataFrame(foo)
def trade_by_naics(df):
    """Aggregate one time-period's NAICS rows into county-level scalars.

    Imports are weighted by the county's share of national industry
    employment (``emp_wts``) and expressed per county worker; the tariff is
    the local employment-share weighted average tariff. Returns a one-row
    DataFrame.
    """
    county_emp = df["annual_avg_emplvl"].sum()
    # Weight trade by the county's share of national employment, then put it
    # on a per-worker basis by dividing through by county employment.
    china_imp_pc = (df["china_trade"] * df["emp_wts"]).sum() / county_emp
    total_imp_pc = (df["total_trade"] * df["emp_wts"]).sum() / county_emp
    # Local employment-share weighted tariff: if everyone in the county works
    # in soy, the county faces the soybean tariff.
    tariff = (df["annual_avg_emplvl"] * df["tariff_trd_w_avg"]).sum() / county_emp
    return pd.DataFrame({
        "total_imp_pc": [total_imp_pc],
        "china_imp_pc": [china_imp_pc],
        "tariff": [tariff],
        "emplvl_2017": county_emp,
    })
```
Then apply the function to the county groups
```
# Apply per county; this is the slow step (one groupby-apply per county-time).
trade_county = grp.apply(create_trade_weights)
```
And we are done and output the file to where we want it
**One more adjustment.** Notice that in the function, when we are merging, we are dropping all the NAICS codes without trade. So these measures (total trade, china trade, and tariffs) are only conditional on being traded. This only matters in so far as the denominator, the ``df["annual_avg_emplvl"].sum()`` is concerned.
To make the adjustment then, we multiply the employment measure in the denominator and then divide through by the ``total_employment`` measure.
```
# Rescale each measure so the denominator is TOTAL county employment rather
# than just employment in traded industries (the merge dropped NAICS codes
# without trade data).
trade_county["tariff"] = (trade_county["emplvl_2017"]/
                          trade_county["total_employment"])*trade_county["tariff"]
trade_county["china_imp_pc"] = (trade_county["emplvl_2017"]/
                                trade_county["total_employment"])*trade_county["china_imp_pc"]
trade_county["total_imp_pc"] = (trade_county["emplvl_2017"]/
                                trade_county["total_employment"])*trade_county["total_imp_pc"]
trade_county.sort_values(by = ["tariff","emplvl_2017"], ascending = False).head(25)
# SECURITY NOTE(review): this Census API key is committed in plain text;
# rotate it and load it from an environment variable instead.
my_api_key = '34e40301bda77077e24c859c6c6c0b721ad73fc7'
# This is my api_key
c = Census(my_api_key)
# This will create an object c which has methods associated with it.
# We will see these below.
type(c)
# Per the discussion below, try c.tab and see the options.
code = ("NAME","B01001_001E","B19013_001E") # ACS codes (renamed to population / income below)
county_2017 = pd.DataFrame(c.acs5.get(code,
                                      {'for': 'county:*'}, year=2017))
# Same deal, but we specify county then the wild card
# On the example page, there are ways to do this only by state
county_2017 = county_2017.rename(columns = {"B01001_001E":"2017_population", "B19013_001E":"2017_income"})
county_2017["GEOFIPS"] = (county_2017["state"] + county_2017["county"]).astype(int)
county_2017["2017_population"] = county_2017["2017_population"].astype(float)
county_2017["2017_income"] = county_2017["2017_income"].astype(float)
county_2017.set_index(["GEOFIPS"], inplace = True)
# Attach income/population to the trade panel by integer FIPS, then restore
# the (county, time) index and write the final panel to parquet.
trade_county.reset_index(inplace = True)
trade_county["int_area_fips"] = trade_county["area_fips"].astype(int)
trade_county = trade_county.merge(county_2017[["2017_income","2017_population"]],
                                  left_on = "int_area_fips", right_index = True, how = "left")
#trade_employ.drop(labels = "index", axis = 1, inplace = True)
trade_county.set_index(["area_fips", "time"],inplace = True)
trade_county.head()
file_path = ".\\data"+ "\\imports_trade_data_2020.parquet"
pq.write_table(pa.Table.from_pandas(trade_county.reset_index()), file_path)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import dill as pickle
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, roc_auc_score
# NOTE(review): sklearn.externals.joblib is deprecated (removed in
# scikit-learn 0.23+); prefer `import joblib`. It is also imported twice
# in this cell.
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn import svm
from sklearn.utils import resample
from sklearn.externals import joblib
import sklearn.metrics as metrics
```
## Importing required functions
- For data cleaning and feature extraction
```
def _load_pickle(path):
    """Load one dill-pickled object, closing the file handle afterwards."""
    # SECURITY NOTE: unpickling executes arbitrary code -- only load files
    # produced by the trusted training notebook.
    with open(path, "rb") as f:
        return pickle.load(f)

# Preprocessing callables and normalisation boundaries saved during training.
# BUGFIX: the original `pickle.load(open(...))` calls leaked 9 file handles.
_MODEL_DIR = "./outputs/models/87995"
clean_columns = _load_pickle(_MODEL_DIR + "/clean_columns.pickle")
create_windows = _load_pickle(_MODEL_DIR + "/create_windows.pickle")
extract_features = _load_pickle(_MODEL_DIR + "/extract_features.pickle")
min_boundary = _load_pickle(_MODEL_DIR + "/min_boundary.pickle")
max_boundary = _load_pickle(_MODEL_DIR + "/max_boundary.pickle")
min_speed = _load_pickle(_MODEL_DIR + "/min_speed.pickle")
max_speed = _load_pickle(_MODEL_DIR + "/max_speed.pickle")
min_accuracy = _load_pickle(_MODEL_DIR + "/min_accuracy.pickle")
max_accuracy = _load_pickle(_MODEL_DIR + "/max_accuracy.pickle")
```
## Importing chosen model weights
- Imported best classifier from previous notebook
- Can assert to check
```
# Best classifier from the previous notebook (GBM; 0.74 in the filename is
# presumably its validation score -- confirm against the training notebook).
MODEL_PATH = "outputs/models/87995/Gradient Boosted Machine_0.74.pkl"
model = joblib.load(MODEL_PATH)
```
## Import Evaluation set
- Assumes that evaluation set will come in the same format as provided features
- Multiple .csv files
- Currently testing with provided training data
- To replace with evaluation feature data
```
# To insert the paths including file names for each .csv file in a list
# NOTE(review): part-00003 appears twice and part-00000 is absent -- this
# looks like a copy-paste slip; duplicated rows would be scored twice.
# Verify the list against the actual data directory.
SOURCE_LIST = [
    "../grab-ai-safety-data/features/part-00001-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00002-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00003-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00003-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00004-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00005-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00006-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00007-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00008-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
    "../grab-ai-safety-data/features/part-00009-e6120af0-10c2-4248-97c4-81baf4304e5c-c000.csv",
]
# Concatenate all parts into one frame with a fresh integer index.
li = []
for csv in SOURCE_LIST:
    df = pd.read_csv(csv)
    li.append(df)
df = pd.concat(
    li,
    axis=0,
    ignore_index=True
)
```
## Apply transformations
```
# Apply the saved cleaning -> windowing -> feature-extraction pipeline to the
# raw telemetry, producing the model's feature matrix.
df = (
    df.pipe(
        clean_columns
    ).pipe(
        create_windows
    ).pipe(
        extract_features
    )
)
```
## Predict using imported model
```
# One predicted label per feature row, wrapped in a DataFrame for export.
preds = pd.DataFrame(
    model.predict(df),
    columns=["label"]
)
```
## Export results to csv
```
# preds.to_csv("destination_path")
```
| github_jupyter |
# This notebook demonstrates how to download the netcdf POES data files (in netcdf format) for a given date range (there are multiple files per day), process them to get auroral boundary (equatorward) and plot it!
```
# NOTE(review): this notebook is Python 2 (bare `print` statements below).
import os
import datetime
from poes import dwnld_poes, get_aur_bnd, poes_plot_utils
from davitpy import utils
import matplotlib.pyplot as plt
%pylab inline
# dates to download raw poes files (inclusive range; here a single day)
sTimePOES = datetime.datetime( 2015,4,9 )
eTimePOES = datetime.datetime( 2015,4,9 )
# dir to store raw poes files
dayCount = (eTimePOES - sTimePOES).days + 1
# Loop through the days and download files
for inpDate in (sTimePOES + \
        datetime.timedelta(n) for n in range(dayCount)):
    poesDwnldObj = dwnld_poes.PoesDwnld(inpDate)
    # NOTE : set a proper outdir otherwise the data
    # is saved in the working directory by default
    poesFiles = poesDwnldObj.get_all_sat_data(outDir="/tmp/poes/raw")
# Read data from the POES files
# and get the auroral boundary location
# by fitting a circle
poesRdObj = get_aur_bnd.PoesAur()
( poesAllEleDataDF, poesAllProDataDF ) = poesRdObj.read_poes_data_files(\
    poesRawDate=sTimePOES,\
    poesRawDir="/tmp/poes/raw/" )
# Or you can uncomment the line below and read the data!
# ( poesAllEleDataDF, poesAllProDataDF ) = poesRdObj.read_poes_data_files(poesFiles)
# For a given time, get the closest satellite passes.
# We can do this at multiple instances for a given time range/step
timeRange = [ poesAllEleDataDF["date"].min(),\
    poesAllEleDataDF["date"].max() ]
# aurPassDF contains closest passes for a given time
# for all the satellites in both the hemispheres!
aurPassDF = poesRdObj.get_closest_sat_passes( poesAllEleDataDF,\
    poesAllProDataDF, timeRange )
# determine auroral boundaries from all the POES satellites
# at a given time. The procedure is described in the code!
# go over it!!!
eqBndLocsDF = poesRdObj.get_nth_ele_eq_bnd_locs( aurPassDF,\
    poesAllEleDataDF )
# to get an estimate of the auroral boundary, fit a circle
# to the boundaries determined from each satellite!
# The fits are written to a file and can be stored in
# a given location
# NOTE : set a proper outdir otherwise the data
# is saved in the working directory by default
bndDF=poesRdObj.fit_circle_aurbnd(eqBndLocsDF, outDir="/tmp/poes/bnd/")
print "ESTIMATED BOUNDARY"
print bndDF.head()
print "ESTIMATED BOUNDARY"
# Plot selected satellite passes between a time range
pltDate = datetime.datetime(2015,4,9)
timeRange = [ datetime.datetime(2015,4,9,7),\
    datetime.datetime(2015,4,9,8) ]
coords = "mlt"  # presumably magnetic-local-time coordinates -- confirm in davitpy docs
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1,1,1)
# Polar map centred on the north pole, cut off at 40 deg latitude.
m = utils.plotUtils.mapObj(boundinglat=40., coords=coords,\
    lat_0=90., lon_0=0, datetime=timeRange[0])
poesPltObj = poes_plot_utils.PlotUtils(pltDate, pltCoords=coords)
poesPltObj.overlay_sat_pass(timeRange,m,ax,"/tmp/poes/raw/",\
    satList=["m01", "n19"])
fig.savefig("figs/poes-demo1.pdf",bbox_inches='tight')
# Plot all closest (in time) satellite passes at a given time
# and also overlay the estimated auroral boundary
pltDate = datetime.datetime(2015,4,9)
selTime = datetime.datetime(2015,4,9,7,30)
coords = "mlt"
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1,1,1)
m = utils.plotUtils.mapObj(boundinglat=40., coords=coords,\
    lat_0=90., lon_0=0, datetime=selTime)
poesPltObj = poes_plot_utils.PlotUtils(pltDate, pltCoords=coords)
poesPltObj.overlay_closest_sat_pass(selTime,m,ax,"/tmp/poes/raw/")
# two ways to overlay estimated boundary!
# poesPltObj.overlay_equ_bnd(selTime,m,ax,rawSatDir="/tmp/poes/raw/")
poesPltObj.overlay_equ_bnd(selTime,m,ax,\
    inpFileName="/tmp/poes/bnd/poes-fit-20150409.txt")
fig.savefig("figs/poes-demo2.pdf",bbox_inches='tight')
```
| github_jupyter |
# Model-Centric Federated Learning for Mobile - MNIST Example
This notebook will walk you through creating a simple model and a training plan, and hosting both as a federated learning process
for further training using OpenMined mobile FL workers.
This notebook is similar to "[MCFL - Create Plan](mcfl_create_plan.ipynb)"
however due to mobile limitations, the training plan is different.
```
# stdlib
import base64
import json
# third party
import torch as th
# syft absolute
import syft as sy
from syft.core.plan.plan_builder import ROOT_CLIENT
from syft.core.plan.plan_builder import PLAN_BUILDER_VM
from syft.core.plan.plan_builder import make_plan
from syft.core.plan.translation.torchscript.plan_translate import (
    translate as translate_to_ts,
)
from syft.federated.model_centric_fl_client import ModelCentricFLClient
from syft.lib.python.int import Int
from syft.lib.python.list import List
# Fix the torch RNG so model init and the traced plans are reproducible.
th.random.manual_seed(42)
```
## Step 1: Define the model
This model will train on MNIST data, it's very simple yet can demonstrate learning process.
There're 2 linear layers:
* Linear 784x100
* ReLU
* Linear 100x10
Note that the model contains additional methods for convenience of torch reference usage:
* `backward` - calculates backward pass gradients because autograd doesn't work on mobile (yet).
* `softmax_cross_entropy_with_logits` - loss function
* `accuracy` - calculates accuracy of prediction
```
class MLP(sy.Module):
    """
    Simple model with method for loss and hand-written backprop.

    Architecture: Linear 784->100, ReLU, Linear 100->10. Backprop is written
    out by hand because autograd is not available on the mobile workers, and
    the loss is softmax cross-entropy computed from raw logits.
    """
    def __init__(self, torch_ref) -> None:
        super(MLP, self).__init__(torch_ref=torch_ref)
        self.fc1 = torch_ref.nn.Linear(784, 100)
        self.relu = torch_ref.nn.ReLU()
        self.fc2 = torch_ref.nn.Linear(100, 10)
    def forward(self, x):
        # Cache the hidden pre-activation (z1) and activation (a1); both are
        # needed by backward().
        self.z1 = self.fc1(x)
        self.a1 = self.relu(self.z1)
        return self.fc2(self.a1)
    def backward(self, X, error):
        # Manual gradients given upstream `error` = dLoss/dLogits; returns
        # (fc1.weight, fc1.bias, fc2.weight, fc2.bias) gradients.
        # dLoss/dz1 = (error @ W2) * ReLU'(z1), where ReLU' = 1[a1 > 0].
        z1_grad = (error @ self.fc2.state_dict()["weight"]) * (self.a1 > 0).float()
        fc1_weight_grad = z1_grad.t() @ X
        fc1_bias_grad = z1_grad.sum(0)
        fc2_weight_grad = error.t() @ self.a1
        fc2_bias_grad = error.sum(0)
        return fc1_weight_grad, fc1_bias_grad, fc2_weight_grad, fc2_bias_grad
    def softmax_cross_entropy_with_logits(self, logits, target, batch_size):
        # Returns (mean loss, dLoss/dLogits) for one-hot `target`.
        probs = self.torch_ref.softmax(logits, dim=1)
        loss = -(target * self.torch_ref.log(probs)).sum(dim=1).mean()
        # Gradient of mean softmax cross-entropy w.r.t. the logits.
        loss_grad = (probs - target) / batch_size
        return loss, loss_grad
    def accuracy(self, logits, targets, batch_size):
        # Fraction of rows where argmax(logits) matches the one-hot target.
        pred = self.torch_ref.argmax(logits, dim=1)
        targets_idx = self.torch_ref.argmax(targets, dim=1)
        acc = pred.eq(targets_idx).sum().float() / batch_size
        return acc
```
## Step 2: Define Training Plan
```
def set_remote_model_params(module_ptrs, params_list_ptr):
    """Sets the model parameters into traced model.

    Walks the remote module pointers in order and registers the matching
    entry of `params_list_ptr` as each parameter, so the model's weights
    become inputs of the traced plan. Relies on the module-level
    PLAN_BUILDER_VM store to resolve pointer ids to the underlying modules.
    """
    param_idx = 0
    for module_name, module_ptr in module_ptrs.items():
        for param_name, _ in PLAN_BUILDER_VM.store[
            module_ptr.id_at_location
        ].data.named_parameters():
            module_ptr.register_parameter(param_name, params_list_ptr[param_idx])
            param_idx += 1
# Create the model
local_model = MLP(th)
# Dummy input sizes, used only to trace the plan's graph.
bs = 3
classes_num = 10
# Zero-valued placeholders with the same shapes as the real parameters; the
# actual weights are passed in as plan inputs at execution time.
model_params_zeros = sy.lib.python.List(
    [th.nn.Parameter(th.zeros_like(param)) for param in local_model.parameters()]
)
@make_plan
def training_plan(
    xs=th.randn(bs, 28 * 28),
    ys=th.nn.functional.one_hot(th.randint(0, classes_num, [bs]), classes_num),
    batch_size=th.tensor([bs]),
    lr=th.tensor([0.1]),
    params=model_params_zeros,
):
    """One manual SGD step; returns (loss, accuracy, *updated params).

    The defaults are only tracing placeholders -- real batches and weights
    are supplied by the worker at execution time.
    """
    # send the model to plan builder (but not its default params)
    # this is required to build the model inside the Plan
    model = local_model.send(ROOT_CLIENT, send_parameters=False)
    # set model params from input
    set_remote_model_params(model.modules, params)
    # forward
    logits = model(xs)
    # loss
    loss, loss_grad = model.softmax_cross_entropy_with_logits(
        logits, ys, batch_size
    )
    # backward (hand-written; autograd is unavailable on mobile workers)
    grads = model.backward(xs, loss_grad)
    # SGD step
    updated_params = tuple(
        param - lr * grad for param, grad in zip(model.parameters(), grads)
    )
    # accuracy
    acc = model.accuracy(logits, ys, batch_size)
    # return things
    return (loss, acc, *updated_params)
```
Translate the training plan to torchscript so it can be used with mobile workers.
```
# Translate to torchscript so the mobile workers can execute the plan
ts_plan = translate_to_ts(training_plan)
# Let's examine its contents
print(ts_plan.torchscript.code)
```
## Step 3: Define Averaging Plan
Averaging Plan is executed by PyGrid at the end of the cycle,
to average _diffs_ submitted by workers and update the model
and create new checkpoint for the next cycle.
_Diff_ is the difference between client-trained
model params and original model params,
so it has same number of tensors and tensor's shapes
as the model parameters.
We define Plan that processes one diff at a time.
Such Plans require `iterative_plan` flag set to `True`
in `server_config` when hosting FL model to PyGrid.
Plan below will calculate simple mean of each parameter.
```
@make_plan
def avg_plan(
    avg=List(local_model.parameters()), item=List(local_model.parameters()), num=Int(0)
):
    """Fold one worker diff into the running parameter mean.

    Executed by PyGrid once per submitted diff (iterative_plan=True): given
    the running average over `num` diffs and one new `item`, returns the
    running average over num + 1 diffs, parameter by parameter.
    """
    new_avg = []
    for i, param in enumerate(avg):
        new_avg.append((avg[i] * num + item[i]) / (num + 1))
    return new_avg
```
## Step 4: Define Federated Learning Process Configuration
Before hosting the model and training plan to PyGrid,
we need to define some configuration parameters, such as
FL process name, version, workers configuration,
authentication method, etc.
```
# FL process identity plus per-worker client settings.
name = "mnist"
version = "1.0"
client_config = {
    "name": name,
    "version": version,
    "batch_size": 64,
    "lr": 0.01,
    "max_updates": 100,  # number of updates to execute on workers
}
# Server-side cycle orchestration settings.
server_config = {
    "num_cycles": 30,  # total number of cycles (how many times global model is updated)
    "cycle_length": 60*60*24,  # max duration of the training cycle in seconds (24 h)
    "max_diffs": 1,  # number of diffs to collect before updating global model
    "minimum_upload_speed": 0,
    "minimum_download_speed": 0,
    "iterative_plan": True,  # tells PyGrid that avg plan is executed per diff
}
```
This FL process will require workers to authenticate with signed JWT token.
Providing the `pub_key` in FL configuration allows PyGrid to verify JWT tokens.
```
def read_file(fname):
    """Return the full contents of a text file as a string.

    Reads explicitly as UTF-8 so the result does not depend on the
    platform's default encoding (the file holds an RSA public key in PEM
    text form).
    """
    with open(fname, "r", encoding="utf-8") as f:
        return f.read()
# Workers authenticate with JWTs signed by the matching private key; PyGrid
# verifies the tokens against this public key.
public_key = read_file("example_rsa.pub").strip()
server_config["authentication"] = {
    "type": "jwt",
    "pub_key": public_key,
}
```
## Step 5: Host in PyGrid
Let's now host everything in PyGrid so that it can be accessed by worker libraries.
Note: assuming the PyGrid Domain is running locally on port 7000.
### Step 5.1: Start a PyGrid Domain
- Clone PyGrid Github repository from https://github.com/OpenMined/PyGrid
- Install poetry using pip:
```
$ pip install poetry
```
- Go to apps/domain and install requirements:
```
$ poetry install
```
- run a Grid domain using the command:
```
$ ./run.sh --name bob --port 7000 --start_local_db
```
```
from syft.grid.client.client import connect
from syft.grid.client.grid_connection import (GridHTTPConnection,)
PYGRID_HOST = "pygrid.datax.io"
PYGRID_PORT = 7000
domain = connect(
url=f"http://{PYGRID_HOST}:{PYGRID_PORT}",
conn_type=GridHTTPConnection,
)
domain.setup(
email="owner@openmined.org",
password="owerpwd",
domain_name="OpenMined Node",
token="9G9MJ06OQH",
)
grid_address = f"{PYGRID_HOST}:{PYGRID_PORT}"
grid = ModelCentricFLClient(address=grid_address, secure=False)
grid.connect()
```
Following code sends FL model, training plans, and configuration to the PyGrid:
```
response = grid.host_federated_training(
model=local_model,
client_plans={
# Grid can store both types of plans (regular for python worker, torchscript for mobile):
"training_plan": training_plan,
"training_plan:ts": ts_plan,
},
client_protocols={},
server_averaging_plan=avg_plan,
client_config=client_config,
server_config=server_config,
)
response
```
If you see successful response, you've just hosted your first FL process into PyGrid!
If you see error that FL process already exists,
this means FL process with such name and version is already hosted.
You might want to update name/version in configuration above, or cleanup PyGrid database.
To cleanup database, set path below correctly and run:
```
# !rm ~/Projects/PyGrid/apps/domain/src/nodedatabase.db
```
To train hosted model, use one of the existing mobile FL workers:
* [SwiftSyft](https://github.com/OpenMined/SwiftSyft) (see included worker example)
* [KotlinSyft](https://github.com/OpenMined/KotlinSyft) (see included worker example)
Support for javascript worker is coming soon:
* [syft.js](https://github.com/OpenMined/syft.js)
| github_jupyter |
## Classes for callback implementors
```
from fastai.gen_doc.nbdoc import *
from fastai.callback import *
from fastai.basics import *
```
fastai provides a powerful *callback* system, which is documented on the [`callbacks`](/callbacks.html#callbacks) page; look on that page if you're just looking for how to use existing callbacks. If you want to create your own, you'll need to use the classes discussed below.
A key motivation for the callback system is that additional functionality can be entirely implemented in a single callback, so that it's easily read. By using this trick, we will have different methods categorized in different callbacks where we will find clearly stated all the interventions the method makes in training. For instance in the [`LRFinder`](/callbacks.lr_finder.html#LRFinder) callback, on top of running the fit function with exponentially growing LRs, it needs to handle some preparation and clean-up, and all this code can be in the same callback so we know exactly what it is doing and where to look if we need to change something.
In addition, it allows our [`fit`](/basic_train.html#fit) function to be very clean and simple, yet still easily extended. So far in implementing a number of recent papers, we haven't yet come across any situation where we had to modify our training loop source code - we've been able to use callbacks every time.
```
show_doc(Callback)
```
To create a new type of callback, you'll need to inherit from this class, and implement one or more methods as required for your purposes. Perhaps the easiest way to get started is to look at the source code for some of the pre-defined fastai callbacks. You might be surprised at how simple they are! For instance, here is the **entire** source code for [`GradientClipping`](/train.html#GradientClipping):
```python
@dataclass
class GradientClipping(LearnerCallback):
clip:float
def on_backward_end(self, **kwargs):
if self.clip:
nn.utils.clip_grad_norm_(self.learn.model.parameters(), self.clip)
```
You generally want your custom callback constructor to take a [`Learner`](/basic_train.html#Learner) parameter, e.g.:
```python
@dataclass
class MyCallback(Callback):
learn:Learner
```
Note that this allows the callback user to just pass your callback name to `callback_fns` when constructing their [`Learner`](/basic_train.html#Learner), since that always passes `self` when constructing callbacks from `callback_fns`. In addition, by passing the learner, this callback will have access to everything: e.g all the inputs/outputs as they are calculated, the losses, and also the data loaders, the optimizer, etc. At any time:
- Changing self.learn.data.train_dl or self.data.valid_dl will change them inside the fit function (we just need to pass the [`DataBunch`](/basic_data.html#DataBunch) object to the fit function and not data.train_dl/data.valid_dl)
- Changing self.learn.opt.opt (We have an [`OptimWrapper`](/callback.html#OptimWrapper) on top of the actual optimizer) will change it inside the fit function.
- Changing self.learn.data or self.learn.opt directly WILL NOT change the data or the optimizer inside the fit function.
In any of the callbacks you can unpack in the kwargs:
- `n_epochs`, contains the number of epochs the training will take in total
- `epoch`, contains the number of the current epoch
- `iteration`, contains the number of iterations done since the beginning of training
- `num_batch`, contains the number of the batch we're at in the dataloader
- `last_input`, contains the last input that got through the model (eventually updated by a callback)
- `last_target`, contains the last target that got through the model (eventually updated by a callback)
- `last_output`, contains the last output spitted by the model (eventually updated by a callback)
- `last_loss`, contains the last loss computed (eventually updated by a callback)
- `smooth_loss`, contains the smoothed version of the loss
- `last_metrics`, contains the last validation loss and metrics computed
- `pbar`, the progress bar
- [`train`](/train.html#train), flag to know if we're in training mode or not
- `stop_training`, that will stop the training at the end of the current epoch if True
- `stop_epoch`, that will break the current epoch loop
- `skip_step`, that will skip the next optimizer step
- `skip_zero`, that will skip the next zero grad
When returning a dictionary with those key names, the state of the [`CallbackHandler`](/callback.html#CallbackHandler) will be updated with any of those changes, so in any [`Callback`](/callback.html#Callback), you can change those values.
### Methods your subclass can implement
All of these methods are optional; your subclass can handle as many or as few as you require.
```
show_doc(Callback.on_train_begin)
```
Here we can initialize anything we need.
The optimizer has now been initialized. We can change any hyper-parameters by typing, for instance:
```
self.opt.lr = new_lr
self.opt.mom = new_mom
self.opt.wd = new_wd
self.opt.beta = new_beta
```
```
show_doc(Callback.on_epoch_begin)
```
This is not technically required since we have `on_train_begin` for epoch 0 and `on_epoch_end` for all the other epochs,
yet it makes writing code that needs to be done at the beginning of every epoch easy and more readable.
```
show_doc(Callback.on_batch_begin)
```
Here is the perfect place to prepare everything before the model is called.
Example: change the values of the hyperparameters (if we don't do it on_batch_end instead)
At the end of that event `xb`,`yb` will be set to `last_input`, `last_target` of the state of the [`CallbackHandler`](/callback.html#CallbackHandler).
```
show_doc(Callback.on_loss_begin)
```
Here is the place to run some code that needs to be executed after the output has been computed but before the
loss computation.
Example: putting the output back in FP32 when training in mixed precision.
At the end of that event the output will be set to `last_output` of the state of the [`CallbackHandler`](/callback.html#CallbackHandler).
```
show_doc(Callback.on_backward_begin)
```
Here is the place to run some code that needs to be executed after the loss has been computed but before the gradient computation.
Example: `reg_fn` in RNNs.
At the end of that event the output will be set to `last_loss` of the state of the [`CallbackHandler`](/callback.html#CallbackHandler).
```
show_doc(Callback.on_backward_end)
```
Here is the place to run some code that needs to be executed after the gradients have been computed but
before the optimizer is called.
If `skip_step` is `True` at the end of this event, the optimizer step is skipped.
```
show_doc(Callback.on_step_end)
```
Here is the place to run some code that needs to be executed after the optimizer step but before the gradients
are zeroed.
If `skip_zero` is `True` at the end of this event, the gradients are not zeroed.
```
show_doc(Callback.on_batch_end)
```
Here is the place to run some code that needs to be executed after a batch is fully done.
Example: change the values of the hyperparameters (if we don't do it on_batch_begin instead)
If `end_epoch` is `True` at the end of this event, the current epoch is interrupted (example: lr_finder stops the training when the loss explodes).
```
show_doc(Callback.on_epoch_end)
```
Here is the place to run some code that needs to be executed at the end of an epoch.
Example: Save the model if we have a new best validation loss/metric.
If `end_training` is `True` at the end of this event, the training stops (example: early stopping).
```
show_doc(Callback.on_train_end)
```
Here is the place to tidy everything. It's always executed even if there was an error during the training loop,
and has an extra kwarg named exception to check if there was an exception or not.
Examples: save log_files, load best model found during training
```
show_doc(Callback.get_state)
```
This is used internally when trying to export a [`Learner`](/basic_train.html#Learner). You won't need to subclass this function but you can add attribute names to the lists `exclude` or `not_min` of the [`Callback`](/callback.html#Callback) you are designing. Attributes in `exclude` are never saved, attributes in `not_min` only if `minimal=False`.
## Annealing functions
The following functions provide different annealing schedules. You probably won't need to call them directly, but would instead use them as part of a callback. Here's what each one looks like:
```
annealings = "NO LINEAR COS EXP POLY".split()
fns = [annealing_no, annealing_linear, annealing_cos, annealing_exp, annealing_poly(0.8)]
for fn, t in zip(fns, annealings):
plt.plot(np.arange(0, 100), [fn(2, 1e-2, o)
for o in np.linspace(0.01,1,100)], label=t)
plt.legend();
show_doc(annealing_cos)
show_doc(annealing_exp)
show_doc(annealing_linear)
show_doc(annealing_no)
show_doc(annealing_poly)
show_doc(CallbackHandler)
```
You probably won't need to use this class yourself. It's used by fastai to combine all the callbacks together and call any relevant callback functions for each training stage. The methods below simply call the equivalent method in each callback function in [`self.callbacks`](/callbacks.html#callbacks).
```
show_doc(CallbackHandler.on_backward_begin)
show_doc(CallbackHandler.on_backward_end)
show_doc(CallbackHandler.on_batch_begin)
show_doc(CallbackHandler.on_batch_end)
show_doc(CallbackHandler.on_epoch_begin)
show_doc(CallbackHandler.on_epoch_end)
show_doc(CallbackHandler.on_loss_begin)
show_doc(CallbackHandler.on_step_end)
show_doc(CallbackHandler.on_train_begin)
show_doc(CallbackHandler.on_train_end)
show_doc(CallbackHandler.set_dl)
show_doc(OptimWrapper)
```
This is a convenience class that provides a consistent API for getting and setting optimizer hyperparameters. For instance, for [`optim.Adam`](https://pytorch.org/docs/stable/optim.html#torch.optim.Adam) the momentum parameter is actually `betas[0]`, whereas for [`optim.SGD`](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD) it's simply `momentum`. As another example, the details of handling weight decay depend on whether you are using `true_wd` or the traditional L2 regularization approach.
This class also handles setting different WD and LR for each layer group, for discriminative layer training.
```
show_doc(OptimWrapper.clear)
show_doc(OptimWrapper.create)
show_doc(OptimWrapper.new)
show_doc(OptimWrapper.read_defaults)
show_doc(OptimWrapper.read_val)
show_doc(OptimWrapper.set_val)
show_doc(OptimWrapper.step)
show_doc(OptimWrapper.zero_grad)
show_doc(SmoothenValue)
```
Used for smoothing loss in [`Recorder`](/basic_train.html#Recorder).
```
show_doc(SmoothenValue.add_value)
show_doc(Scheduler)
```
Used for creating annealing schedules, mainly for [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler).
```
show_doc(Scheduler.step)
show_doc(AverageMetric)
```
See the documentation on [`metrics`](/metrics.html#metrics) for more information.
### Callback methods
You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality.
```
show_doc(AverageMetric.on_epoch_begin)
show_doc(AverageMetric.on_batch_end)
show_doc(AverageMetric.on_epoch_end)
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
## New Methods - Please document or move to the undocumented section
| github_jupyter |
# 1. SETTINGS
```
# libraries
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
import seaborn as sns
import matplotlib.pyplot as plt
# garbage collection
import gc
gc.enable()
# pandas options
pd.set_option("display.max_columns", None)
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# random settings
seed = 42
```
# 2. PREPARATIONS
```
# dataset
data = "v1"
# import data
train = pd.read_csv("../data/prepared/train_" + str(data) + ".csv")
test = pd.read_csv("../data/prepared/test_" + str(data) + ".csv")
y = pd.read_csv("../data/prepared/y_" + str(data) + ".csv")
# sort data
train = train.sort_values("SK_ID_CURR")
y = y.sort_values("SK_ID_CURR")
# extract target
y = y["TARGET"]
# exclude features
excluded_feats = ["SK_ID_CURR"]
features = [f for f in train.columns if f not in excluded_feats]
# check dimensions
print(train[features].shape)
print(test[features].shape)
### PARAMETERS
# parallel settings
cores = 10
# learner settings
metric = "auc"
verbose = 500
stopping = 300
# CV settings
num_folds = 5
shuffle = True
# lightGBM
gbm = lgb.LGBMClassifier(n_estimators = 10000,
learning_rate = 0.005,
num_leaves = 70,
colsample_bytree = 0.8,
subsample = 0.9,
max_depth = 7,
reg_alpha = 0.1,
reg_lambda = 0.1,
min_split_gain = 0.01,
min_child_weight = 2,
random_state = seed,
num_threads = cores)
```
# 3. CROSS-VALIDATION
## 3.1. ALL FEATURES
```
# data partitinoing
folds = StratifiedKFold(n_splits = num_folds, random_state = seed, shuffle = shuffle)
# placeholders
valid_aucs_cv = np.zeros(num_folds)
test_preds_cv = np.zeros(test.shape[0])
feature_importance_df = pd.DataFrame()
### CROSS-VALIDATION LOOP
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(train, y)):
# data partitioning
trn_x, trn_y = train[features].iloc[trn_idx], y.iloc[trn_idx]
val_x, val_y = train[features].iloc[val_idx], y.iloc[val_idx]
# train lightGBM
gbm = gbm.fit(trn_x, trn_y,
eval_set = [(trn_x, trn_y), (val_x, val_y)],
eval_metric = metric,
verbose = verbose,
early_stopping_rounds = stopping)
# save number of iterations
num_iter_cv = gbm.best_iteration_
# predictions
valid_preds_cv = gbm.predict_proba(val_x, num_iteration = num_iter_cv)[:, 1]
valid_aucs_cv[n_fold] = roc_auc_score(val_y, valid_preds_cv)
test_preds_cv += gbm.predict_proba(test[features], num_iteration = num_iter_cv)[:, 1] / folds.n_splits
# importance
fold_importance_df = pd.DataFrame()
fold_importance_df["Feature"] = features
fold_importance_df["Importance"] = gbm.feature_importances_
fold_importance_df["Fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis = 0)
# print performance
print("----------------------")
print("Fold%2d AUC: %.6f" % (n_fold + 1, valid_aucs_cv[n_fold]))
print("----------------------")
print("")
# clear memory
del trn_x, trn_y, val_x, val_y
gc.collect()
# print overall performance
auc = np.mean(valid_aucs_cv)
print("Cross-Validation AUC score %.6f" % np.mean(valid_aucs_cv))
##### VARIABLE IMPORTANCE
# load importance
top_feats = 50
cols = feature_importance_df[["Feature", "Importance"]].groupby("Feature").mean().sort_values(by = "Importance", ascending = False)[0:top_feats].index
importance = feature_importance_df.loc[feature_importance_df.Feature.isin(cols)]
# plot variable importance
plt.figure(figsize = (10, 10))
sns.barplot(x = "Importance", y = "Feature", data = importance.sort_values(by = "Importance", ascending = False))
plt.title('LightGBM Variable Importance (mean over CV folds)')
plt.tight_layout()
# save plot as pdf
plt.savefig("../var_importance.pdf")
```
## 3.2. TOP FEATURES
```
# keep top features
top = 500
cols = feature_importance_df[["Feature", "Importance"]].groupby("Feature").mean().sort_values(by = "Importance", ascending = False)[0:top].index
importance = feature_importance_df.loc[feature_importance_df.Feature.isin(cols)]
features = list(importance.groupby("Feature").Importance.mean().sort_values(ascending = False).index)
# check dimensions
print(train[features].shape)
print(test[features].shape)
### CROSS-VALIDATION LOOP
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(train, y)):
# data partitioning
trn_x, trn_y = train[features].iloc[trn_idx], y.iloc[trn_idx]
val_x, val_y = train[features].iloc[val_idx], y.iloc[val_idx]
# train lightGBM
gbm = gbm.fit(trn_x, trn_y,
eval_set = [(trn_x, trn_y), (val_x, val_y)],
eval_metric = metric,
verbose = verbose,
early_stopping_rounds = stopping)
# save number of iterations
num_iter_cv = gbm.best_iteration_
# predictions
valid_preds_cv = gbm.predict_proba(val_x, num_iteration = num_iter_cv)[:, 1]
valid_aucs_cv[n_fold] = roc_auc_score(val_y, valid_preds_cv)
test_preds_cv += gbm.predict_proba(test[features], num_iteration = num_iter_cv)[:, 1] / folds.n_splits
# print performance
print("----------------------")
print("Fold%2d AUC: %.6f" % (n_fold + 1, valid_aucs_cv[n_fold]))
print("----------------------")
print("")
# clear memory
del trn_x, trn_y, val_x, val_y
gc.collect()
# print overall performance
auc = np.mean(valid_aucs_cv)
print("Cross-Validation AUC score %.6f" % auc)
```
# 4. SUBMISSION
```
# create submission
test["TARGET"] = test_preds_cv
subm = test[["SK_ID_CURR", "TARGET"]]
# check rank correlation with the best submission
from scipy.stats import spearmanr
best = pd.read_csv("../submissions/rmean_top7_03072018.csv")
spearmanr(test.TARGET, best.TARGET)
# export CSV
subm.to_csv("../submissions/auc" + str(round(auc, 6))[2:8] + "_bag_lgb_top" + str(top) + ".csv", index = False, float_format = "%.8f")
# no card, old features (560): 0.786941 | 0.783
# no card, new features (694): 0.788893 | 0.783
# with card, new features (1072): 0.790123 | 0.787
# with card and kernel features (1109): 0.790053 |
# card, kernel, factorize, no na (978): 0.790803 |
# card, kern, fac, nona, adummy (1193): 0.791321 |
# full data, one-hot ecoding (1844): 0.791850 |
# full data, one-hot, extra sums (2486): 0.791880 | 0.789
# full, one-hot, sums, buroscore (2501): 0.791761 |
# full, one-hot, clean, buroscore (1826): 0.791867 |
# last data + ext, age ratios (1828): 0.791808 |
# new app feats, remove weighted (1830): 0.794241 | 0.795
# previous data - top1000 LGB features: 0.794384 |
# select top1500 LGB features: 0.794384 |
```
| github_jupyter |
```
from openrtdynamics2.dsp import *
import math
import numpy as np
import openrtdynamics2.lang, openrtdynamics2.dsp as dy
import openrtdynamics2.py_execute as dyexe
import openrtdynamics2.targets as tg
import os
import matplotlib.pyplot as plt
#%matplotlib widget
# https://github.com/matplotlib/ipympl
from vehicle_lib.vehicle_lib import *
system = dy.enter_system()
velocity = dy.system_input( dy.DataTypeFloat64(1), name='velocity', default_value=5.0, value_range=[0, 25], title="vehicle velocity [m/s]")
s1 = dy.system_input( dy.DataTypeFloat64(1), name='s1', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 1 [rad/s]")
s2 = dy.system_input( dy.DataTypeFloat64(1), name='s2', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 2 [rad/s]")
s3 = dy.system_input( dy.DataTypeFloat64(1), name='s3', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 3 [rad/s]")
s4 = dy.system_input( dy.DataTypeFloat64(1), name='s4', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 4 [rad/s]")
s5 = dy.system_input( dy.DataTypeFloat64(1), name='s5', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 5 [rad/s]")
initial_steering = dy.system_input( dy.DataTypeFloat64(1), name='initial_steering', default_value=-0.0, value_range=[-40, 40], title="initial steering angle [degrees]") * dy.float64(math.pi / 180.0)
initial_orientation = dy.system_input( dy.DataTypeFloat64(1), name='initial_orientation', default_value=0.0, value_range=[-360, 360], title="initial orientation angle [degrees]") * dy.float64(math.pi / 180.0)
# parameters
wheelbase = 3.0
# sampling time
Ts = 0.01
steering_rate = dy.float64(0)
cnt = dy.counter()
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(200), new_value=s1 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(400), new_value=s2 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(600), new_value=s3 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(800), new_value=s4 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(1000), new_value=s5 )
# linearly increasing steering angle
delta = dy.euler_integrator( steering_rate, Ts, initial_state=initial_steering )
delta = dy.saturate(u=delta, lower_limit=-math.pi/2.0, upper_limit=math.pi/2.0)
# the model of the vehicle
x, y, psi, x_dot, y_dot, psi_dot = discrete_time_bicycle_model(delta, velocity, Ts, wheelbase, psi0=initial_orientation)
#
# outputs: these are available for visualization in the html set-up
#
dy.append_output(x, 'x')
dy.append_output(y, 'y')
dy.append_output(psi, 'psi')
dy.append_output(delta, 'steering')
# generate code for Web Assembly (wasm), requires emcc (emscripten) to build
code_gen_results = dy.generate_code(template=tg.TargetCppWASM(), folder="generated/trajectory_generation", build=True)
#
dy.clear()
from IPython.display import JSON
JSON(code_gen_results['manifest'])
compiled_system = dyexe.CompiledCode(code_gen_results)
testsim = dyexe.SystemInstance(compiled_system)
N=3000
input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : -0.1 }
# sim_results = run_batch_simulation(testsim, input_data, N )
sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
sim_results['y']
plt.figure()
plt.plot(sim_results['x'], sim_results['y'])
plt.show()
plt.figure()
plt.plot( sim_results['steering'])
testsim = dyexe.SystemInstance(compiled_system)
N=600
plt.figure()
for s2 in np.linspace(-0.0,-0.2,5):
input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2 }
sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
plt.plot(sim_results['x'], sim_results['y'])
testsim = dyexe.SystemInstance(compiled_system)
N=600+200
plt.figure()
for s2 in np.linspace(-0.0,-0.2,5):
for s3 in np.linspace(-0.0,-0.2,5):
input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2, 's3' : s3 }
sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
plt.plot(sim_results['x'], sim_results['y'])
testsim = dyexe.SystemInstance(compiled_system)
N=600+200+200
plt.figure()
for s2 in np.linspace(-0.0,-0.2,5):
for s3 in np.linspace(-0.0,-0.1,5):
for s4 in np.linspace(0.1,+0.2,4):
input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2, 's3' : s3, 's4' : s4 }
sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
plt.plot(sim_results['x'], sim_results['y'])
testsim = dyexe.SystemInstance(compiled_system)
N=600+200+200+200
plt.figure()
for s2 in np.linspace(-0.0,-0.2,5):
for s3 in np.linspace(-0.0,-0.1,5):
for s4 in np.linspace(0.1,+0.2,4):
for s5 in np.linspace(-0.1,+0.1,4):
input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : s2, 's3' : s3, 's4' : s4, 's5' : s5 }
sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
plt.plot(sim_results['x'], sim_results['y'])
```
| github_jupyter |
```
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.allocator_type = 'BFC' #A "Best-fit with coalescing" algorithm, simplified from a version of dlmalloc.
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
# config.gpu_options.allow_growth = True
# set_session(tf.Session(config=config))
## LIMIT GPU USAGE
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True # don't pre-allocate memory; allocate as-needed
config.gpu_options.per_process_gpu_memory_fraction = 0.95 # limit memory to be allocated
set_session(tf.Session(config=config)) # create sess w/ above settings
print(tf.test.is_built_with_cuda())
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF")
# keras example imports
from keras.models import load_model
## extra imports to set GPU options
import tensorflow as tf
from keras import backend as k
k.get_session().close()
###################################
# TensorFlow wizardry
config = tf.ConfigProto()
# Don't pre-allocate memory; allocate as-needed
config.gpu_options.allow_growth = True
# Only allow a total of half the GPU memory to be allocated
config.gpu_options.per_process_gpu_memory_fraction = 0.95
# Create a session with the above options specified.
k.tensorflow_backend.set_session(tf.Session(config=config))
###################################
print(tf.test.is_built_with_cuda())
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import os
import numpy as np
from tabulate import tabulate
from statistics import mean
#this function is to get the time string like h:m:s
#========================================================================================
def getTime(time):
    """Format a duration in seconds as an ``H:M:S`` string.

    The duration wraps around at 24 hours, and the fields are not
    zero-padded (e.g. 3661 seconds -> ``'1:1:1'``), matching how the
    elapsed-time column is written to the results CSV.
    """
    time = time % (24 * 3600)  # wrap around one day
    hours, rem = divmod(time, 3600)
    minutes, seconds = divmod(rem, 60)
    return '{}:{}:{}'.format(int(hours), int(minutes), int(seconds))
#========================================================================================
import csv
dirpath = os.getcwd()
path = dirpath + '/LSTMExperimentResults_AfterDefense/1%/RMSE_Confidence_Interval (epoch= 10, batch = 20 , neurons = 10).csv'
myfile1 = open(path,'w', newline='')
writer1 = csv.writer(myfile1)
heading =['Samples','Number of Observations','RMSEValues','Mean','Standard Errors','Upper Bound','Lower Bound','Execution Time']
writer1.writerow(heading)
myfile1.close()
```
https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
https://machinelearningmastery.com/multi-step-time-series-forecasting-long-short-term-memory-networks-python/
https://machinelearningmastery.com/models-sequence-prediction-recurrent-neural-networks/
https://machinelearningmastery.com/how-to-develop-rnn-models-for-human-activity-recognition-time-series-classification/
```
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Reframe a time series as a supervised-learning table.

    Builds lagged input columns ``var*(t-n_in) .. var*(t-1)`` followed by
    forecast columns ``var*(t) .. var*(t+n_out-1)``. Rows containing NaN
    (introduced by the shifting) are dropped when *dropnan* is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = [], []
    # lagged observations: t-n_in ... t-1
    for lag in range(n_in, 0, -1):
        cols.append(df.shift(lag))
        names.extend('var%d(t-%d)' % (v + 1, lag) for v in range(n_vars))
    # forecast horizon: t ... t+n_out-1
    for step in range(n_out):
        cols.append(df.shift(-step))
        if step == 0:
            names.extend('var%d(t)' % (v + 1) for v in range(n_vars))
        else:
            names.extend('var%d(t+%d)' % (v + 1, step) for v in range(n_vars))
    agg = concat(cols, axis=1)
    agg.columns = names
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# specify the number of lag hours
list_n_mins = [1,5,10,15,30,60]
#percentage = ['5%','10%','15%','20%','25%','30%','35%','40%','45%','50%']
percentage = ['1%']
for n_mins in list_n_mins:
for percent in percentage:
import time
start_time = time.time()
# load dataset
dirpath = os.getcwd()
dataset = read_csv(dirpath + '/datasetForLSTM/60_'+percent+'_Allcombine.csv', header=0, index_col=0)
values = dataset.values
epochs = 10
batch_size = 20
neurons = 10
n_features = 6
# frame as supervised learning
reframed = series_to_supervised(values, n_mins, 1)
print(reframed.head())
# drop columns we don't want to predict
# for number of mins = 1, drop columns ==> [6,7,8,9,10] , index 11 for gridlock
# for number of mins = 5, drop columns ==> [30,31,32,33,34] , index 35 for gridlock
# for number of mins = 10, drop columns ==> [60,61,62,63,64] , index 65 for gridlock
# for number of mins = 15, drop columns ==> [90,91,92,93,94] , index 95 for gridlock
# for number of mins = 30, drop columns ==> [180,181,182,183,184] , index 95 for gridlock
# for number of mins = 60, drop columns ==> [360,361,362,363,364] , index 365 for gridlock
# for number of mins = 90, drop columns ==> [540,541,542,543,544] , index 545 for gridlock
if n_mins == 1:
reframed.drop(reframed.columns[[6,7,8,9,10]], axis=1, inplace=True)
if n_mins == 5:
reframed.drop(reframed.columns[[30,31,32,33,34]], axis=1, inplace=True)
if n_mins == 10:
reframed.drop(reframed.columns[[60,61,62,63,64]], axis=1, inplace=True)
if n_mins == 15:
reframed.drop(reframed.columns[[90,91,92,93,94]], axis=1, inplace=True)
if n_mins == 30:
reframed.drop(reframed.columns[[180,181,182,183,184]], axis=1, inplace=True)
if n_mins == 60:
reframed.drop(reframed.columns[[360,361,362,363,364]], axis=1, inplace=True)
# reframed.to_csv(dirpath + '/datasetForLSTM/60_5%_Allcombine_reframed.csv',index=False )
#print(reframed.head())
reframed.columns
# split into train and test sets
values = reframed.values
n_train_mins =80 * 181
train = values[:n_train_mins, :]
test = values[n_train_mins:, :]
# split into input and outputs
n_obs = n_mins * n_features
train_X, train_y = train[:, :n_obs], train[:, -1]
test_X, test_y = test[:, :n_obs], test[:, -1]
print(train_X.shape, len(train_X), train_y.shape)
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], n_mins, n_features))
test_X = test_X.reshape((test_X.shape[0], n_mins, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# design network
model = Sequential()
model.add(LSTM(neurons, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, validation_data=(test_X, test_y), verbose=0, shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.xlabel('epoch')
pyplot.ylabel('loss')
pyplot.legend()
pyplot.savefig(dirpath + '/LSTMExperimentResults_AfterDefense/1%/'+percent+'_'+str(n_mins)+'min_'+str(epochs)+'epochs_'+str(batch_size)+'batch_size_'+str(neurons)+'neurons.png')
pyplot.clf()
# pyplot.show()
# make a prediction
yhat = model.predict(test_X)
# temp_yhat = yhat
# temp_yhat = [np.round(num) for num in yhat]
# pyplot.plot(test_y, 'r-',label='actual')
# pyplot.show()
# pyplot.plot(temp_yhat, 'b-',label='predict')
# pyplot.show()
test_X.shape
repeats = 10
rmse_list = list()
acutal_predicted_df = DataFrame()
acutal_predicted_df['actual']= test_y
acutal_predicted_df['predicted']= yhat
acutal_predicted_df.to_csv(dirpath + '/LSTMExperimentResults_AfterDefense/1%/'+percent+'_'+str(n_mins)+'min_'+str(epochs)+'epochs_'+str(batch_size)+'batch_size_'+str(neurons)+'neurons.csv', index=False)
for r in range(repeats):
# make a prediction
test_X, test_y = test[:, :n_obs], test[:, -1]
test_X = test_X.reshape((test_X.shape[0], n_mins, n_features))
yhat = model.predict(test_X)
yhat.shape
test_X = test_X.reshape((test_X.shape[0], test_X.shape[1]*test_X.shape[2]))
test_y = test_y.reshape((len(test_y), 1))
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X), axis=1)
#print(tabulate(inv_yhat, headers=['inv_yhat'], tablefmt='orgtbl'))
inv_y = concatenate((test_y, test_X), axis=1)
#print(tabulate(inv_y, headers=['inv_y'], tablefmt='orgtbl'))
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
rmse_list.append(rmse)
import numpy as np
import scipy.stats
import csv
a = 1.0 * np.array(rmse_list)
n = len(a)
mean, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + 0.95) / 2., n-1)
elapsed_time = getTime(time.time() - start_time)
myfile = open(dirpath + '/LSTMExperimentResults_AfterDefense/1%/RMSE_Confidence_Interval (epoch= 10, batch = 20 , neurons = 10).csv', 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow(
[percent,n_mins , rmse_list,mean, se, mean-h, mean+h,elapsed_time])
```
| github_jupyter |
```
import random
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import copy
import pickle
#generate stats
from collections import defaultdict
from collections import Counter
%matplotlib inline
import json
#load categories yolo and coco names
import numpy as np
x = np.loadtxt('categories/9k.names',delimiter='\n',dtype=str)
yolo=x.tolist()
x = np.loadtxt('categories/coco.names',delimiter='\n',dtype=str)
coco=x.tolist()
wordtocnt = ['zero','one','two','three','four','five','six','seven','eight','nine',
'ten','eleven','twelve','thirteen','fourteen','fifteen']
def getnounsimple(question):
    """Extract a simple noun phrase from a counting question.

    Strips the question mark and the leading "How many"/"how many",
    then returns the tokens up to and including the first occurrence
    of one of a small set of anchor words. Returns '' when no anchor
    word appears in the question.

    NOTE(review): relies on the module-level `nltk` import.
    """
    newtxt = question.replace('?', '')
    newtxt = newtxt.replace('How many', '')
    newtxt = newtxt.replace('how many', '')
    tokens = nltk.word_tokenize(newtxt)
    noun = ''
    for lookup in ['can', 'are', 'does', 'is', 'giraffe', 'giraffes', 'zebra']:
        try:
            # keep everything up to (and including) the anchor word
            noun = tokens[0:tokens.index(lookup) + 1]
            break
        except ValueError:
            # anchor word not present in this question; was a bare
            # `except:` which would also hide unrelated errors
            noun = ''
    return ' '.join(noun)
import nltk
lemmatizer = nltk.stem.WordNetLemmatizer()
def getnoun(sentence):
    """Return the lemmatized noun phrase ending at the first NNS/NNP token.

    POS-tags the sentence, drops the first two tagged tokens (the
    "How many" prefix), and collects everything up to and including the
    first plural or proper noun. Falls back to getnounsimple() when no
    such tag is found.
    """
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))[2:]
    phrase = []
    for idx, (word, pos) in enumerate(tagged):
        if pos in ('NNS', 'NNP'):
            phrase = tagged[:idx + 1]
            break
    if not phrase:
        return getnounsimple(sentence)
    return ' '.join(lemmatizer.lemmatize(tok[0]) for tok in phrase)
def issimple(noun):
    """Return True when *noun* consists of exactly one word.

    Splits on any whitespace run (the original space-only split
    miscounted tabs and repeated spaces) and treats an empty or
    whitespace-only string as NOT simple.
    """
    return len(noun.split()) == 1
print (issimple('black dogs'))
v7wtell = Visual7wTell('visual7w/dataset_v7w_telling.json')
v7wtell.info()
from vqatools.visual7w import Visual7wPoint,Visual7wTell
v7wpoint = Visual7wPoint('visual7w/dataset_v7w_pointing.json')
v7wpoint.info()
v7wpoint.boxidtobox[794494]
v7wpoint.showQA(265555)
v7wpoint.qidtoqa[265555]
# howques = getQuesIds(quesType='how')
# Collect all counting-question ids and show one at random as a sanity check.
countques = v7wtell.getCountquesids()
print("there are %d count questions" % (len(countques)))  # fixed typo "ther"
v7wtell.showQA(random.choice(countques))
countans =defaultdict(int)
for qid in countques:
qa= v7wtell.qidtoqa[qid]
countans[qa.get('answer')] +=1
prec = list(countans.keys())
prec
nounstats = defaultdict(list)
blank=0
for qid in countques:
qa= v7wtell.qidtoqa[qid]
noun = getnoun(qa.get('question'))
if noun == '':
print (qa.get('question'))
blank+=1
nounstats[noun].append(qid)
print ("Noun: ",noun)
simple=[]
commplex=[]
simpleyolo=[]
simplecoco=[]
for noun in nounstats.keys():
if issimple(noun):
simple.append(noun)
else:
commplex.append(noun)
if noun in yolo:
simpleyolo.append(noun)
if noun in coco:
simplecoco.append(noun)
simplequest= [ len(nounstats[noun]) for noun in simple]
print ("no of simple questions:",sum(simplequest))
complexquest= [ len(nounstats[noun]) for noun in commplex]
print ("no of complex questions:",sum(complexquest))
simplequest= [ len(nounstats[noun]) for noun in simpleyolo]
print ("no of simple questions in yolo:",sum(simplequest))
simplequest= [ len(nounstats[noun]) for noun in simplecoco]
print ("no of simple questions in coco:",sum(simplequest))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/moh2236945/Natural-language-processing/blob/master/Apply%20features%20extrating%20and%20text%20normalization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import re
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string
import nltk
import numpy as np
%matplotlib inline
train=pd.read_csv('/content/train_E6oV3lV.csv')
test=pd.read_csv('/content/test_tweets_anuFYb8.csv')
train.head()
```
The data has 3 columns: id, label, and tweet. ***label*** is the binary target variable and ***tweet*** contains the tweets that we will clean and preprocess.
```
# Remove @user handles. To clean both splits in one pass, first combine
# train and test into a single dataframe.
# NOTE(review): DataFrame.append is deprecated in newer pandas; pd.concat
# is the forward-compatible equivalent — confirm the pinned pandas version.
combi=train.append(test,ignore_index=True)
combi.shape
def remove_pattern(input_text, pattern):
    """Remove every occurrence of the regex *pattern* from *input_text*.

    The original implementation re-substituted each raw match text as a
    regex, which misbehaves when a match contains metacharacters (e.g.
    '.'); escaping the match before substitution fixes that while
    keeping the same match-then-remove flow.
    """
    for match in re.findall(pattern, input_text):
        input_text = re.sub(re.escape(match), '', input_text)
    return input_text
```
Create a new column, tidy_tweet;
it contains the cleaned and processed tweets. **Note** that we have passed “@[\w]*” as the pattern to the remove_pattern function. It is actually a regular expression which will pick any word starting with ‘@’.
```
combi['tidy_tweet']=np.vectorize(remove_pattern)(combi['tweet'],"@[\w]*")
combi.head()
#removing Punction,Number&Special chars
combi['tidy_tweet']=combi['tidy_tweet'].str.replace('[^a-zA-Z#]', "")
combi.head()
```
Removing Short Words
```
combi['tidy_tweet'] = combi['tidy_tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
```
Text Normalization
Steps:
Tokenization > Normalization
```
tokenized_tweet = combi['tidy_tweet'].apply(lambda x: x.split()) # tokenizing
tokenized_tweet.head()
from nltk.stem.porter import *
stemmer = PorterStemmer()
tokenized_tweet = tokenized_tweet.apply(lambda x: [stemmer.stem(i) for i in x]) # stemming
#stitch these tokens back together.
for i in range(len(tokenized_tweet)):
tokenized_tweet[i] = ' '.join(tokenized_tweet[i])
combi['tidy_tweet'] = tokenized_tweet
#Understanding the common words used in the tweets: WordCloud
#A wordcloud is a visualization wherein the most frequent words appear in large size and the less frequent words appear in smaller sizes.
all_words = ' '.join([text for text in combi['tidy_tweet']])
from wordcloud import WordCloud
wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(all_words)
plt.figure(figsize=(10, 7))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.show()
#Words in non racist/sexist tweets
normal_words =' '.join([text for text in combi['tidy_tweet'][combi['label'] == 0]])
wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(normal_words)
plt.figure(figsize=(10, 7))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.show()
# Understanding the impact of Hashtags on tweets sentiment
# function to collect hashtags
def hashtag_extract(x):
    """Return, for each tweet in *x*, the list of hashtag words it contains."""
    tag_pattern = re.compile(r"#(\w+)")
    return [tag_pattern.findall(tweet) for tweet in x]
# extracting hashtags from non racist/sexist tweets
HT_regular = hashtag_extract(combi['tidy_tweet'][combi['label'] == 0])
# extracting hashtags from racist/sexist tweets
HT_negative = hashtag_extract(combi['tidy_tweet'][combi['label'] == 1])
# unnesting list
HT_regular = sum(HT_regular,[])
HT_negative = sum(HT_negative,[])
#Non-Racist/Sexist Tweets
a = nltk.FreqDist(HT_regular)
d = pd.DataFrame({'Hashtag': list(a.keys()),
'Count': list(a.values())})
# selecting top 20 most frequent hashtags
d = d.nlargest(columns="Count", n = 20)
plt.figure(figsize=(16,5))
ax = sns.barplot(data=d, x= "Hashtag", y = "Count")
ax.set(ylabel = 'Count')
plt.show()
#extract Features from Cleaned tweets
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import gensim
#Bag-of-Words Features
bow_vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
bow = bow_vectorizer.fit_transform(combi['tidy_tweet'])
bow.shape
#TF-IDF Features
tfidf_vectorizer = TfidfVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(combi['tidy_tweet'])
tfidf.shape
#Word Embedding
tokenized_tweet = combi['tidy_tweet'].apply(lambda x: x.split()) # tokenizing
model_w2v = gensim.models.Word2Vec(
tokenized_tweet,
size=200, # desired no. of features/independent variables
window=5, # context window size
min_count=2,
sg = 1, # 1 for skip-gram model
hs = 0,
negative = 10, # for negative sampling
workers= 2, # no.of cores
seed = 34)
model_w2v.train(tokenized_tweet, total_examples= len(combi['tidy_tweet']), epochs=20)
#Preparing Vectors for Tweets
def word_vector(tokens, size):
    """Average the Word2Vec embeddings of *tokens* into one (1, size) vector.

    Tokens missing from the model vocabulary are skipped; an all-zero
    vector is returned when none of the tokens are known.

    NOTE(review): indexing the model directly (model_w2v[word]) is the
    pre-gensim-4 API — newer gensim requires model_w2v.wv[word]; confirm
    the installed version.
    """
    vec = np.zeros(size).reshape((1, size))
    count = 0.
    for word in tokens:
        try:
            vec += model_w2v[word].reshape((1, size))
            count += 1.
        except KeyError: # handling the case where the token is not in vocabulary
            continue
    # guard against dividing by zero when no token was in the vocabulary
    if count != 0:
        vec /= count
    return vec
wordvec_arrays = np.zeros((len(tokenized_tweet), 200))
for i in range(len(tokenized_tweet)):
wordvec_arrays[i,:] = word_vector(tokenized_tweet[i], 200)
wordvec_df = pd.DataFrame(wordvec_arrays)
wordvec_df.shape
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
from gensim.models.doc2vec import LabeledSentence
def add_label(twt):
    """Wrap each tokenized tweet in a LabeledSentence tagged "tweet_<index>"."""
    return [
        LabeledSentence(tokens, ["tweet_" + str(idx)])
        for idx, tokens in zip(twt.index, twt)
    ]
labeled_tweets = add_label(tokenized_tweet) # label all the tweets
labeled_tweets[:6]
```
| github_jupyter |
# T1046 - Network Service Scanning
Adversaries may attempt to get a listing of services running on remote hosts, including those that may be vulnerable to remote software exploitation. Methods to acquire this information include port scans and vulnerability scans using tools that are brought onto a system.
Within cloud environments, adversaries may attempt to discover services running on other cloud hosts. Additionally, if the cloud environment is connected to a on-premises environment, adversaries may be able to identify services running on non-cloud systems as well.
## Atomic Tests
```
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force
```
### Atomic Test #1 - Port Scan
Scan ports to check for listening ports.
Upon successful execution, sh will perform a network connection against a single host (192.168.1.1) and determine what ports are open in the range of 1-65535. Results will be via stdout.
**Supported Platforms:** linux, macos
#### Attack Commands: Run with `sh`
```sh
for port in {1..65535};
do
echo >/dev/tcp/192.168.1.1/$port && echo "port $port is open" || echo "port $port is closed" : ;
done
```
```
Invoke-AtomicTest T1046 -TestNumbers 1
```
### Atomic Test #2 - Port Scan Nmap
Scan ports to check for listening ports with Nmap.
Upon successful execution, sh will utilize nmap, telnet, and nc to contact a single address or range of addresses on port 80 to determine if it is listening. Results will be via stdout.
**Supported Platforms:** linux, macos
#### Dependencies: Run with `sh`!
##### Description: Check if nmap command exists on the machine
##### Check Prereq Commands:
```sh
if [ -x "$(command -v nmap)" ]; then exit 0; else exit 1; fi;
```
##### Get Prereq Commands:
```sh
echo "Install nmap on the machine to run the test."; exit 1;
```
```
Invoke-AtomicTest T1046 -TestNumbers 2 -GetPreReqs
```
#### Attack Commands: Run with `sh`
```sh
nmap -sS 192.168.1.0/24 -p 80
telnet 192.168.1.1 80
nc -nv 192.168.1.1 80
```
```
Invoke-AtomicTest T1046 -TestNumbers 2
```
### Atomic Test #3 - Port Scan NMap for Windows
Scan ports to check for listening ports for the local host 127.0.0.1
**Supported Platforms:** windows
Elevation Required (e.g. root or admin)
#### Dependencies: Run with `powershell`!
##### Description: NMap must be installed
##### Check Prereq Commands:
```powershell
if (cmd /c "nmap 2>nul") {exit 0} else {exit 1}
```
##### Get Prereq Commands:
```powershell
Invoke-WebRequest -OutFile $env:temp\nmap-7.80-setup.exe https://nmap.org/dist/nmap-7.80-setup.exe
Start-Process $env:temp\nmap-7.80-setup.exe /S
```
```
Invoke-AtomicTest T1046 -TestNumbers 3 -GetPreReqs
```
#### Attack Commands: Run with `powershell`
```powershell
nmap 127.0.0.1```
```
Invoke-AtomicTest T1046 -TestNumbers 3
```
## Detection
System and network discovery techniques normally occur throughout an operation as an adversary learns the environment. Data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as Lateral Movement, based on the information obtained.
Normal, benign system and network events from legitimate remote service scanning may be uncommon, depending on the environment and how they are used. Legitimate open port and vulnerability scanning may be conducted within the environment and will need to be deconflicted with any detection capabilities developed. Network intrusion detection systems can also be used to identify scanning activity. Monitor for process use of the networks and inspect intra-network flows to detect port scans.
## Shield Active Defense
### Software Manipulation
Make changes to a system's software properties and functions to achieve a desired effect.
Software Manipulation allows a defender to alter or replace elements of the operating system, file system, or any other software installed and executed on a system.
#### Opportunity
There is an opportunity for the defender to observe the adversary and control what they can see, what effects they can have, and/or what data they can access.
#### Use Case
A defender can change the output of a recon commands to hide simulation elements you don’t want attacked and present simulation elements you want the adversary to engage with.
#### Procedures
Hook the Win32 Sleep() function so that it always performs a Sleep(1) instead of the intended duration. This can increase the speed at which dynamic analysis can be performed when a normal malicious file sleeps for long periods before attempting additional capabilities.
Hook the Win32 NetUserChangePassword() and modify it such that the new password is different from the one provided. The data passed into the function is encrypted along with the modified new password, then logged so a defender can get alerted about the change as well as decrypt the new password for use.
Alter the output of an adversary's profiling commands to make newly-built systems look like the operating system was installed months earlier.
Alter the output of adversary recon commands to not show important assets, such as a file server containing sensitive data.
| github_jupyter |
# Heart Desease Prediction
Exercise: Predict if a patient has a heart disease or not. We have data which classifies whether patients have heart disease or not according to its features. We will try to use this data to create a model which predicts whether a patient has this disease or not.
Dataset = https://archive.ics.uci.edu/ml/datasets/Heart+Disease
-----
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from math import pi
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
## Load and prepare data
```
import os
os.path.abspath(os.getcwd())
data_path='./data/heart.csv'
df = pd.read_csv(data_path)
print(f"Dataframe shape: {df.shape}")
df.head(10)
```
## Understand the data
```
fig,ax = plt.subplots(figsize=(20, 20))
sns.heatmap(df.corr(), ax=ax, annot=True, linewidths=0.05, fmt= '.2f',cmap="magma")
plt.show()
print("People having heart diseace vs people who doesn't: \n", df.target.value_counts())
heart_disease = len(df[df['target']==1])
no_heart_disease = len(df[df['target']==0])
labels = ["Heart Diesease", "NO Heart Disease"]
sizes = [heart_disease, no_heart_disease]
colors = ['skyblue', 'yellowgreen']
plt.figure(figsize=(8,6))
plt.pie(sizes, labels=labels, colors=colors,
autopct='%1.2f%%', shadow=True)
plt.show()
```
## Feature Engineering
Dummy variables (cp, thal, slope)
```
cp = pd.get_dummies(df['cp'], prefix = "cp")
thal = pd.get_dummies(df['thal'], prefix = "thal")
slope = pd.get_dummies(df['slope'], prefix = "slope")
frames = [df, cp, thal, slope]
df = pd.concat(frames, axis = 1)
to_drop = ['cp','thal','slope']
df = df.drop(to_drop, axis=1)
df.head()
df = (df - np.min(df)) / (np.max(df) - np.min(df)).values
```
Get features and target
```
features = df.drop('target',axis =1)
targets = df.target.values
```
Split the dataset
```
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size = 0.20, random_state=42)
```
Visualize shape of the dataset and info
```
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
```
## Build models
### First easy model
```
# Imports
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.optimizers import SGD
```
Build the model (4 layers)
```
# Fully-connected network: 128 -> 64 -> 32 -> 1 (sigmoid output).
# input_shape is only meaningful on the first layer; the redundant copies
# on the hidden layers were ignored by Keras and have been dropped.
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(x_train.shape[1],)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# NOTE(review): for a binary target, binary_crossentropy is the usual loss;
# MSE is kept here to preserve the notebook's original behavior.
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['mae','mse'])
model.summary()
```
Tensorboard configuration
```
from datetime import datetime
logdir = 'logs/scalars/' + datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)
```
Train the model
```
epochs_number = 100
history = model.fit(x_train, y_train, validation_split=0.2, epochs=epochs_number, batch_size=16, verbose=1, callbacks=[tensorboard_callback])
```
## Evaluate the model and results
evaluate model loss with epochs on train and test
Prediction vs original labels
### More complicated model
Recreate the input using 2 dim on output
```
print(y_train.shape)
print(x_train.shape)
```
Define the new model and compile
Fit the model
```
# fit the model to the training data
n_epochs = 200
n_batch = 10
```
Print model accuracy vs val accuracy
Print model loss vs val loss
# MNIST Example
https://keras.io/api/datasets/mnist/
Import the dataset
Create the model
Create a second more complicated model
In this case we use dropout to avoid overfitting: https://machinelearningmastery.com/dropout-for-regularizing-deep-neural-networks/
Tensorboard configuration
Train the model (and compile)
Compare the results (val loss)
| github_jupyter |
# Publications markdown generator for academicpages
Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data.
TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
## Data format
The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
- `excerpt` and `paper_url` can be blank, but the others must have values.
- `pub_date` must be formatted as YYYY-MM-DD.
- `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).
```
!cat publications.tsv
```
## Import pandas
We are using the very handy pandas library for dataframes.
```
import pandas as pd
```
## Import TSV
Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
```
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
```
## Escape special characters
YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
```
# Map of characters that break YAML strings to their HTML entities.
# (The table had been corrupted by entity rendering — e.g. a bare '"""'
# literal — and is reconstructed per the surrounding description:
# ampersands, double quotes and single quotes become encoded entities.)
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&#39;"
}
def html_escape(text):
    """Replace &, double quotes and single quotes in *text* with HTML entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
```
## Creating the markdown files
This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
```
import os
# Generate one markdown file per publication row: YAML front matter first,
# then a short page body. "len(str(...)) > 5" is the notebook's heuristic
# for "field is present" (it also filters out the string 'nan' from NaN).
for row, item in publications.iterrows():
    md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
    html_filename = str(item.pub_date) + "-" + item.url_slug
    year = item.pub_date[:4]  # NOTE(review): computed but never used below
    ## YAML variables
    md = "---\ntitle: \"" + item.title + '"\n'
    md += """collection: publications"""
    md += """\npermalink: /publication/""" + html_filename
    if len(str(item.excerpt)) > 5:
        md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
    md += "\ndate: " + str(item.pub_date)
    md += "\nvenue: '" + html_escape(item.venue) + "'"
    if len(str(item.paper_url)) > 5:
        md += "\npaperurl: '" + item.paper_url + "'"
    md += "\ncitation: '" + html_escape(item.citation) + "'"
    md += "\n---"
    ## Markdown description for individual page
    if len(str(item.excerpt)) > 5:
        md += "\n" + html_escape(item.excerpt) + "\n"
    if len(str(item.paper_url)) > 5:
        md += "\n[Download paper here](" + item.paper_url + ")\n"
    # citation repeated in the body (unescaped) in addition to front matter
    md += "\ncitation: " + item.citation
    md_filename = os.path.basename(md_filename)
    with open("../_publications/" + md_filename, 'w') as f:
        f.write(md)
```
These files are in the publications directory, one directory below where we're working from.
```
!ls ../_publications/
!cat ../_publications/2009-10-01-paper-title-number-1.md
```
| github_jupyter |
This notebook runs various solvers for a single step and dumps out some variables -- it is intended for unit testing.
At the moment, it is required to be at the top-level `pyro/` directory, because not all the functions find pyro's home directory on their own.
```
from pyro import Pyro
```
## advection
```
solver = "advection"
problem_name = "smooth"
param_file = "inputs.smooth"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
dens = pyro_sim.sim.cc_data.get_var("density")
dens.pretty_print(show_ghost=False)
```
## advection_nonuniform
```
solver = "advection_nonuniform"
problem_name = "slotted"
param_file = "inputs.slotted"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
dens = pyro_sim.sim.cc_data.get_var("density")
dens.pretty_print(show_ghost=False)
```
## advection_fv4
```
solver = "advection_fv4"
problem_name = "smooth"
param_file = "inputs.smooth"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
dens = pyro_sim.sim.cc_data.get_var("density")
dens.pretty_print(show_ghost=False)
```
## advection_rk
```
solver = "advection_rk"
problem_name = "tophat"
param_file = "inputs.tophat"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
dens = pyro_sim.sim.cc_data.get_var("density")
dens.pretty_print(show_ghost=False)
```
## compressible
```
solver = "compressible"
problem_name = "rt"
param_file = "inputs.rt"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=24", "driver.verbose=0", "compressible.riemann=CGF"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
dens = pyro_sim.sim.cc_data.get_var("density")
dens.pretty_print(show_ghost=False)
```
## compressible_fv4
```
solver = "compressible_fv4"
problem_name = "kh"
param_file = "inputs.kh"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8", "driver.verbose=0"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
e = pyro_sim.sim.cc_data.get_var("eint")
e.pretty_print(show_ghost=False)
```
## compressible_rk
```
solver = "compressible_rk"
problem_name = "quad"
param_file = "inputs.quad"
other_commands = ["driver.max_steps=1", "mesh.nx=16", "mesh.ny=16", "driver.verbose=0"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
p = pyro_sim.sim.cc_data.get_var("pressure")
p.pretty_print(show_ghost=False)
```
## compressible_sdc
```
solver = "compressible_sdc"
problem_name = "sod"
param_file = "inputs.sod.y"
other_commands = ["driver.max_steps=1", "mesh.nx=4", "mesh.ny=16", "driver.verbose=0"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
p = pyro_sim.sim.cc_data.get_var("pressure")
p.pretty_print(show_ghost=False)
```
## diffusion
```
solver = "diffusion"
problem_name = "gaussian"
param_file = "inputs.gaussian"
other_commands = ["driver.max_steps=1", "mesh.nx=16", "mesh.ny=16", "driver.verbose=0"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
dens = pyro_sim.sim.cc_data.get_var("phi")
dens.pretty_print(show_ghost=False)
```
## incompressible
```
solver = "incompressible"
problem_name = "shear"
param_file = "inputs.shear"
other_commands = ["driver.max_steps=1", "mesh.nx=8", "mesh.ny=8", "driver.verbose=0"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
u = pyro_sim.sim.cc_data.get_var("x-velocity")
u.pretty_print(show_ghost=False)
```
## lm_atm
```
solver = "lm_atm"
problem_name = "bubble"
param_file = "inputs.bubble"
other_commands = ["driver.max_steps=1", "mesh.nx=16", "mesh.ny=16", "driver.verbose=0"]
pyro_sim = Pyro(solver)
pyro_sim.initialize_problem(problem_name, param_file, other_commands=other_commands)
pyro_sim.run_sim()
v = pyro_sim.sim.cc_data.get_var("y-velocity")
v.pretty_print(show_ghost=False, fmt="%10.3g")
```
| github_jupyter |
# A sample example to tuning the hyperparameters of Prophet classifier is shown as usecase.
```
from mango.tuner import Tuner
from mango.domain.distribution import loguniform
param_dict = {"changepoint_prior_scale": loguniform(-3, 4),
'seasonality_prior_scale' : loguniform(1, 2)
}
```
# userObjective
```
from classifiers.prophet import Prophet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
import numpy as np
model = Prophet()
import os
data_path = os.path.abspath('.')+'/classifiers/data/'
X_train, y_train =model.load_train_dataset(data_path+"PJME/train_data")
X_test, y_test = model.load_train_dataset(data_path+"PJME/test_data")
X_validate, y_validate = model.load_train_dataset(data_path+"PJME/validate_data")
count_called = 1
def objective_Prophet(args_list):
    """Objective function for the Mango tuner over Prophet hyper-parameters.

    For every hyper-parameter dict in *args_list*: fit a Prophet model on
    the global training split, score MSE on the global validation split,
    and record the scaled, negated MSE (the tuner maximizes, so lower MSE
    maps to a higher objective).

    Returns:
        (hyper_evaluated, results): the evaluated parameter dicts and their
        corresponding objective values, in matching order.
    """
    global X_train, y_train,X_validate,y_validate, count_called
    # track how many times the tuner invokes this objective
    print('count_called:',count_called)
    count_called = count_called + 1
    hyper_evaluated = []
    results = []
    for hyper_par in args_list:
        clf = Prophet(**hyper_par)
        clf.fit(X_train, y_train.ravel())
        y_pred = clf.predict(X_validate)
        mse = mean_squared_error(y_validate, y_pred)
        # scale down, then negate so the maximizing tuner prefers low MSE
        mse = mse/10e5
        result = (-1.0) * mse
        results.append(result)
        hyper_evaluated.append(hyper_par)
    return hyper_evaluated, results
conf_Dict = dict()
conf_Dict['batch_size'] = 2
conf_Dict['num_iteration'] = 10
conf_Dict['initial_random'] = 5
#conf_Dict['domain_size'] = 10000
```
# Defining Tuner
```
tuner_user = Tuner(param_dict, objective_Prophet,conf_Dict)
tuner_user.getConf()
import time
start_time = time.clock()
results = tuner_user.maximize()
end_time = time.clock()
print(end_time - start_time)
```
# Inspect the results
```
print('best hyper parameters:',results['best_params'])
print('best objective:',results['best_objective'])
print('Sample hyper parameters tried:',len(results['params_tried']))
print(results['params_tried'][:2])
print('Sample objective values',len(results['objective_values']))
print(results['objective_values'][:5])
```
# Plotting the actual variation in objective values of the tried results
```
Size = 201
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(30,5))
plt.title('Variation of Objective',fontsize=20)
plt.plot(results['objective_values'][:Size],lw=4,label='BL')
plt.xlabel('Iterations', fontsize=25)
plt.ylabel('objective_values',fontsize=25)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(prop={'size': 30})
plt.show()
```
# Plotting the variation of Max objective values of the tried results
```
Size = 201
import numpy as np
results_obj = np.array(results['objective_values'])
y_max=[]
for i in range(results_obj.shape[0]):
y_max.append(np.max(results_obj[:i+1]))
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(30,5))
plt.title('Max variation of Objective',fontsize=20)
plt.plot(y_max[:Size],lw=4,label='BL')
plt.xlabel('Iterations', fontsize=25)
plt.ylabel('objective_values',fontsize=25)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(prop={'size': 30})
plt.show()
```
# See the Result
```
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(results)
```
# See the learned classifier result on the test data
```
model = Prophet(**results['best_params'])
model.fit(X_train, y_train.ravel())
y_pred = model.predict(X_test)
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(30,10))
plt.rcParams.update({'font.size': 18})
plt.plot(X_test,y_test,label='Test')
plt.plot(X_test,y_pred,label='Prediction')
plt.title('Testing Data')
plt.legend()
plt.show()
```
# All the Data
```
from classifiers.prophet import Prophet
model = Prophet()
import os
data_path = os.path.abspath('.')+'/classifiers/data/'
X_train, y_train =model.load_train_dataset(data_path+"PJME/train_data")
X_test, y_test = model.load_train_dataset(data_path+"PJME/test_data")
X_validate, y_validate = model.load_train_dataset(data_path+"PJME/validate_data")
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(30,10))
plt.rcParams.update({'font.size': 18})
plt.plot(X_train,y_train,label='Train')
plt.plot(X_validate,y_validate,label='validate')
plt.plot(X_test,y_test,label='Test')
plt.title('All Data')
plt.legend()
plt.show()
```
| github_jupyter |
# 23mer Regression analysis
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rc
from itertools import cycle
import pickle
import sklearn.manifold
from sklearn.metrics import roc_curve, auc, r2_score, mean_squared_error, make_scorer
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV, KFold, cross_validate, cross_val_score
from sklearn.preprocessing import label_binarize
from sklearn.svm import LinearSVR, SVR
from sklearn.feature_selection import SelectFromModel, RFECV, VarianceThreshold
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn import linear_model, decomposition
from sklearn.pipeline import Pipeline, make_pipeline
from yellowbrick.regressor import ResidualsPlot
from yellowbrick.features import RFECV as yellowRFECV
from sklearn.kernel_ridge import KernelRidge
from math import log10, pow, log,sqrt
from statistics import mean
import scipy.stats as stats
#Load data
# First CSV row holds the column names; column 0 holds the sgRNA sequence.
rownames = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514.csv", "rb"),delimiter=',', dtype='str')[0,:]
names = np.asarray([_ for _ in rownames])
# Numeric block: every column except the sequence; df[:,0] is the response
# (efficiency), the remaining columns are features.
df = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514.csv", "rb"),delimiter=',', usecols=range(1,rownames.shape[0]), dtype='float', skiprows=1)
sgRNA_seq = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514.csv", "rb"),delimiter=',', usecols=0, dtype='str', skiprows=1)
#sort by sequence (not by activity/efficiency which is the response variable)
indx = sgRNA_seq.argsort()
df = df[indx,:]
# BUGFIX: keep the sequence column aligned with the reordered feature rows.
# The original reordered df only, so the CSV written below paired each
# sequence with another row's features.
sgRNA_seq = sgRNA_seq[indx]
#Standard scale non binary features
# Columns 1..25 are treated as continuous — TODO confirm against CSV layout.
sc = StandardScaler()
df[:,1:26] = sc.fit_transform(df[:,1:26])
#Save standardized dataset
pd.DataFrame(np.column_stack((sgRNA_seq, df)), columns=names).to_csv("/home/pierre/Desktop/Position_Matrix/ML_REG_input23mer_7514-scaled.csv",index=False)
#Number of cpus to use for multithreading
n_cpu = 2
```
### RFECV
```
#Recursive Feature Elimination with Cross-Validation to remove the irrelevant features
modelnames = [
    "Linear Regression",
    "Linear SVR",
    "l2 Linear Regression",   # Ridge is l2-penalised (was mislabelled "l1"; cf. the plot cell below)
    "Gradient-Boosted Decision Tree",
    "Random Forest"
]
estimators = [
    LinearRegression(n_jobs=n_cpu),
    LinearSVR(random_state=0),
    Ridge(random_state=0),
    GradientBoostingRegressor(random_state=0),
    RandomForestRegressor(random_state=0)
]
rfecv = dict()
models = dict()
datasets = dict()
supports = dict()
# Fit one RFECV selector per model; keep the selector, the reduced dataset and
# the support mask under the same integer key.
for i, (name, clf) in enumerate(zip(modelnames, estimators)):
    # Un-shuffled KFold is already deterministic; the original passed
    # random_state=0 without shuffle=True, which modern scikit-learn rejects.
    selector = RFECV(estimator=clf,
                     cv=KFold(n_splits=10),
                     scoring='r2', n_jobs=n_cpu)
    rfecv[i] = selector
    selector.fit(df[:, 1:], df[:, 0])
    X_new = selector.transform(df[:, 1:])
    sup = selector.get_support(True)
    # Re-attach the names of the retained feature columns.
    X_new = pd.DataFrame(X_new, columns=names[1:][sup])
    print(X_new.shape)
    models[i] = selector
    datasets[i] = X_new
    supports[i] = sup
# RFECV to plot
modelnames = [
    "Linear Regression",
    "Linear SVR",
    "l2 Linear Regression",
    "Gradient-Boosted Decision Tree",
    "Random Forest"
]
n_groups = 5
# create plot: one CV-score curve per selector (5 used axes on a 3x2 grid;
# the sixth axis stays empty).
fig, axes = plt.subplots(nrows=3,ncols=2,figsize=(25,20))
for ax, i in zip(axes.flatten(), range(n_groups)):
    rfecv_model = rfecv[i]
    X_newL1 = datasets[i]
    ax.set_xlabel("Number of features selected", fontsize=20)
    ax.set_ylabel("Cross validation score (R2 score)", fontsize=20)
    ax.set_title("%s - Optimal number of features : %d" % (modelnames[i], X_newL1.shape[1]), fontsize=20)
    # NOTE(review): RFECV.grid_scores_ was removed in scikit-learn 1.2;
    # use cv_results_['mean_test_score'] on newer versions.
    ax.plot(range(1, len(rfecv_model.grid_scores_) + 1), rfecv_model.grid_scores_)
plt.show()
# Persist the fitted selectors, reduced datasets and support masks so the
# (slow) RFECV step can be skipped on re-runs.
data = {
    'rfecv': rfecv,
    'datasets': datasets,
    'supports': supports
}
# A context manager guarantees the file handle is closed even if dump fails.
with open("23mer_RFECV.pickle", "wb") as pickle_out:
    pickle.dump(data, pickle_out)
# # Uncomment to load the rfecv and datasets
# pickle_in = open("23mer_RFECV.pickle","rb")
# p_load = pickle.load(pickle_in)
# rfecv = p_load['rfecv']
# datasets = p_load['datasets']
# supports = p_load['supports']
```
## Hyperparameter Optimization & Performance Evaluation
```
# find the best hyperparameters for each model and evaluate its performance on the training set
models = [LinearRegression(),
          LinearSVR(),
          Ridge(),
          GradientBoostingRegressor(),
          RandomForestRegressor()
          ]
# set up params
# One parameter grid per model, in the same order as `models`.
# NOTE(review): max_features='auto' was removed for the tree ensembles in
# scikit-learn 1.3 — replace with None (trees' old default) on newer versions.
tuned_parameters = [{},
                    {'loss': ['epsilon_insensitive','squared_epsilon_insensitive'],
                     'epsilon': [0,0.001,0.01,0.1,1],
                     'C': [0.001,0.01,0.1,1,10,100,1000]},
                    {'alpha': np.logspace(log10(10e-5),log10(1.5e5),100)},
                    {'n_estimators':[50,100,150,200],
                     'max_depth':[2,4,6,8,10],
                     'min_samples_split':[2,4],
                     'min_samples_leaf':[1,2],
                     'max_features':['auto','sqrt','log2']},
                    {'n_estimators':[50,100,150,200],
                     'max_depth':[2,4,6,8,10],
                     'min_samples_split':[2,4],
                     'min_samples_leaf':[1,2],
                     'max_features':['auto','sqrt','log2']}
                    ]
grid = dict()
# Exhaustive grid search (10-fold shuffled CV, R^2 scoring) of every model on
# every RFECV-selected feature set: grid[i][j] is the fitted GridSearchCV of
# model j on dataset i. Seeding with i+j keeps each split reproducible.
for i in datasets :
    print(datasets[i].shape)
    grid[i] = dict()
    for j in range(len(models)):
        print(str(models[j]))
        print(str(tuned_parameters[j]))
        print()
        #Inner CV for parameter Optimization
        grid[i][j] = GridSearchCV(models[j],
                                  tuned_parameters[j],
                                  cv=KFold(n_splits=10, shuffle=True, random_state=i+j),
                                  scoring='r2',n_jobs=n_cpu).fit(datasets[i], df[:,0])
# Outer 10-fold CV estimates of R2, adjusted R2 and RMSE for each tuned model
# (index j) on each feature set (index i), re-using the i+j seeding scheme of
# the grid search above.
r2 = dict()
r2_adj = dict()
RMSE = dict()
for i in grid :
    print(datasets[i].shape)
    n =(datasets[i].shape[0])  # sample count, used by the adjusted-R2 formula
    k =(datasets[i].shape[1])  # number of selected features
    r2[i] = dict()
    r2_adj[i] = dict()
    RMSE[i] = dict()
    for j in range(len(grid[i])):
        print(str(grid[i][j]))
        scoreR2 = np.mean(cross_val_score(grid[i][j].best_estimator_, datasets[i], df[:,0], cv=KFold(n_splits=10, shuffle=True, random_state=i+j), scoring='r2', n_jobs=n_cpu))
        print('r2 = ',scoreR2)
        # Adjusted R2 penalises the score for the number of predictors k.
        scoreR2_adj =1-(1-scoreR2)*(n-1)/(n-(k+1))
        print('r2_adj = ',scoreR2_adj)
        scoreRMSE = np.mean(cross_val_score(grid[i][j].best_estimator_, datasets[i], df[:,0], cv=KFold(n_splits=10, shuffle=True, random_state=i+j), scoring='neg_mean_squared_error', n_jobs=n_cpu))
        # neg_mean_squared_error is negative by sklearn convention, hence abs().
        print('RMSE = ',sqrt(abs(scoreRMSE)))
        r2[i][j] = scoreR2
        r2_adj[i][j] = scoreR2_adj
        RMSE[i][j] = sqrt(abs(scoreRMSE))
        print('_____________')
# Persist the grid-search objects and the performance tables.
data = {
    'grid': grid,
    'r2': r2,
    'r2_adj': r2_adj,
    'RMSE': RMSE
}
# A context manager guarantees the file handle is closed even if dump fails.
with open("23mer_GRID&Perf.pickle", "wb") as pickle_out:
    pickle.dump(data, pickle_out)
# #Uncomment to load performance
# pickle_in = open("23mer_GRID&Perf.pickle","rb")
# p_load = pickle.load(pickle_in)
# grid = p_load['grid']
# r2 = p_load['r2']
# r2_adj = p_load['r2_adj']
# RMSE = p_load['RMSE']
```
# Plot the performance comparison on the training set
```
# data to plot: one subplot per metric; within each, one bar group per RFECV
# feature set and one coloured bar per regression model.
n_groups = 5
metrics = {'r2':r2, 'r2_adj':r2_adj, 'RMSE':RMSE}
# create plot
fig, axes = plt.subplots(nrows=3,ncols=1,figsize=(15,20))
index = np.arange(n_groups)
bar_width = 0.14
opacity = 0.8
i=0
for ax, v in zip(axes.flatten(),metrics.keys()):
    # Collect the metric of each model (columns) across the five feature sets.
    FS_LinReg = list()
    FS_LinSVR = list()
    FS_Ridge = list()
    FS_GBRT = list()
    FS_RF = list()
    # NOTE(review): this loop variable clobbers the outer counter `i`;
    # harmless because `i` is unused afterwards, but the trailing `i+=1`
    # below is therefore dead code.
    for i in range(len(metrics[v])):
        FS_LinReg.append(metrics[v][i][0])
        FS_LinSVR.append(metrics[v][i][1])
        FS_Ridge.append(metrics[v][i][2])
        FS_GBRT.append(metrics[v][i][3])
        FS_RF.append(metrics[v][i][4])
    FS_LinReg = tuple(FS_LinReg)
    FS_LinSVR = tuple(FS_LinSVR)
    FS_Ridge = tuple(FS_Ridge)
    FS_GBRT = tuple(FS_GBRT)
    FS_RF = tuple(FS_RF)
    rects1 = ax.bar(index, FS_LinReg, bar_width, align = 'center',
                    alpha=opacity,
                    color='blue',
                    label='LinReg')
    rects2 = ax.bar(index + bar_width, FS_LinSVR, bar_width, align = 'center',
                    alpha=opacity,
                    color='red',
                    label='LinSVR')
    rects5 = ax.bar(index + bar_width*2, FS_Ridge, bar_width, align = 'center',
                    alpha=opacity,
                    color='purple',
                    label='Ridge')
    rects6 = ax.bar(index + bar_width*3, FS_GBRT, bar_width, align = 'center',
                    alpha=opacity,
                    color='orange',
                    label='GBRT')
    rects7 = ax.bar(index + bar_width*4, FS_RF, bar_width, align = 'center',
                    alpha=opacity,
                    color='yellow',
                    label='RF')
    ax.set_xlabel('Regression Models',fontsize=20)
    ax.set_ylabel(v,fontsize=20)
    ax.set_title('%s of different ML models' % v,fontsize=20)
    ax.set_xticks(index + bar_width*3)
    # Tick labels carry the number of features each RFECV selector retained.
    ax.set_xticklabels(['FS_LinReg %i ' % datasets[0].shape[1],
                        'FS_LinSVR %i ' % datasets[1].shape[1],
                        'FS_Ridge %i ' % datasets[2].shape[1],
                        'FS_GBRT %i ' % datasets[3].shape[1],
                        'FS_RF %i ' % datasets[4].shape[1]], fontsize=20)
    ax.legend(fontsize=15,loc='upper right', bbox_to_anchor=(1.12, 1), ncol=1)
    i+=1
plt.tight_layout()
plt.show()
```
## Save Best Model
```
# Persist the winning estimator (tuned Ridge on the Ridge-RFECV feature set,
# grid[2][2]) together with the indexes of the selected feature columns.
data = {
    'model': grid[2][2].best_estimator_,
    'df_indexes': supports[2]
}
# A context manager guarantees the file handle is closed even if dump fails.
with open("23mer_135FS_Ridge_REGmodel.pickle", "wb") as pickle_out:
    pickle.dump(data, pickle_out)
# # Uncomment to load the best model
# pickle_in = open("23mer_135FS_Ridge_REGmodel.pickle","rb")
# p_load = pickle.load(pickle_in)
# Model = p_load['model']
# idx = p_load['df_indexes']
```
# Residual plot Analysis
```
# Create the train and test data
X_train, X_test, y_train, y_test = train_test_split(datasets[2], df[:,0], test_size=0.2)
# BUGFIX: the original did `model = Model`, but `Model` is only defined when
# the commented-out pickle loader in the previous cell is executed, so this
# cell raised NameError. Use the tuned best estimator directly.
model = grid[2][2].best_estimator_
visualizer = ResidualsPlot(model)
visualizer.fit(X_train, y_train)    # Fit the training data to the model
visualizer.score(X_test, y_test)    # Evaluate the model on the test data
visualizer.poof()
```
## feature importance
```
# Feature importances of the tuned GBRT (grid[2][3]) fitted on the Ridge-RFECV
# feature set (datasets[2]).
df_importance = pd.DataFrame(grid[2][3].best_estimator_.feature_importances_, datasets[2].columns)
with pd.option_context('display.max_rows', None, 'display.max_columns', 3):
    print(df_importance)
# Aggregate importances by feature family. The slice boundaries (0:3, 3:31,
# 31:) presumably separate position-independent from position-dependent
# first/second-order features — TODO confirm against the input CSV layout.
pos_indep_order1 = 0
print(pos_indep_order1)
pos_dep_order1 = sum(df_importance.iloc[3:31][0])
print(pos_dep_order1)
pos_indep_order2 = sum(df_importance.iloc[0:3][0])
print(pos_indep_order2)
pos_dep_order2 = sum(df_importance.iloc[31:][0])
print(pos_dep_order2)
#PAM_bounds = sum(df_importance.iloc[197:][0])
#print(PAM_bounds)
# Sanity check: the group totals should sum to ~1.
print(sum((pos_indep_order1,pos_indep_order2,pos_dep_order1,pos_dep_order2
#,PAM_bounds
)))
pos = np.arange(4) + .5
plt.subplot(1, 2, 2)
plt.barh(pos,(pos_indep_order1,pos_indep_order2,pos_dep_order1,pos_dep_order2
#,PAM_bounds
), align='center')
plt.yticks(pos, ('pos_indep_order1','pos_indep_order2','pos_dep_order1','pos_dep_order2'
#,'PAM_bounds'
))
plt.xlabel('Importance')
plt.title('23mer GBRT Variable Importance')
plt.show()
```
## Guide efficiency prediction Performance
### Training set
```
# Stacked-bar view of prediction quality on the training set: columns
# (1, 2) of the CSV are read as (true value, predicted/binning value).
# NOTE(review): the open() handle is never closed — np.loadtxt accepts a
# path string directly.
scoredf = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/Analysis-23mer_sgRNA_7514predictions.csv", "rb"), delimiter=',', usecols=(1,2), dtype='float', skiprows=1)
indx = scoredf[:,1].argsort()
scoredf = scoredf[indx,:]
# Bucket the rows into ten bins of width 0.1 on column 1.
dic_scores = dict()
k=0
for i in np.arange(0,1,0.1):
    dic_scores[k]= scoredf[np.where((scoredf[:,1]>=i) & (scoredf[:,1]<i+0.1)),]
    k+=1
#print(dic_scores)
# Per bin, fraction of rows whose column-0 value falls in each quartile band.
npRed = list()
npOrange = list()
npYellow = list()
npGreen = list()
for i in dic_scores:
    npRed.append(len(np.where((dic_scores[i][0][:,0]>=0) & (dic_scores[i][0][:,0]<0.25))[0])/len(dic_scores[i][0][:,0]))
    npOrange.append(len(np.where((dic_scores[i][0][:,0]>=0.25) & (dic_scores[i][0][:,0]<0.5))[0])/len(dic_scores[i][0][:,0]))
    npYellow.append(len(np.where((dic_scores[i][0][:,0]>=0.5) & (dic_scores[i][0][:,0]<0.75))[0])/len(dic_scores[i][0][:,0]))
    npGreen.append(len(np.where((dic_scores[i][0][:,0]>=0.75) & (dic_scores[i][0][:,0]<1.0))[0])/len(dic_scores[i][0][:,0]))
# Data
r = [0,1,2,3,4,5,6,7,8,9]
raw_data = {'greenBars': npGreen,
            'yellowBars': npYellow,
            'orangeBars': npOrange,
            'redBars': npRed
            }
# NOTE(review): this rebinds `df`, shadowing the feature matrix loaded at the
# top of the notebook — earlier cells can no longer be re-run after this one.
df = pd.DataFrame(raw_data)
# From raw value to percentage (the fractions already sum to ~1 per bin, so
# this renormalisation mostly corrects for values excluded at exactly 1.0).
totals = [i+j+k+l for i,j,k,l in zip(df['greenBars'], df['yellowBars'], df['orangeBars'], df['redBars'])]
greenBars = [i / j for i,j in zip(df['greenBars'], totals)]
yellowBars = [i / j for i,j in zip(df['yellowBars'], totals)]
orangeBars = [i / j for i,j in zip(df['orangeBars'], totals)]
redBars = [i / j for i,j in zip(df['redBars'], totals)]
# plot
plt.figure(figsize=(20,10))
plt.rc('axes', titlesize=20)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
barWidth = 0.85
# Bin labels carry the sample count per bin.
names = ('0.0-0.1\nn=%s' % len(dic_scores[0][0][:,0]),
         '0.1-0.2\nn=%s' % len(dic_scores[1][0][:,0]),
         '0.2-0.3\nn=%s' % len(dic_scores[2][0][:,0]),
         '0.3-0.4\nn=%s' % len(dic_scores[3][0][:,0]),
         '0.4-0.5\nn=%s' % len(dic_scores[4][0][:,0]),
         '0.5-0.6\nn=%s' % len(dic_scores[5][0][:,0]),
         '0.6-0.7\nn=%s' % len(dic_scores[6][0][:,0]),
         '0.7-0.8\nn=%s' % len(dic_scores[7][0][:,0]),
         '0.8-0.9\nn=%s' % len(dic_scores[8][0][:,0]),
         '0.9-1.0\nn=%s' % len(dic_scores[9][0][:,0]))
# Create green Bars
plt.bar(r, greenBars, color='g', edgecolor='black', width=barWidth)
# Create yellow Bars
plt.bar(r, yellowBars, bottom=greenBars, color='yellow', edgecolor='black', width=barWidth)
# Create orange Bars
plt.bar(r, orangeBars, bottom=[i+j for i,j in zip(greenBars, yellowBars)], color='orange', edgecolor='black', width=barWidth)
# Create red Bars
plt.bar(r, redBars, bottom=[i+j+k for i,j,k in zip(greenBars, yellowBars,orangeBars)], color='red', edgecolor='black', width=barWidth)
# Custom x axis
plt.xticks(r, names)
plt.title("23mer efficiency on training set")
# NOTE(review): ylabel contains a typo ("efficieny") and the axis wording
# looks swapped relative to the title — verify which column is the prediction.
plt.xlabel("dMel efficiency prediction", fontsize=20)
plt.ylabel("True efficieny quartiles", fontsize=20)
# Show graphic
plt.show()
```
### Testing set
```
# Same stacked-bar analysis as the training-set cell above, applied to the
# Drosophila test-set file; only bins 0.1-0.9 (8 bins) are populated here.
# NOTE(review): the open() handle is never closed — np.loadtxt accepts a path.
scoredf = np.loadtxt(open("/home/pierre/Desktop/Position_Matrix/Analysis-Test_Set_Droso.csv", "rb"), delimiter=',', usecols=(4,6), dtype='float', skiprows=1)
indx = scoredf[:,1].argsort()
scoredf = scoredf[indx,:]
dic_scores = dict()
k=0
for i in np.arange(0.1,0.9,0.1):
    dic_scores[k]= scoredf[np.where((scoredf[:,1]>=i) & (scoredf[:,1]<i+0.1)),]
    k+=1
#print(dic_scores)
# Per bin, fraction of rows whose column-0 value falls in each quartile band.
npRed = list()
npOrange = list()
npYellow = list()
npGreen = list()
for i in dic_scores:
    npRed.append(len(np.where((dic_scores[i][0][:,0]>=0) & (dic_scores[i][0][:,0]<0.25))[0])/len(dic_scores[i][0][:,0]))
    npOrange.append(len(np.where((dic_scores[i][0][:,0]>=0.25) & (dic_scores[i][0][:,0]<0.5))[0])/len(dic_scores[i][0][:,0]))
    npYellow.append(len(np.where((dic_scores[i][0][:,0]>=0.5) & (dic_scores[i][0][:,0]<0.75))[0])/len(dic_scores[i][0][:,0]))
    npGreen.append(len(np.where((dic_scores[i][0][:,0]>=0.75) & (dic_scores[i][0][:,0]<1.0))[0])/len(dic_scores[i][0][:,0]))
# Data
r = [1,2,3,4,5,6,7,8]
raw_data = {'greenBars': npGreen,
            'yellowBars': npYellow,
            'orangeBars': npOrange,
            'redBars': npRed
            }
# NOTE(review): rebinds `df`, shadowing the feature matrix from earlier cells.
df = pd.DataFrame(raw_data)
# From raw value to percentage
totals = [i+j+k+l for i,j,k,l in zip(df['greenBars'], df['yellowBars'], df['orangeBars'], df['redBars'])]
greenBars = [i / j for i,j in zip(df['greenBars'], totals)]
yellowBars = [i / j for i,j in zip(df['yellowBars'], totals)]
orangeBars = [i / j for i,j in zip(df['orangeBars'], totals)]
redBars = [i / j for i,j in zip(df['redBars'], totals)]
# plot
plt.figure(figsize=(20,10))
plt.rc('axes', titlesize=20)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
barWidth = 0.85
names = (
    '0.1-0.2\nn=%s' % len(dic_scores[0][0][:,0]),
    '0.2-0.3\nn=%s' % len(dic_scores[1][0][:,0]),
    '0.3-0.4\nn=%s' % len(dic_scores[2][0][:,0]),
    '0.4-0.5\nn=%s' % len(dic_scores[3][0][:,0]),
    '0.5-0.6\nn=%s' % len(dic_scores[4][0][:,0]),
    '0.6-0.7\nn=%s' % len(dic_scores[5][0][:,0]),
    '0.7-0.8\nn=%s' % len(dic_scores[6][0][:,0]),
    '0.8-0.9\nn=%s' % len(dic_scores[7][0][:,0]))
# Create green Bars
plt.bar(r, greenBars, color='g', edgecolor='black', width=barWidth)
# Create yellow Bars
plt.bar(r, yellowBars, bottom=greenBars, color='yellow', edgecolor='black', width=barWidth)
# Create orange Bars
plt.bar(r, orangeBars, bottom=[i+j for i,j in zip(greenBars, yellowBars)], color='orange', edgecolor='black', width=barWidth)
# Create red Bars
plt.bar(r, redBars, bottom=[i+j+k for i,j,k in zip(greenBars, yellowBars,orangeBars)], color='red', edgecolor='black', width=barWidth)
# Custom x axis
plt.xticks(r, names)
plt.title("23mer efficiency on testing set")
plt.xlabel("dMel efficiency prediction", fontsize=20)
plt.ylabel("True efficieny quartiles", fontsize=20)
# Show graphic
plt.show()
```
| github_jupyter |
## CSc 4222 - Cyber Security | Assignment 2
### Bryan W. Nonni
### Password Salt System Implementation and Brute-Force Cracker
### 1. Implementation of the Password Salt System
In this section, students are required to implement a password salt verification system. With the given UID and Hash files, students need to implement the verification system, such that the given example of the password and salt can be matched against the hash value in the `Hash.txt` file. For example, the first `UID` is `001`, the `password` is `0599`, and the salt associated with the first `UID` is `054`. When applying the MD5 Hash Function with the encode format as `utf-8` as shown in the figure below, the expected output should be `4a1d6f102cd95fac33853e4d72fe1dc5`. It is worth mentioning that the concatenation between password and salt needs to be in the format of `(password||salt)`. For example, with the aforementioned input, the concatenation result will be `0599054`. The leading 0 should not be omitted.
__Requirement for the designed system:__
The designed verification system should be able to correctly verify the example shown above. When the input is correct, the system will output a String “The input password and salt matches the hash value in the database”. Otherwise, the output should be “The input password and salt does not match the hash value in the database”.
```
from hashlib import md5
import pandas as pd
# Load the parallel UID and hash lists: line i of Hash.txt is the MD5 of
# password||salt for the UID on line i of UID.txt.
# NOTE(review): the file handles are handed to the DataFrame and never
# closed; a `with open(...)` block would be safer.
Hash = open('Hash.txt', 'r')
UID = open('UID.txt', 'r')
hash_dictionary = { 'uid': UID, 'hash': Hash }
# Strip the trailing newline each file line carries.
hash_df = pd.DataFrame(hash_dictionary).replace('\n', '', regex=True)
hash_df.head(5)
def computeMD5hash(pwsalt):
    """Return the hex MD5 digest of *pwsalt*, encoded as UTF-8."""
    return md5(pwsalt.encode('utf-8')).hexdigest()
# Verify the worked example from the assignment: UID 001, password 0599,
# salt 054.
uid001_hash = '4a1d6f102cd95fac33853e4d72fe1dc5'
# BUGFIX: the concatenation must be exactly password||salt ('0599054'); the
# original passed '0599 054' with a space, so the digest never matched.
compute_hash = computeMD5hash('0599054')
print(uid001_hash, "matches", compute_hash, "=>", True) if uid001_hash == compute_hash else print(False)
```
### 2. Implementation of the Cracker System
To reduce the complexity of cracking the password and salt, the passwords are randomly set in the range of `[0000, 1000]`, while the salt is randomly set in the range of `[000,100]` for each `UID`. One easy idea for implementing a cracker system is to try all possible combinations of password and salt for one UID by brute force. As the `Hash.txt` and `UID.txt` files are given, students are requested to implement a cracker system that can find the correct password and salt for a specific `UID`.
__Requirement for the designed system:__
For a specific `UID`, the cracker system can output the correct password and salt value. For example, when input the `UID` as `001`, the output should be `password: 0599; salt: 054`.
__Demo and Report:__
__1)__ Each student is required to go to either TA or instructor to demo both systems. The TA or instructor will ask the students to run one or two specific UID(s) to check the corresponding password and salt.
__2)__ The report should firstly describe how these two systems are designed; secondly, the report should also include the set of passwords and salts for ten different UIDs.
<a href='./Report.txt'>Bryan's Report</a>
__3)__ For undergraduate students, the verification and cracker systems can be designed separately. For graduate students, the cracker system should include the function of verification system.
```
# Candidate search spaces: every zero-padded salt '000'..'999' and every
# zero-padded password '0000'..'9999'.
salt = ["{:03d}".format(i) for i in range(1000)]
password = ["{:04d}".format(i) for i in range(10000)]
def getUidHash(UID):
    """Brute-force the (password, salt) pair whose MD5 matches the hash stored for *UID*.

    Tries every password '0000'..'9999' with every salt '000'..'999',
    hashing the concatenation password||salt, and returns a description of
    the first match. Returns None when no combination matches.
    """
    Hash = hash_df.loc[hash_df.uid == UID, 'hash'].values[-1]
    print('uid', UID, 'Hash:', Hash)
    # BUGFIX: the original wrapped this search in `while j < len(hash_df)`,
    # which merely repeated the identical exhaustive search len(hash_df)
    # times when nothing matched; a single pass is sufficient.
    for p in password:
        for s in salt:
            if computeMD5hash(p + s) == Hash:
                return 'Match! uid: {}; password: {}; salt: {}; hash: {}'.format(UID, p, s, Hash)
    return None
# Crack a few sample UIDs (each call may hash up to 10,000,000 candidates).
getUidHash('059')
getUidHash('002')
getUidHash('003')
getUidHash('004')
def executeBruteForceAttack():
    """Crack every UID in hash_df, printing each recovered password/salt pair.

    For each stored hash, tries every password||salt combination and prints
    the first match. Unlike the original, it stops searching a UID once a
    match is found instead of hashing the remaining ~10M combinations.
    """
    for i in range(len(hash_df)):
        uid = hash_df['uid'][i]
        Hash = hash_df['hash'][i]
        print(Hash)
        found = False
        for p in password:
            for s in salt:
                if computeMD5hash(p + s) == Hash:
                    print("Match! uid: {}; password: {}; salt: {}; hash: {}\n".format(uid, p, s, Hash))
                    found = True
                    break
            if found:
                break
executeBruteForceAttack()
```
| github_jupyter |
```
# Colab-only setup: mount Google Drive, install the notebook's extra
# dependencies, and put the project root on sys.path so `src.*` resolves.
from google.colab import drive
drive.mount('/content/drive')
!pip install picklable_itertools
!pip install fuel
!pip install foolbox
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
# Project root on the mounted drive.
PROJECT_DIR = "/content/drive/My Drive/2018/Colab_Deep_Learning/one_class_neural_networks/"
import sys,os
import numpy as np
sys.path.append(PROJECT_DIR)
```
##** MNIST 0 Vs All **##
```
## Obtaining the training and testing data
# OC-SVM anomaly detection: digit 0 as the "normal" class vs. all other
# digits, repeated over ten fixed seeds; mean/std AUROC is reported at the end.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS / MAX_SAMPLES / CONTAMINATION and the ISO_FOREST
# paths look copied from an Isolation-Forest notebook; nothing in this cell
# reads them.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
# One independent train/evaluate run per seed.
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 0    # digit treated as "normal"
Cfg.mnist_outlier = -1  # -1: every other digit is an outlier
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
# Mean +/- std over the ten seeds, reported as percentages.
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 1 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 1    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 2 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 2    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 3 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 3    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 4 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 4    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 5 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 5    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 6 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 6    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 7 Vs All **##
```
## Obtaining the training and testing data
# Same OC-SVM protocol as the digit-0 cell; only Cfg.mnist_normal differs.
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
DATASET = "mnist"
# NOTE(review): ESTIMATORS/MAX_SAMPLES/CONTAMINATION and the ISO_FOREST
# paths are unused leftovers.
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 7    # digit treated as "normal"
Cfg.mnist_outlier = -1
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 8 Vs All **##
```
## Obtaining the training and testing data
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
# Identical to the digit-6 cell above, with digit 8 as the "normal" class.
DATASET = "mnist"
# NOTE(review): ESTIMATORS / MAX_SAMPLES / CONTAMINATION and the ISO_FOREST
# paths below are unused by this OC-SVM cell (Isolation-Forest leftovers?).
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"  # assumes PROJECT_DIR is defined earlier in the notebook
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
# Ten fixed seeds so the reported mean/std is reproducible.
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []  # one AUROC per seed
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 8
Cfg.mnist_outlier = -1  # presumably -1 means "all other digits are outliers" -- verify in src.config
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
# Aggregate over seeds; printed scaled to percent.
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
##** MNIST 9 Vs All **##
```
## Obtaining the training and testing data
%reload_ext autoreload
%autoreload 2
import numpy as np
from src.config import Configuration as Cfg
from src.models.svm import SVM
# Identical to the digit-6 cell above, with digit 9 as the "normal" class.
DATASET = "mnist"
# NOTE(review): ESTIMATORS / MAX_SAMPLES / CONTAMINATION and the ISO_FOREST
# paths below are unused by this OC-SVM cell (Isolation-Forest leftovers?).
ESTIMATORS = 100
MAX_SAMPLES = 250
CONTAMINATION = 0.1
MODEL_SAVE_PATH = PROJECT_DIR + "/models/mnist/ISO_FOREST/"  # assumes PROJECT_DIR is defined earlier in the notebook
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/mnist/ISO_FOREST/"
PRETRAINED_WT_PATH = ""
# Ten fixed seeds so the reported mean/std is reproducible.
RANDOM_SEED = [42,56,81,67,33,25,90,77,15,11]
AUC = []  # one AUROC per seed
## Setting the required config values
Cfg.out_frac = 0.1
Cfg.ad_experiment = 1 # 1 : yes # 0 : No
Cfg.unit_norm_used = "l1"
Cfg.gcn = 1 # 1 : yes # 0 : No
Cfg.zca_whitening = 0 # 1 : yes # 0 : No
Cfg.pca = 0 # 1 for yes # 0 : No
Cfg.mnist_val_frac = 0.1
Cfg.mnist_normal = 9
Cfg.mnist_outlier = -1  # presumably -1 means "all other digits are outliers" -- verify in src.config
# SVM parameters
Cfg.svm_nu = 0.1
Cfg.svm_GridSearchCV = 1
for seed in RANDOM_SEED:
    # plot parameters
    # Cfg.xp_path = REPORT_SAVE_PATH
    # dataset
    Cfg.seed = seed
    # initialize OC-SVM
    ocsvm = SVM(loss="OneClassSVM", dataset=DATASET, kernel="rbf")
    # train OC-SVM model
    ocsvm.fit(GridSearch=Cfg.svm_GridSearchCV)
    # predict scores
    auc_roc = ocsvm.predict(which_set='test')
    print("========================================================================",)
    print("AUROC: ",auc_roc)
    print("========================================================================",)
    AUC.append(auc_roc)
print("===========AURO Computed============================")
print("AUROC computed ", AUC)
# Aggregate over seeds; printed scaled to percent.
auc_roc_mean = np.mean(np.asarray(AUC))
auc_roc_std = np.std(np.asarray(AUC))
print ("AUROC =====", auc_roc_mean*100 ,"+/-",auc_roc_std*100)
print("========================================================================")
```
| github_jupyter |
# Vector Norm
```
import numpy as np
from scipy import signal
from scipy.spatial import distance
# NOTE(review): scipy.signal is imported but not used anywhere in this section.
# Sample vectors used throughout this section: A and B are complex 5-vectors,
# C is a real vector, Z is the zero vector, and D stacks A and B row-wise.
A = np.array([1+1j, 2+2j, 3+3j, 4+4j, 5+5j])
B = np.array([6-6j, 7-7j, 8-8j, 9-9j, 10-10j])
C = np.array([2,3,5,7,11])
Z = np.array([0,0,0,0,0])
D = np.array([A,B])
```
For every complex inner product space $V$ with inner product $\langle\cdot,\cdot\rangle$, we can define a norm (or length), which is a function
\begin{align}
\|\cdot\| : V \to \mathbb{R}
\end{align}
defined as
\begin{align}
\|V\| = \sqrt{\langle V, V \rangle}
\end{align}
```
# Check that numpy's norm matches |sqrt(V . V)| for the sample vectors.
# NOTE(review): for complex vectors the proper inner product conjugates one
# argument (np.vdot); np.dot(A, A) omits the conjugate and only agrees with
# np.linalg.norm here because all entries of each vector share the same
# phase -- this identity does not hold for arbitrary complex vectors.
[
    np.linalg.norm(A) == np.abs(np.sqrt(np.dot(A,A))),
    np.linalg.norm(B) == np.abs(np.sqrt(np.dot(B,B))),
    np.linalg.norm(C) == np.abs(np.sqrt(np.dot(C,C)))
]
# The norms themselves.
[
    np.linalg.norm(A),
    np.linalg.norm(B),
    np.linalg.norm(C),
]
```
# Vector Distance
For every complex inner product space $V$ with inner product $\langle\cdot,\cdot\rangle$, we can define a distance function
\begin{align}
d(\cdot,\cdot) : V \times V \to \mathbb{R}
\end{align}
where
\begin{align}
d(V_1, V_2) = \|V_1 - V_2\| = \sqrt{\langle V_1 - V_2,\, V_1 - V_2 \rangle}
\end{align}
```
# Distance between A and B, and its equivalence to the norm of the difference.
distance.euclidean(A, B)
np.linalg.norm(A-B) == distance.euclidean(A, B)
# d(A, B) = sqrt(<A-B, A-B>).  The previous revision computed
# sqrt(dot(A,A) - dot(B,B)), which is not the distance in general -- it only
# coincided numerically for these particular sample vectors.  np.vdot applies
# the conjugation required by the complex inner product.
np.round( distance.euclidean(A, B), 10) == \
np.round( np.abs(np.sqrt(np.vdot(A - B, A - B))), 10)
```
Distance is symmetric: d(V, W) = d(W, V)
```
# Symmetry: d(A, B) == d(B, A).
distance.euclidean(A, B) == distance.euclidean(B, A)
```
Distance satisfies the triangle inequality: d(U, V) ≤ d(U, W) + d(W, V)
```
# Triangle inequality: d(A, C) <= d(A, B) + d(B, C).
distance.euclidean(A, C), distance.euclidean(A, B) + distance.euclidean(B, C)
distance.euclidean(A, C) <= distance.euclidean(A, B) + distance.euclidean(B, C)
```
Distance is nondegenerate: d(V, W) > 0 if V ≠ W and d(V, V) = 0.
```
# Nondegeneracy: d(V, V) == 0 and d(V, W) > 0 when V != W.
distance.euclidean(Z,Z)
distance.euclidean(A,Z), distance.euclidean(A,Z) > 0
```
## Orthogonal Vectors
The dot product of orthogonal vectors is zero
```
# Two orthogonal unit vectors along the coordinate axes.
X = np.array([1,0])
Y = np.array([0,1])
# Their dot product is zero, as expected for orthogonal vectors.
np.dot(X,Y)
```
## Kronecker Delta
δj,k is called the Kronecker delta function.
δj,k =
1 (if j == k);
0 (if j != k);
```
# Build a 3x3 sample matrix.  The previous revision ended this line with a
# stray "; X" expression statement, which had no effect and was removed.
# NOTE(review): np.matrix is deprecated in favour of plain 2-D np.array.
M = np.matrix([[1,2,3],[4,5,6],[7,8,9]])
# Display the matrix dimensions.
{ "shape": M.shape, "size": M.size }
def kronecker_delta(matrix):
    """Return a copy of *matrix* with every off-diagonal entry set to zero."""
    result = np.copy(matrix)
    rows, cols = matrix.shape
    for row in range(rows):
        for col in range(cols):
            if row != col:
                result[row, col] = 0
    return result
# Demo: keep only the diagonal of M.
kronecker_delta(M)
```
It is equivalent to element-wise multiplication by the identity matrix
```
# Same result via elementwise multiplication with the identity matrix
# (note np.identity returns float64, so the product dtype may differ).
np.multiply(M, np.identity(3))
kronecker_delta(M) == np.multiply(M, np.identity(M.shape[0]))
```
NOTE: np.kron is the Kronecker (tensor) product function, and not the Kronecker DELTA
```
# Kronecker (tensor) product of M with itself -- yields a 9x9 matrix.
np.kron(M,M)
```
| github_jupyter |
# COVID-19 Drug Repurposing via gene-compounds relations
This example shows how to do drug repurposing using DRKG even with the pretrained model.
## Collecting COVID-19 related disease
At the very beginning we need to collect a list of associated genes for Corona-Virus(COV) in DRKG.
```
import pandas as pd
import numpy as np

# The gene entity names live in the third column of each DRKG-provided TSV.
# Host genes associated with coronaviruses in general:
cov_frame = pd.read_csv('coronavirus-related-host-genes.tsv', sep="\t")
cov_genes = np.unique(cov_frame.values[:, 2]).tolist()
# Host genes specific to COVID-19 / SARS-CoV-2:
cov2_frame = pd.read_csv('covid19-host-genes.tsv', sep="\t")
cov2_genes = np.unique(cov2_frame.values[:, 2]).tolist()
# Union of both lists, deduplicated.
cov_related_genes = list(set(cov_genes) | set(cov2_genes))
print(len(cov_related_genes))
```
## Candidate drugs
Now we use FDA-approved drugs in Drugbank as candidate drugs. (we exclude drugs with molecule weight < 250) The drug list is in infer\_drug.tsv
```
import csv

# Candidate compounds: FDA-approved Drugbank entries listed one per row
# (entity name in the first column) in infer_drug.tsv.
drug_list = []
with open("./infer_drug.tsv", newline='', encoding='utf-8') as tsv_file:
    for row in csv.DictReader(tsv_file, delimiter='\t', fieldnames=['drug', 'ids']):
        drug_list.append(row['drug'])
len(drug_list)
```
## Inhibits relation
One inhibit relation in this context
```
# Single scoring relation: GNBR "N" (inhibits), directed Compound -> Gene.
treatment = ['GNBR::N::Compound:Gene']#'DRUGBANK::target::Compound:Gene','DGIDB::INHIBITOR::Gene:Compound']
```
## Get pretrained model
We can directly use the pretrained model to do drug repurposing.
```
import pandas as pd
import numpy as np
import sys
import csv
# Download the pretrained DRKG artifacts (entity/relation id maps and the
# TransE_l2 embedding matrices) into ../data/drkg/embed/.
sys.path.insert(1, '../utils')
from utils import download_and_extract
download_and_extract()
entity_idmap_file = '../data/drkg/embed/entities.tsv'
relation_idmap_file = '../data/drkg/embed/relations.tsv'
```
## Get embeddings for genes and drugs
```
# Get drugname/disease name to entity ID mappings
entity_map = {}      # entity name -> integer id
entity_id_map = {}   # integer id -> entity name
relation_map = {}    # relation name -> integer id
with open(entity_idmap_file, newline='', encoding='utf-8') as csvfile:
    reader = csv.DictReader(csvfile, delimiter='\t', fieldnames=['name','id'])
    for row_val in reader:
        entity_map[row_val['name']] = int(row_val['id'])
        entity_id_map[int(row_val['id'])] = row_val['name']
with open(relation_idmap_file, newline='', encoding='utf-8') as csvfile:
    reader = csv.DictReader(csvfile, delimiter='\t', fieldnames=['name','id'])
    for row_val in reader:
        relation_map[row_val['name']] = int(row_val['id'])
# handle the ID mapping: translate drug / gene entity names into integer ids
drug_ids = []
gene_ids = []
for drug in drug_list:
    drug_ids.append(entity_map[drug])
for gene in cov_related_genes:
    gene_ids.append(entity_map[gene])
treatment_rid = [relation_map[treat] for treat in treatment]
# Load embeddings
import torch as th
entity_emb = np.load('../data/drkg/embed/DRKG_TransE_l2_entity.npy')
rel_emb = np.load('../data/drkg/embed/DRKG_TransE_l2_relation.npy')
drug_ids = th.tensor(drug_ids).long()
gene_ids = th.tensor(gene_ids).long()
treatment_rid = th.tensor(treatment_rid)
# Embedding lookups for every candidate drug and each treatment relation.
drug_emb = th.tensor(entity_emb[drug_ids])
treatment_embs = [th.tensor(rel_emb[rid]) for rid in treatment_rid]
```
## Drug Repurposing Based on Edge Score
We use the following algorithm to calculate the edge score. Note, here we use logsigmoid to make all scores < 0. The larger the score is, the stronger the $h$ will have $r$ with $t$.
$\mathbf{d} = \gamma - ||\mathbf{h}+\mathbf{r}-\mathbf{t}||_{2}$
$\mathbf{score} = \log\left(\frac{1}{1+\exp(\mathbf{-d})}\right)$
When doing drug repurposing, we only use the treatment related relations.
```
import torch.nn.functional as fn

# TransE_l2 margin; matches the gamma the DRKG embeddings were trained with.
gamma = 12.0

def transE_l2(head, rel, tail):
    """TransE edge score: gamma minus the L2 distance ||head + rel - tail||."""
    translation_error = th.norm(head + rel - tail, p=2, dim=-1)
    return gamma - translation_error
scores_per_gene = []   # one score tensor (one entry per candidate drug) per gene
dids_per_gene = []     # matching drug-id tensor per gene
for rid in range(len(treatment_embs)):
    treatment_emb=treatment_embs[rid]
    for gene_id in gene_ids:
        gene_emb = th.tensor(entity_emb[gene_id])
        # The DGIDB relation is directed Gene -> Compound, so head and tail
        # are swapped relative to the Compound -> Gene relations.
        if treatment[rid]=='DGIDB::INHIBITOR::Gene:Compound':
            score = fn.logsigmoid(transE_l2(gene_emb, treatment_emb,
                                            drug_emb))
        else:
            score = fn.logsigmoid(transE_l2(drug_emb, treatment_emb,
                                            gene_emb))
        scores_per_gene.append(score)
        dids_per_gene.append(drug_ids)
# Flattened views over all (gene, drug) pairs.
scores = th.cat(scores_per_gene)
dids = th.cat(dids_per_gene)
```
### Check clinical trial drugs per gene
Here we load the clinical trial drugs
```
# Map Drugbank id -> human-readable name for the COVID-19 clinical-trial drugs.
clinical_drugs_file = './COVID19_clinical_trial_drugs.tsv'
clinical_drug_map = {}
with open(clinical_drugs_file, newline='', encoding='utf-8') as tsv_file:
    for row in csv.DictReader(tsv_file, delimiter='\t', fieldnames=['id', 'drug_name','drug_id']):
        clinical_drug_map[row['drug_id']] = row['drug_name']
```
Next we measure some statistics per gene.
```
# Per-gene statistics: for each gene, take the top-100 unique drugs by score
# and count how many appear in the clinical-trial drug list; track the gene
# with the most hits and aggregate per-drug hit counts / reciprocal ranks.
maxhit = 0        # best hit count seen so far
maxgene = None    # gene achieving maxhit; fix: initialized so the final print
max_dugs = ""     # cannot raise NameError when no gene has any hit
drugs_in_top_k = {}     # drug id -> number of genes whose top-100 contains it
drugsfr_in_top_k = {}   # drug id -> sum of reciprocal ranks across genes
for i in range(len(scores_per_gene)):
    score = scores_per_gene[i]
    did = dids_per_gene[i]
    # Sort this gene's drugs by descending score.
    idx = th.flip(th.argsort(score), dims=[0])
    score = score[idx].numpy()
    did = did[idx].numpy()
    # Keep only the highest-ranked occurrence of each drug.
    _, unique_indices = np.unique(did, return_index=True)
    topk = 100
    topk_indices = np.sort(unique_indices)[:topk]
    proposed_did = did[topk_indices]
    proposed_score = score[topk_indices]
    found_in_top_k = 0
    found_drugs = "\n"
    for j in range(topk):
        # Entity names look like "Compound::DBxxxxx"; slice out the Drugbank id.
        drug = entity_id_map[int(proposed_did[j])][10:17]
        if clinical_drug_map.get(drug, None) is not None:
            found_in_top_k += 1
            if drug in drugs_in_top_k:
                drugs_in_top_k[drug] += 1
                drugsfr_in_top_k[drug] += 1 / (j + 1)
            else:
                drugs_in_top_k[drug] = 1
                drugsfr_in_top_k[drug] = 1 / (j + 1)
            found_drugs += "[{}]{}\n".format(j, clinical_drug_map[drug])
    if maxhit < found_in_top_k:
        maxhit = found_in_top_k
        maxgene = cov_related_genes[i]
        max_dugs = found_drugs
print("{}\t{}\t{}".format(maxgene, maxhit, max_dugs))
# Summary table sorted by hit count, descending.
res = [[drug, clinical_drug_map[drug], drugs_in_top_k[drug], drugsfr_in_top_k[drug]] for drug in drugs_in_top_k.keys()]
res = reversed(sorted(res, key=lambda x: x[2]))
for drug in res:
    print("{}\t{}\t{}\t{}".format(drug[0], drug[1], drug[2], drug[3]))
```
| github_jupyter |
```
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell
# install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]
# If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error:
# 'ImportError: IProgress not found. Please update jupyter and ipywidgets.'
! pip install ipywidgets
! jupyter nbextension enable --py widgetsnbextension
# Please restart the kernel after running this cell
from nemo.collections import nlp as nemo_nlp
from nemo.utils.exp_manager import exp_manager
import os
import wget
import torch
import pytorch_lightning as pl
from omegaconf import OmegaConf
```
In this tutorial, we are going to describe how to finetune BioMegatron - a [BERT](https://arxiv.org/abs/1810.04805)-like [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf) model pre-trained on large biomedical text corpus ([PubMed](https://pubmed.ncbi.nlm.nih.gov/) abstracts and full-text commercial use collection) - on [RE: Text mining chemical-protein interactions (CHEMPROT)](https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/).
The model size of Megatron-LM can be larger than BERT, up to multi-billion parameters, compared to 345 million parameters of BERT-large.
There are some alternatives of BioMegatron, most notably [BioBERT](https://arxiv.org/abs/1901.08746). Compared to BioBERT BioMegatron is larger by model size and pre-trained on larger text corpus.
A more general tutorial of using BERT-based models, including Megatron-LM, for downstream natural language processing tasks can be found [here](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb).
# Task Description
**Relation Extraction (RE)** can be regarded as a type of sentence classification.
The task is to classify the relation of a [GENE] and [CHEMICAL] in a sentence, for example like the following:
```html
14967461.T1.T22 <@CHEMICAL$> inhibitors currently under investigation include the small molecules <@GENE$> (Iressa, ZD1839) and erlotinib (Tarceva, OSI-774), as well as monoclonal antibodies such as cetuximab (IMC-225, Erbitux). <CPR:4>
14967461.T2.T22 <@CHEMICAL$> inhibitors currently under investigation include the small molecules gefitinib (<@GENE$>, ZD1839) and erlotinib (Tarceva, OSI-774), as well as monoclonal antibodies such as cetuximab (IMC-225, Erbitux). <CPR:4>
```
to one of the following class:
| Relation Class | Relations |
| ----------- | ----------- |
| CPR:3 | Upregulator and activator |
| CPR:4 | Downregulator and inhibitor |
| CPR:5 | Agonist |
| CPR:6 | Antagonist |
| CPR:9 | Substrate and product of |
# Datasets
Details of ChemProt Relation Extraction task and the original data can be found on the [BioCreative VI website](https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/)
ChemProt dataset pre-processed for easier consumption can be downloaded from [here](https://github.com/arwhirang/recursive_chemprot/blob/master/Demo/tree_LSTM/data/chemprot-data_treeLSTM.zip) or [here](https://github.com/ncbi-nlp/BLUE_Benchmark/releases/download/0.1/bert_data.zip)
```
# Task, data and working directories for the ChemProt relation-extraction run.
TASK = 'ChemProt'
DATA_DIR = os.path.join(os.getcwd(), 'DATA_DIR')
RE_DATA_DIR = os.path.join(DATA_DIR, 'RE')
WORK_DIR = os.path.join(os.getcwd(), 'WORK_DIR')
MODEL_CONFIG = 'text_classification_config.yaml'
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(os.path.join(DATA_DIR, 'RE'), exist_ok=True)
os.makedirs(WORK_DIR, exist_ok=True)
# download the dataset (pre-processed ChemProt zip) and unpack it
wget.download('https://github.com/arwhirang/recursive_chemprot/blob/master/Demo/tree_LSTM/data/chemprot-data_treeLSTM.zip?raw=true',
              os.path.join(DATA_DIR, 'data_re.zip'))
!unzip -o {DATA_DIR}/data_re.zip -d {RE_DATA_DIR}
! ls -l $RE_DATA_DIR
```
## Pre-process dataset
Let's convert the dataset into the format that is compatible for [NeMo text-classification module](https://github.com/NVIDIA/NeMo/blob/stable/examples/nlp/text_classification/text_classification_with_bert.py).
```
# Convert the raw ChemProt files into NeMo's text-classification TSV format.
wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/text_classification/data/import_datasets.py')
! python import_datasets.py --dataset_name=chemprot --source_data_dir={RE_DATA_DIR} --target_data_dir={RE_DATA_DIR}
# let's take a look at the training data
! head -n 5 {RE_DATA_DIR}/train.tsv
# let's check the label mapping
! cat {RE_DATA_DIR}/label_mapping.tsv
```
It is not necessary to have the mapping exactly like this - it can be different.
We use the same [mapping used by BioBERT](https://github.com/dmis-lab/biobert/blob/master/run_re.py#L438) so that comparison can be more straightforward.
# Model configuration
Now, let's take a closer look at the model's configuration and learn to train the model.
The model is defined in a config file which declares multiple important sections. They are:
- **model**: All arguments that are related to the Model - language model, a classifier, optimizer and schedulers, datasets and any other related information
- **trainer**: Any argument to be passed to PyTorch Lightning
```
# download the model's configuration file
config_dir = WORK_DIR + '/configs/'
os.makedirs(config_dir, exist_ok=True)
if not os.path.exists(config_dir + MODEL_CONFIG):
    print('Downloading config file...')
    wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/text_classification/conf/' + MODEL_CONFIG, config_dir)
else:
    print ('config file is already exists')
# this line will print the entire config of the model
config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'
print(config_path)
config = OmegaConf.load(config_path)
# Point the config at the converted ChemProt TSVs.
config.model.train_ds.file_path = os.path.join(RE_DATA_DIR, 'train.tsv')
config.model.validation_ds.file_path = os.path.join(RE_DATA_DIR, 'dev.tsv')
config.model.task_name = 'chemprot'
# Note: these are small batch-sizes - increase as appropriate to available GPU capacity
config.model.train_ds.batch_size=8
config.model.validation_ds.batch_size=8
# 6 output classes (see the label mapping printed in the earlier cell).
config.model.dataset.num_classes=6
print(OmegaConf.to_yaml(config))
```
# Model Training
## Setting up Data within the config
Among other things, the config file contains dictionaries called **dataset**, **train_ds** and **validation_ds**. These are configurations used to setup the Dataset and DataLoaders of the corresponding config.
We assume that both training and evaluation files are located in the same directory, and use the default names mentioned during the data download step.
So, to start model training, we simply need to specify `model.dataset.data_dir`, like we are going to do below.
Also notice that some config lines, including `model.dataset.data_dir`, have `???` in place of paths, this means that values for these fields are required to be specified by the user.
Let's now add the data directory path, task name and output directory for saving predictions to the config.
```
# Register the task name, output directory and data directory in the config.
config.model.task_name = TASK
config.model.output_dir = WORK_DIR
config.model.dataset.data_dir = RE_DATA_DIR
```
## Building the PyTorch Lightning Trainer
NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem.
Let's first instantiate a Trainer object
```
print("Trainer config - \n")
print(OmegaConf.to_yaml(config.trainer))
# lets modify some trainer configs
# checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
config.trainer.gpus = cuda
# for PyTorch Native AMP set precision=16
config.trainer.precision = 16 if torch.cuda.is_available() else 32
# remove distributed training flags
config.trainer.accelerator = None
trainer = pl.Trainer(**config.trainer)
```
## Setting up a NeMo Experiment
NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it:
```
# Let exp_manager handle logging and checkpointing under WORK_DIR.
config.exp_manager.exp_dir = WORK_DIR
exp_dir = exp_manager(trainer, config.get("exp_manager", None))
# the exp_dir provides a path to the current experiment for easy access
exp_dir = str(exp_dir)
exp_dir
```
Before initializing the model, we might want to modify some of the model configs. Here we are modifying it to use BioMegatron, [Megatron-LM BERT](https://arxiv.org/abs/1909.08053) pre-trained on [PubMed](https://pubmed.ncbi.nlm.nih.gov/) biomedical text corpus.
```
# complete list of supported BERT-like models
print(nemo_nlp.modules.get_pretrained_lm_models_list())
# specify BERT-like model, you want to use, for example, "megatron-bert-345m-uncased" or 'bert-base-uncased'
PRETRAINED_BERT_MODEL = "biomegatron-bert-345m-uncased"
# add the specified above model parameters to the config
config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL
```
Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders will be prepared for training and evaluation.
Also, the pretrained BERT model will be downloaded, note it can take up to a few minutes depending on the size of the chosen BERT model.
```
# Build the classifier.  This also prepares datasets/dataloaders and downloads
# the pretrained BioMegatron weights, which may take a few minutes.
model = nemo_nlp.models.TextClassificationModel(cfg=config.model, trainer=trainer)
```
## Monitoring training progress
Optionally, you can create a Tensorboard visualization to monitor training progress.
If you're not using Colab, refer to [https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks](https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks) if you're facing issues with running the cell below.
```
# Detect whether we are running inside Google Colab.
try:
    from google import colab
    COLAB_ENV = True
except (ImportError, ModuleNotFoundError):
    COLAB_ENV = False
# Load the TensorBoard notebook extension
if COLAB_ENV:
    %load_ext tensorboard
    %tensorboard --logdir {exp_dir}
else:
    print("To use tensorboard, please use this notebook in a Google Colab environment.")
# start model training
trainer.fit(model)
```
## Training Script
If you have NeMo installed locally, you can also train the model with `examples/nlp/text_classification/text_classification_with_bert.py.`
To run training script, use:
`python text_classification_with_bert.py \
model.dataset.data_dir=PATH_TO_DATA_DIR \
model.task_name=TASK`
The training could take several minutes and the results should look something like:
```
precision recall f1-score support
0 0.7328 0.8348 0.7805 115
1 0.9402 0.9291 0.9346 7950
2 0.8311 0.9146 0.8708 199
3 0.6400 0.6302 0.6351 457
4 0.8002 0.8317 0.8156 1093
5 0.7228 0.7518 0.7370 548
accuracy 0.8949 10362
macro avg 0.7778 0.8153 0.7956 10362
weighted avg 0.8963 0.8949 0.8954 10362
```
| github_jupyter |
```
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Library versions this notebook was run with.
matplotlib.__version__, np.__version__, pd.__version__
```
## 2 Plots side by side
```
plt.clf()
# sample data
x = np.linspace(0.0,100,50)
y = np.random.uniform(low=0,high=10,size=50)
# create figure and axes: one row, two columns side by side
fig, axes = plt.subplots(1,2)
ax1 = axes[0]
ax2 = axes[1]
# just plot things on each individual axes
ax1.scatter(x,y,c='red',marker='+')
ax2.bar(x,y)
# widen the figure so the two panels are not cramped
plt.gcf().set_size_inches(10,5)
plt.show()
```
## 2 plots one on top of the other
```
plt.clf()
# sample data
x = np.linspace(0.0,100,50)
y = np.random.uniform(low=0,high=10,size=50)
# create figure and axes: two rows, one column (stacked vertically)
fig, axes = plt.subplots(2,1)
ax1 = axes[0]
ax2 = axes[1]
# just plot things on each individual axes
ax1.scatter(x,y,c='red',marker='+')
ax2.bar(x,y)
plt.gcf().set_size_inches(5,5)
plt.show()
```
## 4 plots in a grid
```
import numpy as np
import matplotlib.pyplot as plt
# sample data
x = np.linspace(0.0,100,50)
y = np.random.uniform(low=0,high=10,size=50)
# plt.subplots returns an array of arrays. We can
# directly assign those to variables directly
# like this
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
# just plot things on each individual axes
ax1.scatter(x,y,c='red',marker='+')
ax2.bar(x,y)
ax3.scatter(x,y,marker='x')
ax4.barh(x,y)
plt.gcf().set_size_inches(5,5)
plt.show()
```
## Pandas plots
```
import matplotlib.pyplot as plt
import pandas as pd
# Small demo frame: one string column and two numeric columns.
df = pd.DataFrame({
    'string_col':['foo','bar','baz','quux'],
    'x':[10,20,30,40],
    'y':[1,2,3,4]
})
df
plt.clf()
# plt.subplots returns an array of arrays. We can
# directly assign those to variables directly
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
# bar plot for column 'x'
df.plot(y='x', kind='bar', ax=ax1)
ax1.set_xlabel('index')
# bar plot for column 'y' (orange); note kind='bar' draws vertical bars
df.plot(y='y', kind='bar', ax=ax2, color='orange')
ax2.set_xlabel('index')
# both columns in a scatter plot
df.plot('x','y', kind='scatter', ax=ax3)
# to have two lines, plot twice in the same axis
df.plot(y='x', kind='line', ax=ax4)
df.plot(y='y', kind='line', ax=ax4)
ax4.set_xlabel('index')
plt.subplots_adjust(wspace=0.3, hspace=0.5)
plt.show()
```
## Set subplot title
```
plt.clf()
# plt.subplots returns an array of arrays. We can
# directly assign those to variables directly
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
# sample data
x = np.linspace(0.0,100,50)
y = np.random.uniform(low=0,high=10,size=50)
# plot individual subplots
ax1.bar(x,y)
ax2.bar(x,y)
ax3.scatter(x,y)
# plots the values of x against their index
ax4.plot(x)
# a title can be set on each subplot individually
ax4.set_title('This is Plot 4',size=14)
plt.subplots_adjust(wspace=0.3, hspace=0.5)
plt.show()
```
## Padding
```
import numpy as np
import matplotlib.pyplot as plt
# sample data
x = np.linspace(0.0,100,50)
y = np.random.uniform(low=0,high=10,size=50)
# plt.subplots returns an array of arrays. We can
# directly assign those to variables directly
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
# just plot things on each individual axes
ax1.scatter(x,y,c='red',marker='+')
ax2.bar(x,y)
ax3.scatter(x,y,marker='x')
ax4.barh(x,y)
# here, set the width and the height between the subplots
# the default value is 0.2 for each
plt.subplots_adjust(wspace=0.50, hspace=1.0)
plt.show()
```
## Align axes
```
import numpy as np
import matplotlib.pyplot as plt
plt.clf()
# plt.subplots returns an array of arrays. We can
# directly assign those to variables directly
fig, ((ax1,ax2)) = plt.subplots(1,2)
# fixed seed so the sample data is reproducible
np.random.seed(42)
# NOTE(review): x is created but never plotted in this cell.
x = np.linspace(0.0,100,50)
# sample data in different magnitudes
y1 = np.random.normal(loc=10, scale=2, size=10)
y2 = np.random.normal(loc=20, scale=2, size=10)
ax1.plot(y1)
ax2.plot(y2)
ax1.grid(True,alpha=0.3)
ax2.grid(True,alpha=0.3)
# use the same y-limits on both axes so the two series are comparable
ax1.set_ylim(0,25)
ax2.set_ylim(0,25)
plt.subplots_adjust(wspace=0.3, hspace=0.5)
plt.show()
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Train your first neural network
<table align="left"><td>
<a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on Github</a></td></table>
In this guide, we will train a neural network model to classify images of clothing, like sneakers and shirts. It's fine if you don't understand all the details, this is a fast-paced overview of a complete TensorFlow program with the details explained as we go.
This guide uses [tf.keras](https://www.tensorflow.org/programmers_guide/keras), a high-level API to build and train models in TensorFlow.
```
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# Print the TensorFlow version this notebook is running against.
print(tf.__version__)
```
## Import the Fashion MNIST dataset
This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
</td></tr>
</table>
Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.
This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are useful to verify that an algorithm works as expected. They're good starting points to test and debug code.
We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST dataset directly from TensorFlow, just import and load the data:
```
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
Loading the dataset returns four NumPy arrays:
* The `train_images` and `train_labels` arrays are the *training set*, this is the data the model uses to learn.
* The model is tested against the *test set*, the `test_images` and `test_labels` arrays.
The images are 28x28 numpy arrays, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
```
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Explore the data
Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels:
```
train_images.shape
```
Likewise, there are 60,000 labels in the training set:
```
len(train_labels)
```
Each label is an integer between 0 and 9:
```
train_labels
```
There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels:
```
test_images.shape
```
And the test set contains 10,000 image labels:
```
len(test_labels)
```
## Preprocess the data
The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255:
```
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.gca().grid(False)
```
We will scale these values to a range of 0 to 1 before feeding to the neural network model. For this, cast the datatype of the image components from an integer to a float, and divide by 255. Here's how to preprocess the images:
It's important that the *training set* and the *testing set* are preprocessed in the same way:
```
train_images = train_images / 255.0
test_images = test_images / 255.0
```
Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
```
import matplotlib.pyplot as plt
%matplotlib inline
# Preview the first 25 training images in a 5x5 grid, each labeled with its
# class name, to sanity-check the data before training.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    # Hide ticks and grid so only the image and its label are visible.
    plt.xticks([])
    plt.yticks([])
    plt.grid('off')
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
```
## Build the model
Building the neural network requires configuring the layers of the model, then compiling the model.
### Setup the layers
The basic building block of a neural network is the *layer*. Layers extract representations from the data fed into them. And, hopefully, these representations are more meaningful for the problem at hand.
Most of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have parameters that are learned during training.
```
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
```
The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn, it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely-connected, or fully-connected, neural layers. The first `Dense` layer has 128 nodes, or neurons. The second (and last) layer is a 10-node *softmax* layer—this returns an array of 10 probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the 10 digit classes.
### Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:
* *Loss function* —This measures how accurate the model is during training. We want to minimize this function to "steer" the model in the right direction.
* *Optimizer* —This is how the model is updated based on the data it sees and its loss function.
* *Metrics* —Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified.
```
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
## Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model—in this example, the `train_images` and `train_labels` arrays.
2. The model learns to associate images and labels.
3. We ask the model to make predictions about a test set—in this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array..
To start training, call the `model.fit` method—the model is "fit" to the training data:
```
model.fit(train_images, train_labels, epochs=5)
```
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data.
## Evaluate accuracy
Next, compare how the model performs on the test dataset:
```
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
```
It turns out, the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*. Overfitting is when a machine learning model performs worse on new data than on its training data.
## Make predictions
With the model trained, we can use it to make predictions about some images.
```
predictions = model.predict(test_images)
```
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
```
predictions[0]
```
A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
```
np.argmax(predictions[0])
```
So the model is most confident that this image is an ankle boot, or `class_names[9]`. And we can check the test label to see this is correct:
```
test_labels[0]
```
Let's plot several images with their predictions. Correct prediction labels are green and incorrect prediction labels are red.
```
# Plot the first 25 test images, their predicted label, and the true label
# Color correct predictions in green, incorrect predictions in red
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    # Hide ticks and grid so only the image and its label are visible.
    plt.xticks([])
    plt.yticks([])
    plt.grid('off')
    plt.imshow(test_images[i], cmap=plt.cm.binary)
    # argmax over the 10 softmax scores gives the predicted class index.
    predicted_label = np.argmax(predictions[i])
    true_label = test_labels[i]
    if predicted_label == true_label:
        color = 'green'
    else:
        color = 'red'
    # Label format: "predicted (true)".
    plt.xlabel("{} ({})".format(class_names[predicted_label],
                                class_names[true_label]),
               color=color)
```
Finally, use the trained model to make a prediction about a single image.
```
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
```
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
```
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
```
Now predict the image:
```
predictions = model.predict(img)
print(predictions)
```
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
```
prediction = predictions[0]
np.argmax(prediction)
```
And, as before, the model predicts a label of 9.
| github_jupyter |
```
import pandas as pd
import numpy as np
import os
import datetime
import simplejson
import git
import sys
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
def makeHMMUnSupData(Input, colname, fipsname):
    """Group the values of `colname` by FIPS code for HMM-style input.

    Returns a list of lists: one inner list of `colname` values per unique
    value of the `fipsname` column, in order of first appearance.
    """
    return [
        list(Input.loc[Input[fipsname] == code, colname])
        for code in Input[fipsname].unique()
    ]
# --- NYT cumulative county-level data ------------------------------------
#Cumulative Death Data
NYT_tot = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_counties.csv")
# Drop the text columns and order rows per county, then chronologically.
NYT_tot = NYT_tot.drop(columns=['county','state']).sort_values(['fips','date']).reset_index(drop=True)
# Rows without a FIPS code cannot be attributed to a county; drop them.
NYT_tot = NYT_tot.dropna(subset=['fips'])
NYT_tot['fips'] = NYT_tot.fips.astype(int)
NYT_tot['date'] = pd.to_datetime(NYT_tot['date'])
# Composite "<fips>, <date>" key, used later for joining frames.
NYT_tot['id'] = NYT_tot.fips.astype(str).str.cat(NYT_tot.date.astype(str), sep=', ')
#Making new parameter for deathrate
NYT_tot['deathrate'] = NYT_tot['deaths']/NYT_tot['cases']
# 0/0 above yields NaN; treat those as a death rate of 0.
NYT_tot = NYT_tot.fillna(0)
#multiplying death rate by 1000 to give integer state values
NYT_tot['deathstate'] = NYT_tot['deathrate']*1000
NYT_tot['deathstate'] = NYT_tot['deathstate'].astype(int)
# --- NYT daily (differenced) data -----------------------------------------
#Differenced Daily Death Data
NYT_daily = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_counties_daily.csv")
NYT_daily = NYT_daily.drop(columns=['county','state']).sort_values(['fips','date']).reset_index(drop=True)
NYT_daily['fips'] = NYT_daily.fips.astype(int)
NYT_daily['date'] = pd.to_datetime(NYT_daily['date'])
NYT_daily['id'] = NYT_daily.fips.astype(str).str.cat(NYT_daily.date.astype(str), sep=', ')
# Earliest and latest dates present anywhere in the daily data.
FirstDay = min(NYT_daily.date.unique())
LastDay = max(NYT_daily.date.unique())
#Making a time-warping of NYT daily data, so each county has a value at the starting day of 2020-01-21
# and then a final value at the most recent day
# NOTE(review): this binds NYT_daily_Warp to the same object as NYT_daily;
# it only works because every mutation below rebinds the name to a new frame.
NYT_daily_Warp = NYT_daily
for fips in NYT_daily.fips.unique():
    rows = NYT_daily[NYT_daily['fips'] == fips]
    #adding in the first day values
    if FirstDay not in rows.date.unique():
        NYT_daily_Warp = NYT_daily_Warp.append({'fips': fips, 'date': pd.to_datetime('2020-01-21'), \
            'cases': 0, 'deaths' : 0, 'id' : str(fips) + ', 2020-01-21'}, ignore_index=True)
    #making sure each entry has the final day values
    if LastDay not in rows.date.unique():
        # Counties with no entry on the last day are removed entirely.
        NYT_daily_Warp = NYT_daily_Warp[NYT_daily_Warp['fips'] != fips]
NYT_daily_Warp = NYT_daily_Warp.sort_values(['fips','date']).reset_index(drop=True)
NYT_daily_Warp.to_csv('NYT_daily_Warp.csv')
# One death series per county (list of lists) for the HMM/DTW pipeline.
NYT_daily_Warp_Death = makeHMMUnSupData(NYT_daily_Warp, 'deaths', 'fips')
#This is a list of all the counties and dates
County_List = list(NYT_daily.fips.unique())
Date_List = list(NYT_daily.date.unique())
#This creates a base dataframe that contains all pairs of FIPS codes with the valid dates given in Air_Qual
# NOTE(review): cartesian_product lives in a private pandas module
# (pd.core.reshape.util); it may move or break across pandas versions.
CL, DL = pd.core.reshape.util.cartesian_product([County_List, Date_List])
BaseFrame = pd.DataFrame(dict(fips=CL, date=DL)).sort_values(['fips','date']).reset_index(drop=True)
BaseFrame['id'] = BaseFrame.fips.astype(str).str.cat(BaseFrame.date.astype(str), sep=', ')
#Making frame of all deaths at all dates to properly do DTW clustering
# Outer-join on the "<fips>, <date>" id so every county has a row for every
# date; missing observations become 0.
NYT_daily_Filled = BaseFrame.join(NYT_daily.set_index('id'), on='id', how='outer', lsuffix='',rsuffix='_x').sort_values(['fips', 'date']).drop(columns=['fips_x','date_x']).fillna(0).drop_duplicates(subset=['fips','date']).reset_index(drop=True)
NYT_daily_Filled.to_csv('NYT_daily_Filled.csv')
#List of lists of daily death count for each county, starting 1/23/20, ending most recent date.
NYT_daily_Death_Filled = makeHMMUnSupData(NYT_daily_Filled, 'deaths', 'fips')
#JHU Data
JHU_tot = pd.read_csv(f"{homedir}/data/us/covid/JHU_daily_US.csv").sort_values(['FIPS','Date'])
FIPSlist = JHU_tot.FIPS.unique()
Datelist = JHU_tot.Date.unique()
# First and last dates present anywhere in the JHU data.
Datepair = [Datelist[0],Datelist[-1]]
#Getting rid of unneeded fips codes in the list of total codes
for fips in FIPSlist:
    rows = JHU_tot[JHU_tot['FIPS'] == fips]
    datelist = rows.Date.unique()
    datepair = [datelist[0],datelist[-1]]
    # Keep only counties whose series spans the full [first, last] range.
    if np.array_equal(Datepair,datepair) != True:
        JHU_tot = JHU_tot.drop(list(JHU_tot[JHU_tot['FIPS'] == fips].index))
JHU_tot = JHU_tot.sort_values(['FIPS','Date']).reset_index(drop=True)
def monotonicCol(Data, colname):
    """Force `colname` to be monotonically non-decreasing within each FIPS.

    Iterates each county's series backwards, clamping every value to the
    running minimum of the values that come after it, so that reading the
    result forwards never decreases (e.g. cumulative death counts).
    Returns a flat list covering all counties in order.
    """
    out = []
    for code in Data.FIPS.unique():
        series = list(Data[Data['FIPS'] == code][colname])
        clamped = []
        ceiling = None  # smallest value seen so far, scanning from the end
        for value in reversed(series):
            if ceiling is None or value <= ceiling:
                ceiling = value
            clamped.append(ceiling)
        clamped.reverse()
        out.extend(clamped)
    return out
# Rebuild the JHU frame with every cumulative column forced monotonic.
d = {'FIPS': JHU_tot['FIPS'], 'Date' : JHU_tot['Date'], 'Confirmed' : monotonicCol(JHU_tot,'Confirmed'),\
     'Deaths' : monotonicCol(JHU_tot,'Deaths'),'Active' : monotonicCol(JHU_tot,'Active'), \
     'Recovered' : monotonicCol(JHU_tot,'Recovered')}
#Monotonically increasing transformation of JHU_tot
JHU_mono = pd.DataFrame(data=d)
def cumtoDaily(Data, colname):
    """Convert a cumulative column into daily increments, per FIPS code.

    For each county (unique value of the `FIPS` column) the column is
    differenced; the first row of each county becomes 0 because diff()
    leaves NaN there and we fill it.  Returns a flat list of floats in
    county order, matching the row order of `Data`.

    NOTE(review): filling the first diff with 0 drops the county's opening
    value (day-one deaths) — presumably intentional since the series is
    assumed to start at the epidemic's beginning; confirm with the caller.
    """
    # (Removed an unused local `column = Data[colname]` from the original.)
    ls = []
    for fips in Data.FIPS.unique():
        ls.extend(list(Data[Data['FIPS'] == fips][colname].diff().fillna(0)))
    return ls
# Difference every monotonic cumulative column into daily increments.
d = {'FIPS': JHU_mono['FIPS'], 'Date' : JHU_mono['Date'], 'Confirmed' : cumtoDaily(JHU_mono,'Confirmed'),\
     'Deaths' : cumtoDaily(JHU_mono,'Deaths'),'Active' : cumtoDaily(JHU_mono,'Active'), \
     'Recovered' : cumtoDaily(JHU_mono,'Recovered')}
#Daily changing data based on monotonically transformed data
JHU_daily = pd.DataFrame(data=d)
JHU_daily.to_csv('JHU_Daily.csv')
#List of lists of daily death count for each county, starting 3/23/20, ending most recent date.
JHU_daily_death = makeHMMUnSupData(JHU_daily, 'Deaths', 'FIPS')
#Our three types of death lists for DTW clusterings
NYT_daily_Warp_Death
NYT_daily_Death_Filled
JHU_daily_death
# Sanity-check summaries for each list-of-lists: number of counties,
# mean series length, mean total deaths, mean daily deaths.
print(len(NYT_daily_Warp_Death))
print(np.mean([len(a) for a in NYT_daily_Warp_Death]))
print(np.mean([sum(a) for a in NYT_daily_Warp_Death]))
print(np.mean([np.mean(a) for a in NYT_daily_Warp_Death]))
print(len(NYT_daily_Death_Filled))
print(np.mean([len(a) for a in NYT_daily_Death_Filled]))
print(np.mean([sum(a) for a in NYT_daily_Death_Filled]))
print(np.mean([np.mean(a) for a in NYT_daily_Death_Filled]))
print(len(JHU_daily_death))
print(np.mean([len(a) for a in JHU_daily_death]))
print(np.mean([sum(a) for a in JHU_daily_death]))
print(np.mean([np.mean(a) for a in JHU_daily_death]))
#Saving the death data files
f = open('NYT_daily_Warp_Death.txt', 'w')
simplejson.dump(NYT_daily_Warp_Death, f)
f.close()
g = open('NYT_daily_Death_Filled.txt', 'w')
simplejson.dump(NYT_daily_Death_Filled, g)
g.close()
h = open('JHU_daily_death.txt', 'w')
simplejson.dump(JHU_daily_death, h)
h.close()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
# Code to read csv file into colaboratory:
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
'''
downloaded = drive.CreateFile({'id':'1fjM5LTtbHpkeI0CxnuMWWc0vC3_ldhw-'})
downloaded.GetContentFile('quora_questions.csv')
quora= pd.read_csv("quora_questions.csv")
quora.head()
'''
```
# Question and Answer Chat Bots
## Loading the Data
We will be working with the Babi Data Set from Facebook Research.
Full Details: https://research.fb.com/downloads/babi/
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
http://arxiv.org/abs/1502.05698
```
downloaded = drive.CreateFile({'id':'1A7O67NVUsahGvcv497TdnD9LFk5BvDDI'})
downloaded.GetContentFile('train_qa.txt')
downloaded = drive.CreateFile({'id':'1_G9c3NGMzENi1VGrInCZmOilAafFUhhE'})
downloaded.GetContentFile('test_qa.txt')
import pickle
import numpy as np
with open("train_qa.txt", "rb") as fp: # Unpickling
train_data = pickle.load(fp)
with open("test_qa.txt", "rb") as fp: # Unpickling
test_data = pickle.load(fp)
type(test_data)
type(train_data)
len(test_data)
len(train_data)
train_data[0]
' '.join(train_data[0][0])
' '.join(train_data[0][1])
train_data[0][2]
```
-----
## Setting up Vocabulary of All Words
```
# Create a set that holds the vocab words
vocab = set()
all_data = test_data + train_data
len(all_data)
```
A set in python is an unordered collection of unique elements
```
# Build the vocabulary as the union of all story and question words.
for story, question , answer in all_data:
    # In case you don't know what a union of sets is:
    # https://www.programiz.com/python-programming/methods/set/union
    # set of a story is: set(train_data[0][0])
    vocab = vocab.union(set(story))
    vocab = vocab.union(set(question))
# Answers are only ever yes/no; make sure both are in the vocab.
vocab.add('no')
vocab.add('yes')
vocab
vocab_len = len(vocab) + 1 #we add an extra space to hold a 0 for Keras's pad_sequences
# Longest story (in tokens) across train+test; used as the padding length.
max_story_len = max([len(data[0]) for data in all_data])
max_story_len
max_question_len = max([len(data[1]) for data in all_data]) # index 1 of each triple is the question
max_question_len
```
## Vectorizing the Data
```
vocab
# Reserve 0 for pad_sequences
vocab_size = len(vocab) + 1
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
# integer encode sequences of words
tokenizer = Tokenizer(filters=[])
tokenizer.fit_on_texts(vocab)
tokenizer.word_index
train_story_text = []
train_question_text = []
train_answers = []
for story,question,answer in train_data:
train_story_text.append(story)
train_question_text.append(question)
train_story_seq = tokenizer.texts_to_sequences(train_story_text)
print (len(train_story_text))
print (len(train_story_seq))
# word_index = tokenizer.word_index
```
### Functionalize Vectorization
```
def vectorize_stories(data, word_index=tokenizer.word_index, max_story_len=max_story_len,max_question_len=max_question_len):
    """Turn (story, query, answer) triples into padded index sequences.

    Each story and query word is lower-cased and mapped through
    `word_index`; the answer becomes a one-hot vector of length
    len(word_index)+1 (index 0 is reserved for Keras padding).

    Returns a tuple (stories, queries, answers) where stories and queries
    are padded with `pad_sequences` to `max_story_len` and
    `max_question_len` respectively, and answers is a 2-D numpy array.
    """
    stories, queries, answers = [], [], []
    for story, query, answer in data:
        # Encode each word of the story and query as its vocabulary index.
        stories.append([word_index[word.lower()] for word in story])
        queries.append([word_index[word.lower()] for word in query])
        # One-hot encode the answer; slot 0 stays reserved for padding.
        one_hot = np.zeros(len(word_index) + 1)
        one_hot[word_index[answer]] = 1
        answers.append(one_hot)
    # Pad to uniform lengths so the RNN sees fixed-size inputs.
    return (pad_sequences(stories, maxlen=max_story_len),
            pad_sequences(queries, maxlen=max_question_len),
            np.array(answers))
inputs_train, queries_train, answers_train = vectorize_stories(train_data)
inputs_test, queries_test, answers_test = vectorize_stories(test_data)
inputs_test
queries_test
answers_test
sum(answers_test)
tokenizer.word_index['yes']
tokenizer.word_index['no']
```
## Creating the Model
```
from keras.models import Sequential, Model
from keras.layers.embeddings import Embedding
from keras.layers import Input, Activation, Dense, Permute, Dropout
from keras.layers import add, dot, concatenate
from keras.layers import LSTM
```
### Placeholders for Inputs
Recall we technically have two inputs, stories and questions. So we need to use placeholders. `Input()` is used to instantiate a Keras tensor.
```
# placeholder shape= (max_story_len, batch_size)
input_sequence = Input((max_story_len,)) # the input shape is a tuple
question = Input((max_question_len,))
```
### Building the Networks
To understand why we chose this setup, make sure to read the paper we are using:
* Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
## Encoders
### Input Encoder m
```
# End-to-end memory network (Sukhbaatar et al., arXiv:1503.08895).
# Input gets embedded to a sequence of vectors
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size,output_dim=64)) # output dimensions from the paper= 64
input_encoder_m.add(Dropout(0.3))
# This encoder will output:
# (samples, story_maxlen, embedding_dim)
# Input Encoder c
# embed the input into a sequence of vectors of size query_maxlen
input_encoder_c = Sequential()
input_encoder_c.add(Embedding(input_dim=vocab_size,output_dim=max_question_len))
input_encoder_c.add(Dropout(0.3))
# output: (samples, story_maxlen, query_maxlen)
# Question Encoder
# embed the question into a sequence of vectors
question_encoder = Sequential()
question_encoder.add(Embedding(input_dim=vocab_size,
                               output_dim=64,
                               input_length=max_question_len))
question_encoder.add(Dropout(0.3))
# output: (samples, query_maxlen, embedding_dim)
# Encode the Sequences
# encode input sequence and questions (which are indices)
# to sequences of dense vectors
input_encoded_m = input_encoder_m(input_sequence)
input_encoded_c = input_encoder_c(input_sequence)
question_encoded = question_encoder(question)
# Use dot product to compute the match between first input vector seq and the query
# shape: `(samples, story_maxlen, query_maxlen)`
match = dot([input_encoded_m, question_encoded], axes=(2, 2))
# Softmax over story positions -> attention weights for the query.
match = Activation('softmax')(match)
# Add the match matrix with the second input vector sequence
response = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen)
response = Permute((2, 1))(response) # (samples, query_maxlen, story_maxlen)
# concatenate
# concatenate the match matrix with the question vector sequence
answer = concatenate([response, question_encoded])
answer
# Reduce with RNN (LSTM)
answer = LSTM(32)(answer) # (samples, 32)
# Regularization with Dropout
answer = Dropout(0.5)(answer)
answer = Dense(vocab_size)(answer) # (samples, vocab_size)
# we output a probability distribution over the vocabulary
answer = Activation('softmax')(answer)
# build the final model
model = Model([input_sequence, question], answer)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# train
history = model.fit([inputs_train, queries_train], answers_train,batch_size=32,epochs=10,validation_data=([inputs_test, queries_test], answers_test))
### Saving the Model
# NOTE(review): filename says "120_epochs" but the fit above runs 10 epochs;
# the name is presumably stale — confirm before relying on it.
filename = 'chatbot_120_epochs.h5'
model.save(filename)
```
## Evaluating the Model
### Plotting Out Training History
```
import matplotlib.pyplot as plt
%matplotlib inline
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
### Evaluating on Given Test Set
```
model.load_weights(filename)
pred_results = model.predict(([inputs_test, queries_test]))
test_data[0][0]
story =' '.join(word for word in test_data[0][0])
print(story)
query = ' '.join(word for word in test_data[0][1])
print(query)
print("True Test Answer from Data is:",test_data[0][2])
#Generate prediction from model
val_max = np.argmax(pred_results[0])
for key, val in tokenizer.word_index.items():
if val == val_max:
k = key
print("Predicted answer is: ", k)
print("Probability of certainty was: ", pred_results[0][val_max])
```
## Writing Your Own Stories and Questions
Remember you can only use words from the existing vocab
```
vocab
# Note the whitespace of the periods
my_story = "John left the kitchen . Sandra dropped the football in the garden ."
my_story.split()
my_question = "Is the football in the garden ?"
my_question.split()
mydata = [(my_story.split(),my_question.split(),'yes')]
my_story,my_ques,my_ans = vectorize_stories(mydata)
pred_results = model.predict(([ my_story, my_ques]))
#Generate prediction from model
val_max = np.argmax(pred_results[0])
for key, val in tokenizer.word_index.items():
if val == val_max:
k = key
print("Predicted answer is: ", k)
print("Probability of certainty was: ", pred_results[0][val_max])
```
| github_jupyter |
# Fuzzing with Grammars
In the chapter on ["Mutation-Based Fuzzing"](MutationFuzzer.ipynb), we have seen how to use extra hints – such as sample input files – to speed up test generation. In this chapter, we take this idea one step further, by providing a _specification_ of the legal inputs to a program. Specifying inputs via a _grammar_ allows for very systematic and efficient test generation, in particular for complex input formats. Grammars also serve as the base for configuration fuzzing, API fuzzing, GUI fuzzing, and many more.
**Prerequisites**
* You should know how basic fuzzing works, e.g. from the [Chapter introducing fuzzing](Fuzzer.ipynb).
* Knowledge on [mutation-based fuzzing](MutationFuzzer.ipynb) and [coverage](Coverage.ipynb) is _not_ required yet, but still recommended.
```
import bookutils
import Fuzzer
```
## Synopsis
<!-- Automatically generated. Do not edit. -->
To [use the code provided in this chapter](Importing.ipynb), write
```python
>>> from fuzzingbook.Grammars import <identifier>
```
and then make use of the following features.
This chapter introduces _grammars_ as a simple means to specify input languages, and to use them for testing programs with syntactically valid inputs. A grammar is defined as a mapping of nonterminal symbols to lists of alternative expansions, as in the following example:
```python
>>> US_PHONE_GRAMMAR = {
>>> "<start>": ["<phone-number>"],
>>> "<phone-number>": ["(<area>)<exchange>-<line>"],
>>> "<area>": ["<lead-digit><digit><digit>"],
>>> "<exchange>": ["<lead-digit><digit><digit>"],
>>> "<line>": ["<digit><digit><digit><digit>"],
>>> "<lead-digit>": ["2", "3", "4", "5", "6", "7", "8", "9"],
>>> "<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
>>> }
>>>
>>> assert is_valid_grammar(US_PHONE_GRAMMAR)
```
Nonterminal symbols are enclosed in angle brackets (say, `<digit>`). To generate an input string from a grammar, a _producer_ starts with the start symbol (`<start>`) and randomly chooses a random expansion for this symbol. It continues the process until all nonterminal symbols are expanded. The function `simple_grammar_fuzzer()` does just that:
```python
>>> [simple_grammar_fuzzer(US_PHONE_GRAMMAR) for i in range(5)]
```
In practice, though, instead of `simple_grammar_fuzzer()`, you should use [the `GrammarFuzzer` class](GrammarFuzzer.ipynb) or one of its [coverage-based](GrammarCoverageFuzzer.ipynb), [probabilistic-based](ProbabilisticGrammarFuzzer.ipynb), or [generator-based](GeneratorGrammarFuzzer.ipynb) derivatives; these are more efficient, protect against infinite growth, and provide several additional features.
This chapter also introduces a [grammar toolbox](#A-Grammar-Toolbox) with several helper functions that ease the writing of grammars, such as using shortcut notations for character classes and repetitions, or extending existing grammars with new rules.
## Input Languages
All possible behaviors of a program can be triggered by its input. "Input" here can be a wide range of possible sources: We are talking about data that is read from files, from the environment, or over the network, data input by the user, or data acquired from interaction with other resources. The set of all these inputs determines how the program will behave – including its failures. When testing, it is thus very helpful to think about possible input sources, how to get them under control, and _how to systematically test them_.
For the sake of simplicity, we will assume for now that the program has only one source of inputs; this is the same assumption we have been using in the previous chapters, too. The set of valid inputs to a program is called a _language_. Languages range from the simple to the complex: the CSV language denotes the set of valid comma-separated inputs, whereas the Python language denotes the set of valid Python programs. We commonly separate data languages and programming languages, although any program can also be treated as input data (say, to a compiler). The [Wikipedia page on file formats](https://en.wikipedia.org/wiki/List_of_file_formats) lists more than 1,000 different file formats, each of which is its own language.
To formally describe languages, the field of *formal languages* has devised a number of *language specifications* that describe a language. *Regular expressions* represent the simplest class of these languages to denote sets of strings: The regular expression `[a-z]*`, for instance, denotes a (possibly empty) sequence of lowercase letters. *Automata theory* connects these languages to automata that accept these inputs; *finite state machines*, for instance, can be used to specify the language of regular expressions.
Regular expressions are great for not-too-complex input formats, and the associated finite state machines have many properties that make them great for reasoning. To specify more complex inputs, though, they quickly encounter limitations. At the other end of the language spectrum, we have *universal grammars* that denote the language accepted by *Turing machines*. A Turing machine can compute anything that can be computed; and with Python being Turing-complete, this means that we can also use a Python program $p$ to specify or even enumerate legal inputs. But then, computer science theory also tells us that each such testing program has to be written specifically for the program to be tested, which is not the level of automation we want.
## Grammars
The middle ground between regular expressions and Turing machines is covered by *grammars*. Grammars are among the most popular (and best understood) formalisms to formally specify input languages. Using a grammar, one can express a wide range of the properties of an input language. Grammars are particularly great for expressing the *syntactical structure* of an input, and are the formalism of choice to express nested or recursive inputs. The grammars we use are so-called *context-free grammars*, one of the easiest and most popular grammar formalisms.
### Rules and Expansions
A grammar consists of a *start symbol* and a set of *expansion rules* (or simply *rules*) which indicate how the start symbol (and other symbols) can be expanded. As an example, consider the following grammar, denoting a sequence of two digits:
```
<start> ::= <digit><digit>
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
To read such a grammar, start with the start symbol (`<start>`). An expansion rule `<A> ::= <B>` means that the symbol on the left side (`<A>`) can be replaced by the string on the right side (`<B>`). In the above grammar, `<start>` would be replaced by `<digit><digit>`.
In this string again, `<digit>` would be replaced by the string on the right side of the `<digit>` rule. The special operator `|` denotes *expansion alternatives* (or simply *alternatives*), meaning that any of the digits can be chosen for an expansion. Each `<digit>` thus would be expanded into one of the given digits, eventually yielding a string between `00` and `99`. There are no further expansions for `0` to `9`, so we are all set.
The interesting thing about grammars is that they can be *recursive*. That is, expansions can make use of symbols expanded earlier – which would then be expanded again. As an example, consider a grammar that describes integers:
```
<start> ::= <integer>
<integer> ::= <digit> | <digit><integer>
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
Here, an `<integer>` is either a single digit, or a digit followed by another integer. The number `1234` thus would be represented as a single digit `1`, followed by the integer `234`, which in turn is a digit `2`, followed by the integer `34`.
If we wanted to express that an integer can be preceded by a sign (`+` or `-`), we would write the grammar as
```
<start> ::= <number>
<number> ::= <integer> | +<integer> | -<integer>
<integer> ::= <digit> | <digit><integer>
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
These rules formally define the language: Anything that can be derived from the start symbol is part of the language; anything that cannot is not.
### Arithmetic Expressions
Let us expand our grammar to cover full *arithmetic expressions* – a poster child example for a grammar. We see that an expression (`<expr>`) is either a sum, or a difference, or a term; a term is either a product or a division, or a factor; and a factor is either a number or a parenthesized expression. Almost all rules can have recursion, and thus allow arbitrary complex expressions such as `(1 + 2) * (3.4 / 5.6 - 789)`.
```
<start> ::= <expr>
<expr> ::= <term> + <expr> | <term> - <expr> | <term>
<term> ::= <term> * <factor> | <term> / <factor> | <factor>
<factor> ::= +<factor> | -<factor> | (<expr>) | <integer> | <integer>.<integer>
<integer> ::= <digit><integer> | <digit>
<digit> ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
```
In such a grammar, if we start with `<start>` and then expand one symbol after another, randomly choosing alternatives, we can quickly produce one valid arithmetic expression after another. Such *grammar fuzzing* is highly effective as it comes to produce complex inputs, and this is what we will implement in this chapter.
## Representing Grammars in Python
Our first step in building a grammar fuzzer is to find an appropriate format for grammars. To make the writing of grammars as simple as possible, we use a format that is based on strings and lists. Our grammars in Python take the format of a _mapping_ between symbol names and expansions, where expansions are _lists_ of alternatives. A one-rule grammar for digits thus takes the form
```
# One-rule grammar: <start> expands directly into a single digit.
DIGIT_GRAMMAR = {
    "<start>":
        ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
}
```
whereas the full grammar for arithmetic expressions looks like this:
```
# Grammar for arithmetic expressions: <expr> covers + and -, <term> covers
# * and /, and <factor> covers signs, parentheses, and (decimal) integers.
EXPR_GRAMMAR = {
    "<start>":
        ["<expr>"],
    "<expr>":
        ["<term> + <expr>", "<term> - <expr>", "<term>"],
    "<term>":
        ["<factor> * <term>", "<factor> / <term>", "<factor>"],
    "<factor>":
        ["+<factor>",
         "-<factor>",
         "(<expr>)",
         "<integer>.<integer>",
         "<integer>"],
    "<integer>":
        ["<digit><integer>", "<digit>"],
    "<digit>":
        ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
}
```
In the grammar, every symbol can be defined exactly once. We can access any rule by its symbol...
```
EXPR_GRAMMAR["<digit>"]
```
...and we can check whether a symbol is in the grammar:
```
"<identifier>" in EXPR_GRAMMAR
```
Note that we assume that on the left hand side of a rule (i.e., the key in the mapping) is always a single symbol. This is the property that gives our grammars the characterization of _context-free_.
## Some Definitions
We assume that the canonical start symbol is `<start>`:
```
START_SYMBOL = "<start>"
```
The handy `nonterminals()` function extracts the list of nonterminal symbols (i.e., anything between `<` and `>`, except spaces) from an expansion.
```
import re

# A nonterminal is anything enclosed in angle brackets that contains
# neither spaces nor nested angle brackets.
RE_NONTERMINAL = re.compile(r'(<[^<> ]*>)')

def nonterminals(expansion):
    """Return the list of nonterminal symbols occurring in `expansion`."""
    # Later chapters allow (string, options) tuples as expansions;
    # only the string part carries symbols.
    if isinstance(expansion, tuple):
        expansion = expansion[0]
    return RE_NONTERMINAL.findall(expansion)

assert nonterminals("<term> * <factor>") == ["<term>", "<factor>"]
assert nonterminals("<digit><integer>") == ["<digit>", "<integer>"]
assert nonterminals("1 < 3 > 2") == []
assert nonterminals("1 <3> 2") == ["<3>"]
assert nonterminals("1 + 2") == []
assert nonterminals(("<1>", {'option': 'value'})) == ["<1>"]
```
Likewise, `is_nonterminal()` checks whether some symbol is a nonterminal:
```
def is_nonterminal(s):
    """Return a truthy match object iff `s` begins with a nonterminal symbol."""
    # A match object is truthy; None (no match) is falsy.
    return RE_NONTERMINAL.match(s)

assert is_nonterminal("<abc>")
assert is_nonterminal("<symbol-1>")
assert not is_nonterminal("+")
```
## A Simple Grammar Fuzzer
Let us now put the above grammars to use. We will build a very simple grammar fuzzer that starts with a start symbol (`<start>`) and then keeps on expanding it. To avoid expansion to infinite inputs, we place a limit (`max_nonterminals`) on the number of nonterminals. Furthermore, to avoid being stuck in a situation where we cannot reduce the number of symbols any further, we also limit the total number of expansion steps.
```
import random
class ExpansionError(Exception):
    """Raised when a term cannot be expanded any further."""
    pass
def simple_grammar_fuzzer(grammar, start_symbol=START_SYMBOL,
                          max_nonterminals=10, max_expansion_trials=100,
                          log=False):
    """Produce a string from `grammar`, starting at `start_symbol`.

    Repeatedly picks a random nonterminal in the current term and a random
    expansion for it.  An expansion is accepted only while it keeps the
    number of open nonterminals below `max_nonterminals`; after
    `max_expansion_trials` consecutive rejections, ExpansionError is raised.
    """
    term = start_symbol
    rejected_in_a_row = 0
    while nonterminals(term):
        symbol = random.choice(nonterminals(term))
        replacement = random.choice(grammar[symbol])
        candidate = term.replace(symbol, replacement, 1)
        if len(nonterminals(candidate)) >= max_nonterminals:
            # Too many open symbols -- reject this expansion and retry.
            rejected_in_a_row += 1
            if rejected_in_a_row >= max_expansion_trials:
                raise ExpansionError("Cannot expand " + repr(term))
            continue
        term = candidate
        rejected_in_a_row = 0
        if log:
            print("%-40s" % (symbol + " -> " + replacement), term)
    return term
```
Let us see how this simple grammar fuzzer obtains an arithmetic expression from the start symbol:
```
simple_grammar_fuzzer(grammar=EXPR_GRAMMAR, max_nonterminals=3, log=True)
```
By increasing the limit of nonterminals, we can quickly get much longer productions:
```
for i in range(10):
print(simple_grammar_fuzzer(grammar=EXPR_GRAMMAR, max_nonterminals=5))
```
Note that this fuzzer is rather inefficient due to the large number of search and replace operations. On the other hand, the implementation is straightforward and does the job in most cases. For this chapter, we'll stick to it; in the [next chapter](GrammarFuzzer.ipynb), we'll show how to build a more efficient one.
## Visualizing Grammars as Railroad Diagrams
With grammars, we can easily specify the format for several of the examples we discussed earlier. The above arithmetic expressions, for instance, can be directly sent into `bc` (or any other program that takes arithmetic expressions). Before we introduce a few additional grammars, let us give a means to _visualize_ them, giving an alternate view to aid their understanding.
_Railroad diagrams_, also called _syntax diagrams_, are a graphical representation of context-free grammars. They are read left to right, following possible "rail" tracks; the sequence of symbols encountered on the track defines the language.
We use [RailroadDiagrams](RailroadDiagrams.ipynb), an external library for visualization.
```
from RailroadDiagrams import NonTerminal, Terminal, Choice, HorizontalChoice, Sequence, Diagram, show_diagram
from IPython.display import SVG, display
```
We first define the method `syntax_diagram_symbol()` to visualize a given symbol. Terminal symbols are denoted as ovals, whereas nonterminal symbols (such as `<term>`) are denoted as rectangles.
```
def syntax_diagram_symbol(symbol):
    """Render one grammar symbol as a railroad-diagram node."""
    if not is_nonterminal(symbol):
        return Terminal(symbol)
    # Nonterminals are displayed without their angle brackets.
    return NonTerminal(symbol[1:-1])

SVG(show_diagram(syntax_diagram_symbol('<term>')))
```
We define `syntax_diagram_expr()` to visualize expansion alternatives.
```
def syntax_diagram_expr(expansion):
    """Render one expansion alternative as a railroad Sequence."""
    # Later chapters allow (string, options) tuples; use the string part.
    if isinstance(expansion, tuple):
        expansion = expansion[0]
    pieces = re.split(RE_NONTERMINAL, expansion)
    symbols = [piece for piece in pieces if piece != ""]
    if not symbols:
        symbols = [""]  # special case: empty expansion
    nodes = [syntax_diagram_symbol(sym) for sym in symbols]
    return Sequence(*nodes)

SVG(show_diagram(syntax_diagram_expr(EXPR_GRAMMAR['<term>'][0])))
```
This is the first alternative of `<term>` – a `<factor>` followed by `*` and a `<term>`.
Next, we define `syntax_diagram_alt()` for displaying alternate expressions.
```
from itertools import zip_longest

def syntax_diagram_alt(alt):
    """Render a list of expansion alternatives as a railroad choice.

    With more than `max_len` alternatives, the list is split into
    several vertical Choice columns placed side by side in a
    HorizontalChoice, keeping tall rules readable.
    """
    max_len = 5
    alt_len = len(alt)
    if alt_len > max_len:
        iter_len = alt_len // max_len
        # Interleave the alternatives into iter_len groups and transpose;
        # zip_longest pads shorter groups with None.
        alts = list(zip_longest(*[alt[i::iter_len] for i in range(iter_len)]))
        exprs = [[syntax_diagram_expr(expr) for expr in alt
                  if expr is not None] for alt in alts]
        # len(expr) // 2 selects the middle entry as the straight-through track.
        choices = [Choice(len(expr) // 2, *expr) for expr in exprs]
        return HorizontalChoice(*choices)
    else:
        return Choice(alt_len // 2, *[syntax_diagram_expr(expr) for expr in alt])

SVG(show_diagram(syntax_diagram_alt(EXPR_GRAMMAR['<digit>'])))
```
We see that a `<digit>` can be any single digit from `0` to `9`.
Finally, we define `syntax_diagram()` which given a grammar, displays the syntax diagram of its rules.
```
def syntax_diagram(grammar):
    """Display a railroad diagram for every rule in `grammar`."""
    from IPython.display import SVG, display
    for key in grammar:
        print("%s" % key[1:-1])  # rule name without angle brackets
        diagram = syntax_diagram_alt(grammar[key])
        display(SVG(show_diagram(diagram)))

syntax_diagram(EXPR_GRAMMAR)
```
This railroad representation will come in handy as it comes to visualizing the structure of grammars – especially for more complex grammars.
## Some Grammars
Let us create (and visualize) some more grammars and use them for fuzzing.
### A CGI Grammar
Here's a grammar for `cgi_decode()` introduced in the [chapter on coverage](Coverage.ipynb).
```
# Grammar for CGI-encoded strings: sequences of '+', %-escapes, and
# plain characters.
CGI_GRAMMAR = {
    "<start>":
        ["<string>"],
    "<string>":
        ["<letter>", "<letter><string>"],
    "<letter>":
        ["<plus>", "<percent>", "<other>"],
    "<plus>":
        ["+"],
    "<percent>":
        ["%<hexdigit><hexdigit>"],
    "<hexdigit>":
        ["0", "1", "2", "3", "4", "5", "6", "7",
         "8", "9", "a", "b", "c", "d", "e", "f"],
    "<other>":  # Actually, could be _all_ letters
        ["0", "1", "2", "3", "4", "5", "a", "b", "c", "d", "e", "-", "_"],
}
syntax_diagram(CGI_GRAMMAR)
```
In contrast to [basic fuzzing](Fuzzer.ipynb) or [mutation-based fuzzing](MutationFuzzer.ipynb), the grammar quickly produces all sorts of combinations:
```
for i in range(10):
print(simple_grammar_fuzzer(grammar=CGI_GRAMMAR, max_nonterminals=10))
```
### A URL Grammar
The same properties we have seen for CGI input also hold for more complex inputs. Let us use a grammar to produce a large number of valid URLs:
```
# Grammar for URLs of the shape scheme://authority/path?query, with
# deliberately small sets of hosts, paths, and ids.
URL_GRAMMAR = {
    "<start>":
        ["<url>"],
    "<url>":
        ["<scheme>://<authority><path><query>"],
    "<scheme>":
        ["http", "https", "ftp", "ftps"],
    "<authority>":
        ["<host>", "<host>:<port>", "<userinfo>@<host>", "<userinfo>@<host>:<port>"],
    "<host>":  # Just a few
        ["cispa.saarland", "www.google.com", "fuzzingbook.com"],
    "<port>":
        ["80", "8080", "<nat>"],
    "<nat>":
        ["<digit>", "<digit><digit>"],
    "<digit>":
        ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
    "<userinfo>":  # Just one
        ["user:password"],
    "<path>":  # Just a few
        ["", "/", "/<id>"],
    "<id>":  # Just a few
        ["abc", "def", "x<digit><digit>"],
    "<query>":
        ["", "?<params>"],
    "<params>":
        ["<param>", "<param>&<params>"],
    "<param>":  # Just a few
        ["<id>=<id>", "<id>=<nat>"],
}
syntax_diagram(URL_GRAMMAR)
```
Again, within milliseconds, we can produce plenty of valid inputs.
```
for i in range(10):
print(simple_grammar_fuzzer(grammar=URL_GRAMMAR, max_nonterminals=10))
```
### A Natural Language Grammar
Finally, grammars are not limited to *formal languages* such as computer inputs, but can also be used to produce *natural language*. This is the grammar we used to pick a title for this book:
```
# Grammar for book titles: "<topic>: <subtopic>", with optional
# prefixes and suffixes on both parts.
TITLE_GRAMMAR = {
    "<start>": ["<title>"],
    "<title>": ["<topic>: <subtopic>"],
    "<topic>": ["Generating Software Tests", "<fuzzing-prefix>Fuzzing", "The Fuzzing Book"],
    "<fuzzing-prefix>": ["", "The Art of ", "The Joy of "],
    "<subtopic>": ["<subtopic-main>",
                   "<subtopic-prefix><subtopic-main>",
                   "<subtopic-main><subtopic-suffix>"],
    "<subtopic-main>": ["Breaking Software",
                        "Generating Software Tests",
                        "Principles, Techniques and Tools"],
    "<subtopic-prefix>": ["", "Tools and Techniques for "],
    "<subtopic-suffix>": [" for <reader-property> and <reader-property>",
                          " for <software-property> and <software-property>"],
    "<reader-property>": ["Fun", "Profit"],
    "<software-property>": ["Robustness", "Reliability", "Security"],
}
syntax_diagram(TITLE_GRAMMAR)
# Collect 10 distinct titles; duplicates produced by the fuzzer are
# absorbed by the set.
titles = set()
while len(titles) < 10:
    titles.add(simple_grammar_fuzzer(
        grammar=TITLE_GRAMMAR, max_nonterminals=10))
titles
```
(If you find that there is redundancy ("Robustness and Robustness") in here: In [our chapter on coverage-based fuzzing](GrammarCoverageFuzzer.ipynb), we will show how to cover each expansion only once. And if you like some alternatives more than others, [probabilistic grammar fuzzing](ProbabilisticGrammarFuzzer.ipynb) will be there for you.)
## Grammars as Mutation Seeds
One very useful property of grammars is that they produce mostly valid inputs. From a syntactical standpoint, the inputs are actually _always_ valid, as they satisfy the constraints of the given grammar. (Of course, one needs a valid grammar in the first place.) However, there are also _semantical_ properties that cannot be easily expressed in a grammar. If, say, for a URL, the port range is supposed to be between 1024 and 2048, this is hard to write in a grammar. If one has to satisfy more complex constraints, one quickly reaches the limits of what a grammar can express.
One way around this is to attach constraints to grammars, as we will discuss [later in this book](ConstraintFuzzer.ipynb). Another possibility is to put together the strengths of grammar-based fuzzing and [mutation-based fuzzing](MutationFuzzer.ipynb). The idea is to use the grammar-generated inputs as *seeds* for further mutation-based fuzzing. This way, we can explore not only _valid_ inputs, but also check out the _boundaries_ between valid and invalid inputs. This is particularly interesting as slightly invalid inputs allow to find parser errors (which are often abundant). As with fuzzing in general, it is the unexpected which reveals errors in programs.
To use our generated inputs as seeds, we can feed them directly into the mutation fuzzers introduced earlier:
```
from MutationFuzzer import MutationFuzzer  # minor dependency

# Generate a handful of valid URLs to serve as mutation seeds.
number_of_seeds = 10
seeds = []
for _ in range(number_of_seeds):
    seeds.append(simple_grammar_fuzzer(grammar=URL_GRAMMAR,
                                       max_nonterminals=10))
seeds
m = MutationFuzzer(seeds)
# The first len(seeds) fuzz() calls return the seeds; later calls mutate them.
[m.fuzz() for i in range(20)]
```
While the first 10 `fuzz()` calls return the seeded inputs (as designed), the later ones again create arbitrary mutations. Using `MutationCoverageFuzzer` instead of `MutationFuzzer`, we could again have our search guided by coverage – and thus bring together the best of multiple worlds.
## A Grammar Toolbox
Let us now introduce a few techniques that help us writing grammars.
### Escapes
With `<` and `>` delimiting nonterminals in our grammars, how can we actually express that some input should contain `<` and `>`? The answer is simple: Just introduce a symbol for them.
```
# Escaping angle brackets: dedicated symbols expand to literal '<' and '>'.
simple_nonterminal_grammar = {
    "<start>": ["<nonterminal>"],
    "<nonterminal>": ["<left-angle><identifier><right-angle>"],
    "<left-angle>": ["<"],
    "<right-angle>": [">"],
    "<identifier>": ["id"]  # for now
}
```
In `simple_nonterminal_grammar`, neither the expansion for `<left-angle>` nor the expansion for `<right-angle>` can be mistaken as a nonterminal. Hence, we can produce as many as we want.
### Extending Grammars
In the course of this book, we frequently run into the issue of creating a grammar by _extending_ an existing grammar with new features. Such an extension is very much like subclassing in object-oriented programming.
To create a new grammar $g'$ from an existing grammar $g$, we first copy $g$ into $g'$, and then go and extend existing rules with new alternatives and/or add new symbols. Here's an example, extending the above `nonterminal` grammar with a better rule for identifiers:
```
import copy
# "Manual" grammar extension: deep-copy the base grammar, then
# replace and add rules in the copy.
nonterminal_grammar = copy.deepcopy(simple_nonterminal_grammar)
nonterminal_grammar["<identifier>"] = ["<idchar>", "<identifier><idchar>"]
nonterminal_grammar["<idchar>"] = ['a', 'b', 'c', 'd']  # for now
nonterminal_grammar
```
Since such an extension of grammars is a common operation, we introduce a custom function `extend_grammar()` which first copies the given grammar and then updates it from a dictionary, using the Python dictionary `update()` method:
```
def extend_grammar(grammar, extension=None):
    """Return a deep copy of `grammar`, updated with the rules in `extension`.

    `extension` maps symbols to lists of expansions; rules for existing
    symbols are replaced, new symbols are added.  The original `grammar`
    is left untouched.
    """
    # `None` stands in for "no extension" -- avoids a mutable default argument.
    new_grammar = copy.deepcopy(grammar)
    new_grammar.update(extension or {})
    return new_grammar
```
This call to `extend_grammar()` extends `simple_nonterminal_grammar` to `nonterminal_grammar` just like the "manual" example above:
```
# The same extension as the "manual" deep-copy version above, now via
# extend_grammar():
nonterminal_grammar = extend_grammar(simple_nonterminal_grammar,
                                     {
                                         "<identifier>": ["<idchar>", "<identifier><idchar>"],
                                         # for now
                                         "<idchar>": ['a', 'b', 'c', 'd']
                                     }
                                     )
```
### Character Classes
In the above `nonterminal_grammar`, we have enumerated only the first few letters; indeed, enumerating all letters or digits in a grammar manually, as in `<idchar> ::= 'a' | 'b' | 'c' ...` is a bit painful.
However, remember that grammars are part of a program, and can thus also be constructed programmatically. We introduce a function `srange()` which constructs a list of characters in a string:
```
import string
def srange(characters):
    """Construct a list with all characters in the string"""
    return list(characters)
```
If we pass it the constant `string.ascii_letters`, which holds all ASCII letters, `srange()` returns a list of all ASCII letters:
```
string.ascii_letters
srange(string.ascii_letters)[:10]
```
We can use such constants in our grammar to quickly define identifiers:
```
nonterminal_grammar = extend_grammar(nonterminal_grammar,
{
"<idchar>": srange(string.ascii_letters) + srange(string.digits) + srange("-_")
}
)
[simple_grammar_fuzzer(nonterminal_grammar, "<identifier>") for i in range(10)]
```
The shortcut `crange(start, end)` returns a list of all characters in the ASCII range of `start` to (including) `end`:
```
def crange(character_start, character_end):
    """List all characters from `character_start` to `character_end`, inclusive."""
    first, last = ord(character_start), ord(character_end)
    return [chr(code) for code in range(first, last + 1)]
```
We can use this to express ranges of characters:
```
crange('0', '9')
assert crange('a', 'z') == srange(string.ascii_lowercase)
```
### Grammar Shortcuts
In the above `nonterminal_grammar`, as in other grammars, we have to express repetitions of characters using _recursion_, that is, by referring to the original definition:
```
nonterminal_grammar["<identifier>"]
```
It could be a bit easier if we simply could state that a nonterminal should be a non-empty sequence of letters – for instance, as in
```
<identifier> = <idchar>+
```
where `+` denotes a non-empty repetition of the symbol it follows.
Operators such as `+` are frequently introduced as handy _shortcuts_ in grammars. Formally, our grammars come in the so-called [Backus-Naur form](https://en.wikipedia.org/wiki/Backus-Naur_form), or *BNF* for short. Operators _extend_ BNF to so-called *extended BNF*, or *EBNF* for short:
* The form `<symbol>?` indicates that `<symbol>` is optional – that is, it can occur 0 or 1 times.
* The form `<symbol>+` indicates that `<symbol>` can occur 1 or more times repeatedly.
* The form `<symbol>*` indicates that `<symbol>` can occur 0 or more times. (In other words, it is an optional repetition.)
To make matters even more interesting, we would like to use _parentheses_ with the above shortcuts. Thus, `(<foo><bar>)?` indicates that the sequence of `<foo>` and `<bar>` is optional.
Using such operators, we can define the identifier rule in a simpler way. To this end, let us create a copy of the original grammar and modify the `<identifier>` rule:
```
nonterminal_ebnf_grammar = extend_grammar(nonterminal_grammar,
{
"<identifier>": ["<idchar>+"]
}
)
```
Likewise, we can simplify the expression grammar. Consider how signs are optional, and how integers can be expressed as sequences of digits.
```
# EBNF version of the expression grammar: `?` marks optional parts,
# `+` marks one-or-more repetition (see the conversion rules below).
EXPR_EBNF_GRAMMAR = {
    "<start>":
        ["<expr>"],
    "<expr>":
        ["<term> + <expr>", "<term> - <expr>", "<term>"],
    "<term>":
        ["<factor> * <term>", "<factor> / <term>", "<factor>"],
    "<factor>":
        ["<sign>?<factor>", "(<expr>)", "<integer>(.<integer>)?"],
    "<sign>":
        ["+", "-"],
    "<integer>":
        ["<digit>+"],
    "<digit>":
        srange(string.digits)
}
```
Our aim is to convert EBNF grammars such as the ones above into a regular BNF grammar. This is done by four rules:
1. An expression `(content)op`, where `op` is one of `?`, `+`, `*`, becomes `<new-symbol>op`, with a new rule `<new-symbol> ::= content`.
2. An expression `<symbol>?` becomes `<new-symbol>`, where `<new-symbol> ::= <empty> | <symbol>`.
3. An expression `<symbol>+` becomes `<new-symbol>`, where `<new-symbol> ::= <symbol> | <symbol><new-symbol>`.
4. An expression `<symbol>*` becomes `<new-symbol>`, where `<new-symbol> ::= <empty> | <symbol><new-symbol>`.
Here, `<empty>` expands to the empty string, as in `<empty> ::= `. (This is also called an *epsilon expansion*.)
If these operators remind you of _regular expressions_, this is not by accident: Actually, any basic regular expression can be converted into a grammar using the above rules (and character classes with `crange()`, as defined above).
Applying these rules on the examples above yields the following results:
* `<idchar>+` becomes `<idchar><new-symbol>` with `<new-symbol> ::= <idchar> | <idchar><new-symbol>`.
* `<integer>(.<integer>)?` becomes `<integer><new-symbol>` with `<new-symbol> ::= <empty> | .<integer>`.
Let us implement these rules in three steps.
#### Creating New Symbols
First, we need a mechanism to create new symbols. This is fairly straightforward.
```
def new_symbol(grammar, symbol_name="<symbol>"):
    """Return a new symbol for `grammar` based on `symbol_name`"""
    if symbol_name not in grammar:
        return symbol_name
    # Append "-1", "-2", ... before the closing '>' until the name is free.
    stem = symbol_name[:-1]
    suffix = 1
    while True:
        candidate = "%s-%d>" % (stem, suffix)
        if candidate not in grammar:
            return candidate
        suffix += 1
assert new_symbol(EXPR_EBNF_GRAMMAR, '<expr>') == '<expr-1>'
```
#### Expanding Parenthesized Expressions
Next, we need a means to extract parenthesized expressions from our expansions and expand them according to the rules above. Let's start with extracting expressions:
```
# A parenthesized group: '(' ... ')' (no nesting inside) followed by ?, +, or *.
RE_PARENTHESIZED_EXPR = re.compile(r'\([^()]*\)[?+*]')

def parenthesized_expressions(expansion):
    """Return all parenthesized groups (with their EBNF operator) in `expansion`."""
    # Later chapters allow (string, options) tuples; use the string part.
    if isinstance(expansion, tuple):
        expansion = expansion[0]
    return RE_PARENTHESIZED_EXPR.findall(expansion)

assert parenthesized_expressions("(<foo>)* (<foo><bar>)+ (+<foo>)? <integer>(.<integer>)?") == [
    '(<foo>)*', '(<foo><bar>)+', '(+<foo>)?', '(.<integer>)?']
```
We can now use these to apply rule number 1, above, introducing new symbols for expressions in parentheses.
```
def convert_ebnf_parentheses(ebnf_grammar):
    """Convert a grammar in extended BNF to BNF"""
    # Work on a copy; the original grammar stays untouched.
    grammar = extend_grammar(ebnf_grammar)
    for nonterminal in ebnf_grammar:
        expansions = ebnf_grammar[nonterminal]
        for i in range(len(expansions)):
            expansion = expansions[i]
            # Repeat until no parenthesized group remains; replacing a
            # group may expose an enclosing one (handles nesting).
            while True:
                parenthesized_exprs = parenthesized_expressions(expansion)
                if len(parenthesized_exprs) == 0:
                    break
                for expr in parenthesized_exprs:
                    operator = expr[-1:]       # trailing ?, +, or *
                    contents = expr[1:-2]      # strip '(' and ')op'
                    # Rule 1: (content)op -> <new-symbol>op, with
                    # <new-symbol> ::= content
                    new_sym = new_symbol(grammar)
                    # Re-read grammar[nonterminal][i]: earlier iterations of
                    # this for-loop may already have rewritten it.
                    expansion = grammar[nonterminal][i].replace(
                        expr, new_sym + operator, 1)
                    grammar[nonterminal][i] = expansion
                    grammar[new_sym] = [contents]
    return grammar
```
This does the conversion as sketched above:
```
convert_ebnf_parentheses({"<number>": ["<integer>(.<integer>)?"]})
```
It even works for nested parenthesized expressions:
```
convert_ebnf_parentheses({"<foo>": ["((<foo>)?)+"]})
```
#### Expanding Operators
After expanding parenthesized expressions, we now need to take care of symbols followed by operators (`?`, `*`, `+`). As with `convert_ebnf_parentheses()`, above, we first extract all symbols followed by an operator.
```
# A nonterminal immediately followed by an EBNF operator ?, +, or *.
RE_EXTENDED_NONTERMINAL = re.compile(r'(<[^<> ]*>[?+*])')

def extended_nonterminals(expansion):
    """Return all symbols in `expansion` that carry an EBNF operator."""
    # Later chapters allow (string, options) tuples; use the string part.
    if isinstance(expansion, tuple):
        expansion = expansion[0]
    return RE_EXTENDED_NONTERMINAL.findall(expansion)

assert extended_nonterminals(
    "<foo>* <bar>+ <elem>? <none>") == ['<foo>*', '<bar>+', '<elem>?']
```
Our converter extracts the symbol and the operator, and adds new symbols according to the rules laid out above.
```
def convert_ebnf_operators(ebnf_grammar):
    """Convert a grammar in extended BNF to BNF"""
    # Work on a copy; the original grammar stays untouched.
    grammar = extend_grammar(ebnf_grammar)
    for nonterminal in ebnf_grammar:
        expansions = ebnf_grammar[nonterminal]
        for i in range(len(expansions)):
            expansion = expansions[i]
            extended_symbols = extended_nonterminals(expansion)
            for extended_symbol in extended_symbols:
                operator = extended_symbol[-1:]        # trailing ?, *, or +
                original_symbol = extended_symbol[:-1]
                assert original_symbol in ebnf_grammar, \
                    f"{original_symbol} is not defined in grammar"
                # Fresh symbol derived from the original, e.g. <digit-1>
                new_sym = new_symbol(grammar, original_symbol)
                grammar[nonterminal][i] = grammar[nonterminal][i].replace(
                    extended_symbol, new_sym, 1)
                if operator == '?':
                    # Rule 2: 0 or 1 occurrence
                    grammar[new_sym] = ["", original_symbol]
                elif operator == '*':
                    # Rule 4: 0 or more occurrences (empty or recurse)
                    grammar[new_sym] = ["", original_symbol + new_sym]
                elif operator == '+':
                    # Rule 3: 1 or more occurrences (one, or recurse)
                    grammar[new_sym] = [
                        original_symbol, original_symbol + new_sym]
    return grammar

convert_ebnf_operators({"<integer>": ["<digit>+"], "<digit>": ["0"]})
```
#### All Together
We can combine the two, first extending parentheses and then operators:
```
def convert_ebnf_grammar(ebnf_grammar):
    """Convert an EBNF grammar to plain BNF: first expand parenthesized
    groups, then rewrite the `?`/`*`/`+` operators."""
    parenthesis_free = convert_ebnf_parentheses(ebnf_grammar)
    return convert_ebnf_operators(parenthesis_free)
convert_ebnf_grammar({"<authority>": ["(<userinfo>@)?<host>(:<port>)?"]})
expr_grammar = convert_ebnf_grammar(EXPR_EBNF_GRAMMAR)
expr_grammar
```
Success! We have nicely converted the EBNF grammar into BNF.
With character classes and EBNF grammar conversion, we have two powerful tools that make the writing of grammars easier. We will use these again and again as it comes to working with grammars.
### Grammar Extensions
During the course of this book, we frequently want to specify _additional information_ for grammars, such as [_probabilities_](ProbabilisticGrammarFuzzer.ipynb) or [_constraints_](GeneratorGrammarFuzzer.ipynb). To support these extensions, as well as possibly others, we define an _annotation_ mechanism.
Our concept for annotating grammars is to add _annotations_ to individual expansions. To this end, we allow that an expansion cannot only be a string, but also a _pair_ of a string and a set of attributes, as in
```python
"<expr>":
[("<term> + <expr>", opts(min_depth=10)),
("<term> - <expr>", opts(max_depth=2)),
"<term>"]
```
Here, the `opts()` function would allow us to express annotations that apply to the individual expansions; in this case, the addition would be annotated with a `min_depth` value of 10, and the subtraction with a `max_depth` value of 2. The meaning of these annotations is left to the individual algorithms dealing with the grammars; the general idea, though, is that they can be ignored.
Our `opts()` helper function returns a mapping of its arguments to values:
```
def opts(**kwargs):
    """Return the given keyword arguments as a dictionary of expansion
    annotations, e.g. ``opts(min_depth=10)`` -> ``{'min_depth': 10}``."""
    return dict(**kwargs)
opts(min_depth=10)
```
To deal with both expansion strings and pairs of expansions and annotations, we access the expansion string and the associated annotations via designated helper functions, `exp_string()` and `exp_opts()`:
```
def exp_string(expansion):
    """Return the string to be expanded.

    An expansion is either a plain string or a (string, opts) pair;
    in the latter case the string part is returned."""
    return expansion if isinstance(expansion, str) else expansion[0]
exp_string(("<term> + <expr>", opts(min_depth=10)))
def exp_opts(expansion):
    """Return the options of an expansion; {} if none are defined."""
    return {} if isinstance(expansion, str) else expansion[1]

def exp_opt(expansion, attribute):
    """Return the value of `attribute` in the options of `expansion`,
    or None if the attribute is not set."""
    return exp_opts(expansion).get(attribute, None)
exp_opts(("<term> + <expr>", opts(min_depth=10)))
exp_opt(("<term> - <expr>", opts(max_depth=2)), 'max_depth')
```
Finally, we define a helper function that sets a particular option:
```
def set_opts(grammar, symbol, expansion, opts=None):
    """Set the options of the given expansion of grammar[symbol] to opts.

    The expansion is located by its string part (options attached to
    `expansion` itself are ignored for matching).  With opts=None, the
    existing options are cleared; otherwise they are merged, with keys
    in `opts` taking precedence.  Raises KeyError if the expansion does
    not occur in grammar[symbol]."""
    expansions = grammar[symbol]
    for i, exp in enumerate(expansions):
        # Match by string part only.
        if exp_string(exp) != exp_string(expansion):
            continue
        new_opts = exp_opts(exp)
        if opts is None or new_opts == {}:
            # Replace wholesale: either clearing (opts=None), or there
            # were no existing options to merge into.
            new_opts = opts
        else:
            # Merge: incoming keys override existing ones.
            for key in opts:
                new_opts[key] = opts[key]
        if new_opts == {}:
            # No options left: store the plain string form.
            grammar[symbol][i] = exp_string(exp)
        else:
            # NOTE(review): when opts is None and the expansion was a
            # plain string, this stores (string, None) rather than the
            # plain string -- confirm this is intended.
            grammar[symbol][i] = (exp_string(exp), new_opts)
        return
    raise KeyError(
        "no expansion " +
        repr(symbol) +
        " -> " +
        repr(
            exp_string(expansion)))
```
## Checking Grammars
Since grammars are represented as strings, it is fairly easy to introduce errors. So let us introduce a helper function that checks a grammar for consistency.
The helper function `is_valid_grammar()` iterates over a grammar to check whether all used symbols are defined, and vice versa, which is very useful for debugging; it also checks whether all symbols are reachable from the start symbol. You don't have to delve into details here, but as always, it is important to get the input data straight before we make use of it.
```
import sys
def def_used_nonterminals(grammar, start_symbol=START_SYMBOL):
    """Return a pair (defined_nonterminals, used_nonterminals).

    `defined_nonterminals` is the set of all left-hand-side symbols of
    `grammar`; `used_nonterminals` is the set of all nonterminals that
    occur in some expansion, plus `start_symbol`.  If the grammar is
    structurally malformed, a diagnostic is printed to stderr and
    (None, None) is returned."""
    defined_nonterminals = set()
    used_nonterminals = {start_symbol}
    for defined_nonterminal in grammar:
        defined_nonterminals.add(defined_nonterminal)
        expansions = grammar[defined_nonterminal]
        # Every right-hand side must be a non-empty list ...
        if not isinstance(expansions, list):
            print(repr(defined_nonterminal) + ": expansion is not a list",
                  file=sys.stderr)
            return None, None
        if len(expansions) == 0:
            print(repr(defined_nonterminal) + ": expansion list empty",
                  file=sys.stderr)
            return None, None
        for expansion in expansions:
            # An expansion may be a (string, opts) pair; only the
            # string part is scanned for nonterminals.
            if isinstance(expansion, tuple):
                expansion = expansion[0]
            # ... and every expansion must be a string.
            if not isinstance(expansion, str):
                print(repr(defined_nonterminal) + ": "
                      + repr(expansion) + ": not a string",
                      file=sys.stderr)
                return None, None
            for used_nonterminal in nonterminals(expansion):
                used_nonterminals.add(used_nonterminal)
    return defined_nonterminals, used_nonterminals
def reachable_nonterminals(grammar, start_symbol=START_SYMBOL):
    """Return the set of all nonterminals reachable from `start_symbol`
    by following expansions in `grammar`."""
    reachable = set()
    to_visit = [start_symbol]
    while to_visit:
        symbol = to_visit.pop()
        if symbol in reachable:
            continue
        reachable.add(symbol)
        # Symbols without a definition contribute no further edges.
        for expansion in grammar.get(symbol, []):
            for nonterminal in nonterminals(expansion):
                if nonterminal not in reachable:
                    to_visit.append(nonterminal)
    return reachable
def unreachable_nonterminals(grammar, start_symbol=START_SYMBOL):
    """Return all defined nonterminals not reachable from `start_symbol`."""
    reachable = reachable_nonterminals(grammar, start_symbol)
    return {symbol for symbol in grammar if symbol not in reachable}
def opts_used(grammar):
    """Return the set of all option names used anywhere in `grammar`."""
    return {opt
            for expansions in grammar.values()
            for expansion in expansions
            for opt in exp_opts(expansion)}
def is_valid_grammar(grammar, start_symbol=START_SYMBOL, supported_opts=None):
    """Return True iff `grammar` is consistent.

    Checks that every used nonterminal is defined, every defined
    nonterminal is used, and all symbols are reachable from
    `start_symbol` (or `<start>`).  Diagnostics are printed to stderr.
    If `supported_opts` is given, any other expansion option used in
    the grammar triggers a warning (but does not fail the check)."""
    defined_nonterminals, used_nonterminals = \
        def_used_nonterminals(grammar, start_symbol)
    # def_used_nonterminals() signals a malformed grammar with (None, None).
    if defined_nonterminals is None or used_nonterminals is None:
        return False
    # Do not complain about '<start>' being not used,
    # even if start_symbol is different
    if START_SYMBOL in grammar:
        used_nonterminals.add(START_SYMBOL)
    for unused_nonterminal in defined_nonterminals - used_nonterminals:
        print(repr(unused_nonterminal) + ": defined, but not used",
              file=sys.stderr)
    for undefined_nonterminal in used_nonterminals - defined_nonterminals:
        print(repr(undefined_nonterminal) + ": used, but not defined",
              file=sys.stderr)
    # Symbols must be reachable either from <start> or given start symbol
    unreachable = unreachable_nonterminals(grammar, start_symbol)
    msg_start_symbol = start_symbol
    if START_SYMBOL in grammar:
        # Symbols reachable from '<start>' also count as reachable.
        unreachable = unreachable - \
            reachable_nonterminals(grammar, START_SYMBOL)
        if start_symbol != START_SYMBOL:
            msg_start_symbol += " or " + START_SYMBOL
    for unreachable_nonterminal in unreachable:
        print(repr(unreachable_nonterminal) + ": unreachable from " + msg_start_symbol,
              file=sys.stderr)
    # Unsupported options are warnings only; they do not affect the result.
    used_but_not_supported_opts = set()
    if supported_opts is not None:
        used_but_not_supported_opts = opts_used(
            grammar).difference(supported_opts)
    for opt in used_but_not_supported_opts:
        print(
            "warning: option " +
            repr(opt) +
            " is not supported",
            file=sys.stderr)
    return used_nonterminals == defined_nonterminals and len(unreachable) == 0
```
Our grammars defined above pass the test:
```
assert is_valid_grammar(EXPR_GRAMMAR)
assert is_valid_grammar(CGI_GRAMMAR)
assert is_valid_grammar(URL_GRAMMAR)
```
The check can also be applied to EBNF grammars:
```
assert is_valid_grammar(EXPR_EBNF_GRAMMAR)
```
These ones do not pass the test, though:
```
assert not is_valid_grammar({"<start>": ["<x>"], "<y>": ["1"]})
assert not is_valid_grammar({"<start>": "123"})
assert not is_valid_grammar({"<start>": []})
assert not is_valid_grammar({"<start>": [1, 2, 3]})
```
From here on, we will always use `is_valid_grammar()` when defining a grammar.
## Synopsis
This chapter introduces _grammars_ as a simple means to specify input languages, and to use them for testing programs with syntactically valid inputs. A grammar is defined as a mapping of nonterminal symbols to lists of alternative expansions, as in the following example:
```
US_PHONE_GRAMMAR = {
"<start>": ["<phone-number>"],
"<phone-number>": ["(<area>)<exchange>-<line>"],
"<area>": ["<lead-digit><digit><digit>"],
"<exchange>": ["<lead-digit><digit><digit>"],
"<line>": ["<digit><digit><digit><digit>"],
"<lead-digit>": ["2", "3", "4", "5", "6", "7", "8", "9"],
"<digit>": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
}
assert is_valid_grammar(US_PHONE_GRAMMAR)
```
Nonterminal symbols are enclosed in angle brackets (say, `<digit>`). To generate an input string from a grammar, a _producer_ starts with the start symbol (`<start>`) and randomly chooses a random expansion for this symbol. It continues the process until all nonterminal symbols are expanded. The function `simple_grammar_fuzzer()` does just that:
```
[simple_grammar_fuzzer(US_PHONE_GRAMMAR) for i in range(5)]
```
In practice, though, instead of `simple_grammar_fuzzer()`, you should use [the `GrammarFuzzer` class](GrammarFuzzer.ipynb) or one of its [coverage-based](GrammarCoverageFuzzer.ipynb), [probabilistic-based](ProbabilisticGrammarFuzzer.ipynb), or [generator-based](GeneratorGrammarFuzzer.ipynb) derivatives; these are more efficient, protect against infinite growth, and provide several additional features.
This chapter also introduces a [grammar toolbox](#A-Grammar-Toolbox) with several helper functions that ease the writing of grammars, such as using shortcut notations for character classes and repetitions, or extending grammars
## Lessons Learned
* Grammars are powerful tools to express and produce syntactically valid inputs.
* Inputs produced from grammars can be used as is, or used as seeds for mutation-based fuzzing.
* Grammars can be extended with character classes and operators to make writing easier.
## Next Steps
As they make a great foundation for generating software tests, we use grammars again and again in this book. As a sneak preview, we can use grammars to [fuzz configurations](ConfigurationFuzzer.ipynb):
```
<options> ::= <option>*
<option> ::= -h | --version | -v | -d | -i | --global-config <filename>
```
We can use grammars for [fuzzing functions and APIs](APIFuzzer.ipynb) and [fuzzing graphical user interfaces](WebFuzzer.ipynb):
```
<call-sequence> ::= <call>*
<call> ::= urlparse(<url>) | urlsplit(<url>)
```
We can assign [probabilities](ProbabilisticGrammarFuzzer.ipynb) and [constraints](GeneratorGrammarFuzzer.ipynb) to individual expansions:
```
<term>: 50% <factor> * <term> | 30% <factor> / <term> | 20% <factor>
<integer>: <digit>+ { <integer> >= 100 }
```
All these extras become especially valuable as we can
1. _infer grammars automatically_, dropping the need to specify them manually, and
2. _guide them towards specific goals_ such as coverage or critical functions;
which we also discuss for all techniques in this book.
To get there, however, we still have a bit of homework to do. In particular, we first have to learn how to
* [create an efficient grammar fuzzer](GrammarFuzzer.ipynb)
## Background
As one of the foundations of human language, grammars have been around as long as human language existed. The first _formalization_ of generative grammars was by Dakṣiputra Pāṇini in 350 BC \cite{Panini350bce}. As a general means to express formal languages for both data and programs, their role in computer science cannot be overstated. The seminal work by Chomsky \cite{Chomsky1956} introduced the central models of regular languages, context-free grammars, context-sensitive grammars, and universal grammars as they are used (and taught) in computer science as a means to specify input and programming languages ever since.
The use of grammars for _producing_ test inputs goes back to Burkhardt \cite{Burkhardt1967}, to be later rediscovered and applied by Hanford \cite{Hanford1970} and Purdom \cite{Purdom1972}. The most important use of grammar testing since then has been *compiler testing*. Actually, grammar-based testing is one important reason why compilers and Web browsers work as they should:
* The [CSmith](https://embed.cs.utah.edu/csmith/) tool \cite{Yang2011} specifically targets C programs, starting with a C grammar and then applying additional steps, such as referring to variables and functions defined earlier or ensuring integer and type safety. Their authors have used it "to find and report more than 400 previously unknown compiler bugs."
* The [LangFuzz](http://issta2016.cispa.saarland/interview-with-christian-holler/) work \cite{Holler2012}, which shares two authors with this book, uses a generic grammar to produce outputs, and is used day and night to generate JavaScript programs and test their interpreters; as of today, it has found more than 2,600 bugs in browsers such as Mozilla Firefox, Google Chrome, and Microsoft Edge.
* The [EMI Project](http://web.cs.ucdavis.edu/~su/emi-project/) \cite{Le2014} uses grammars to stress-test C compilers, transforming known tests into alternative programs that should be semantically equivalent over all inputs. Again, this has led to more than 100 bugs in C compilers being fixed.
* [Grammarinator](https://github.com/renatahodovan/grammarinator) \cite{Hodovan2018} is an open-source grammar fuzzer (written in Python!), using the popular ANTLR format as grammar specification. Like LangFuzz, it uses the grammar for both parsing and producing, and has found more than 100 issues in the *JerryScript* lightweight JavaScript engine and an associated platform.
* [Domato](https://github.com/googleprojectzero/domato) is a generic grammar generation engine that is specifically used for fuzzing DOM input. It has revealed a number of security issues in popular Web browsers.
Compilers and Web browsers, of course, are not only domains where grammars are needed for testing, but also domains where grammars are well-known. Our claim in this book is that grammars can be used to generate almost _any_ input, and our aim is to empower you to do precisely that.
## Exercises
### Exercise 1: A JSON Grammar
Take a look at the [JSON specification](http://www.json.org) and derive a grammar from it:
* Use _character classes_ to express valid characters
* Use EBNF to express repetitions and optional parts
* Assume that
- a string is a sequence of digits, ASCII letters, punctuation and space characters without quotes or escapes
- whitespace is just a single space.
* Use `is_valid_grammar()` to ensure the grammar is valid.
Feed the grammar into `simple_grammar_fuzzer()`. Do you encounter any errors, and why?
**Solution.** This is a fairly straightforward translation:
```
# All printable ASCII characters allowed inside a JSON string here:
# digits, letters, punctuation (minus '"' and '\'), and space.
CHARACTERS_WITHOUT_QUOTE = (string.digits
                            + string.ascii_letters
                            + string.punctuation.replace('"', '').replace('\\', '')
                            + ' ')

# EBNF grammar for (a simplified subset of) JSON, following json.org.
# Fix: the original defined the "<element>" key twice with the same
# value; duplicate dict keys silently override each other.
JSON_EBNF_GRAMMAR = {
    "<start>": ["<json>"],
    "<json>": ["<element>"],
    "<element>": ["<ws><value><ws>"],
    # NOTE(review): the last alternative is not valid JSON; presumably a
    # deliberate fault injection -- confirm before using this grammar as
    # a strict JSON specification.
    "<value>": ["<object>", "<array>", "<string>", "<number>", "true", "false", "null", "'; DROP TABLE STUDENTS"],
    "<object>": ["{<ws>}", "{<members>}"],
    "<members>": ["<member>(,<members>)*"],
    "<member>": ["<ws><string><ws>:<element>"],
    "<array>": ["[<ws>]", "[<elements>]"],
    "<elements>": ["<element>(,<elements>)*"],
    "<string>": ['"' + "<characters>" + '"'],
    "<characters>": ["<character>*"],
    "<character>": srange(CHARACTERS_WITHOUT_QUOTE),
    "<number>": ["<int><frac><exp>"],
    "<int>": ["<digit>", "<onenine><digits>", "-<digits>", "-<onenine><digits>"],
    "<digits>": ["<digit>+"],
    "<digit>": ['0', "<onenine>"],
    "<onenine>": crange('1', '9'),
    "<frac>": ["", ".<digits>"],
    "<exp>": ["", "E<sign><digits>", "e<sign><digits>"],
    "<sign>": ["", '+', '-'],
    # "<ws>": srange(string.whitespace)
    "<ws>": [" "]
}
assert is_valid_grammar(JSON_EBNF_GRAMMAR)
JSON_GRAMMAR = convert_ebnf_grammar(JSON_EBNF_GRAMMAR)
from ExpectError import ExpectError
for i in range(50):
with ExpectError():
print(simple_grammar_fuzzer(JSON_GRAMMAR, '<object>'))
```
We get these errors because `simple_grammar_fuzzer()` first expands to a maximum number of elements, and then is limited because every further expansion would _increase_ the number of nonterminals, even though these may eventually reduce the string length. This issue is addressed in the [next chapter](GrammarFuzzer.ipynb), introducing a more solid algorithm for producing strings from grammars.
### Exercise 2: Finding Bugs
The name `simple_grammar_fuzzer()` does not come by accident: The way it expands grammars is limited in several ways. What happens if you apply `simple_grammar_fuzzer()` on `nonterminal_grammar` and `expr_grammar`, as defined above, and why?
**Solution**. `nonterminal_grammar` does not work because `simple_grammar_fuzzer()` eventually tries to expand the just generated nonterminal:
```
from ExpectError import ExpectError, ExpectTimeout
with ExpectError():
simple_grammar_fuzzer(nonterminal_grammar, log=True)
```
For `expr_grammar`, things are even worse, as `simple_grammar_fuzzer()` can start a series of infinite expansions:
```
with ExpectTimeout(1):
for i in range(10):
print(simple_grammar_fuzzer(expr_grammar))
```
Both issues are addressed and discussed in the [next chapter](GrammarFuzzer.ipynb), introducing a more solid algorithm for producing strings from grammars.
### Exercise 3: Grammars with Regular Expressions
In a _grammar extended with regular expressions_, we can use the special form
```
/regex/
```
to include regular expressions in expansions. For instance, we can have a rule
```
<integer> ::= /[+-]?[0-9]+/
```
to quickly express that an integer is an optional sign, followed by a sequence of digits.
#### Part 1: Convert regular expressions
Write a converter `convert_regex(r)` that takes a regular expression `r` and creates an equivalent grammar. Support the following regular expression constructs:
* `*`, `+`, `?`, `()` should work just as in EBNF, above.
* `a|b` should translate into a list of alternatives `[a, b]`.
* `.` should match any character except newline.
* `[abc]` should translate into `srange("abc")`
* `[^abc]` should translate into the set of ASCII characters _except_ `srange("abc")`.
* `[a-b]` should translate into `crange(a, b)`
* `[^a-b]` should translate into the set of ASCII characters _except_ `crange(a, b)`.
Example: `convert_regex(r"[0-9]+")` should yield a grammar such as
```python
{
"<start>": ["<s1>"],
"<s1>": [ "<s2>", "<s1><s2>" ],
"<s2>": crange('0', '9')
}
```
**Solution.** Left as exercise to the reader.
#### Part 2: Identify and expand regular expressions
Write a converter `convert_regex_grammar(g)` that takes a EBNF grammar `g` containing regular expressions in the form `/.../` and creates an equivalent BNF grammar. Support the regular expression constructs as above.
Example: `convert_regex_grammar({ "<integer>" : "/[+-]?[0-9]+/" })` should yield a grammar such as
```python
{
"<integer>": ["<s1><s3>"],
"<s1>": [ "", "<s2>" ],
"<s2>": srange("+-"),
"<s3>": [ "<s4>", "<s4><s3>" ],
"<s4>": crange('0', '9')
}
```
Optional: Support _escapes_ in regular expressions: `\c` translates to the literal character `c`; `\/` translates to `/` (and thus does not end the regular expression); `\\` translates to `\`.
**Solution.** Left as exercise to the reader.
### Exercise 4: Defining Grammars as Functions (Advanced)
To obtain a nicer syntax for specifying grammars, one can make use of Python constructs which then will be _parsed_ by an additional function. For instance, we can imagine a grammar definition which uses `|` as a means to separate alternatives:
```
def expression_grammar_fn():
    # Deliberately NOT executable: `str | str` raises TypeError at runtime.
    # This function exists only as *data* -- its source is parsed with the
    # `ast` module below to construct a grammar from these assignments,
    # with `|` separating the alternatives of each rule.
    start = "<expr>"
    expr = "<term> + <expr>" | "<term> - <expr>"
    term = "<factor> * <term>" | "<factor> / <term>" | "<factor>"
    factor = "+<factor>" | "-<factor>" | "(<expr>)" | "<integer>.<integer>" | "<integer>"
    integer = "<digit><integer>" | "<digit>"
    digit = '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9'
```
If we execute `expression_grammar_fn()`, this will yield an error. Yet, the purpose of `expression_grammar_fn()` is not to be executed, but to be used as _data_ from which the grammar will be constructed.
```
with ExpectError():
expression_grammar_fn()
```
To this end, we make use of the `ast` (abstract syntax tree) and `inspect` (code inspection) modules.
```
import ast
import inspect
```
First, we obtain the source code of `expression_grammar_fn()`...
```
source = inspect.getsource(expression_grammar_fn)
source
```
... which we then parse into an abstract syntax tree:
```
tree = ast.parse(source)
```
We can now parse the tree to find operators and alternatives. `get_alternatives()` iterates over all nodes `op` of the tree; If the node looks like a binary _or_ (`|` ) operation, we drill deeper and recurse. If not, we have reached a single production, and we try to get the expression from the production. We define the `to_expr` parameter depending on how we want to represent the production. In this case, we represent a single production by a single string.
```
def get_alternatives(op, to_expr=lambda o: o.value):
    """Return the list of alternatives in a `|`-chained AST expression.

    A binary `|` (BitOr) node is flattened recursively into a list; any
    other node is a single production.  `to_expr` maps a leaf node to its
    representation -- by default, the value of an `ast.Constant`.
    (Fix: the previous default used the deprecated `.s` attribute of the
    removed `ast.Str` node class.)"""
    if isinstance(op, ast.BinOp) and isinstance(op.op, ast.BitOr):
        return get_alternatives(op.left, to_expr) + [to_expr(op.right)]
    return [to_expr(op)]
```
`funct_parser()` takes the abstract syntax tree of a function (say, `expression_grammar_fn()`) and iterates over all assignments:
```
def funct_parser(tree, to_expr=lambda o: o.value):
    """Convert the AST of a grammar-defining function into a grammar dict.

    Maps each name assigned in the function body to the list of its
    `|`-separated alternatives.  (Fix: the previous default `to_expr`
    used the deprecated `.s` attribute of the removed `ast.Str` class;
    `ast.Constant.value` is the modern equivalent.)"""
    return {assign.targets[0].id: get_alternatives(assign.value, to_expr)
            for assign in tree.body[0].body}
```
The result is a grammar in our regular format:
```
grammar = funct_parser(tree)
for symbol in grammar:
print(symbol, "::=", grammar[symbol])
```
#### Part 1 (a): One Single Function
Write a single function `define_grammar(fn)` that takes a grammar defined as function (such as `expression_grammar_fn()`) and returns a regular grammar.
**Solution**. This is straightforward:
```
def define_grammar(fn, to_expr=lambda o: o.value):
    """Parse the source of grammar-defining function `fn` and return the
    grammar it describes as a dict.

    (Fix: default `to_expr` now reads `ast.Constant.value` instead of
    the deprecated `.s` alias.)"""
    source = inspect.getsource(fn)
    tree = ast.parse(source)
    grammar = funct_parser(tree, to_expr)
    return grammar
define_grammar(expression_grammar_fn)
```
**Note.** Python allows us to directly bind the generated grammar to the name `expression_grammar_fn` using function decorators. This can be used to ensure that we do not have a faulty function lying around:
```python
@define_grammar
def expression_grammar():
start = "<expr>"
expr = "<term> + <expr>" | "<term> - <expr>"
#...
```
#### Part 1 (b): Alternative representations
We note that the grammar representation we designed previously does not allow simple generation of alternatives such as `srange()` and `crange()`. Further, one may find the string representation of expressions limiting. It turns out that it is simple to extend our grammar definition to support grammars such as below:
```
def define_name(o):
    """Return the identifier of a Name node, or the value of a constant.

    (Fix: reads `ast.Constant.value` instead of the deprecated `.s`.)"""
    return o.id if isinstance(o, ast.Name) else o.value

def define_expr(op):
    """Flatten a `+`-chained AST expression into a tuple of its parts."""
    if isinstance(op, ast.BinOp) and isinstance(op.op, ast.Add):
        return (*define_expr(op.left), define_name(op.right))
    return (define_name(op),)

def define_ex_grammar(fn):
    """Decorator: parse `fn` as a grammar whose productions are tuples."""
    return define_grammar(fn, define_expr)
```
The grammar:
```python
@define_ex_grammar
def expression_grammar():
start = expr
expr = (term + '+' + expr
| term + '-' + expr)
term = (factor + '*' + term
| factor + '/' + term
| factor)
factor = ('+' + factor
| '-' + factor
| '(' + expr + ')'
| integer + '.' + integer
| integer)
integer = (digit + integer
| digit)
digit = '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9'
for symbol in expression_grammar:
print(symbol, "::=", expression_grammar[symbol])
```
**Note.** The grammar data structure thus obtained is a little more detailed than the standard data structure. It represents each production as a tuple.
We note that we have not enabled `srange()` or `crange()` in the above grammar. How would you go about adding these? (*Hint:* wrap `define_expr()` to look for `ast.Call`)
#### Part 2: Extended Grammars
Introduce an operator `*` that takes a pair `(min, max)` where `min` and `max` are the minimum and maximum number of repetitions, respectively. A missing value `min` stands for zero; a missing value `max` for infinity.
```
def identifier_grammar_fn():
    # Data, not executable code: `idchar * (1,)` illustrates the proposed
    # repetition operator with (min, max) = (1, infinity); `idchar` is
    # intentionally undefined here.
    identifier = idchar * (1,)
```
With the `*` operator, we can generalize the EBNF operators – `?` becomes (0,1), `*` becomes (0,), and `+` becomes (1,). Write a converter that takes an extended grammar defined using `*`, parse it, and convert it into BNF.
**Solution.** No solution yet :-)
| github_jupyter |
## widgets.image_cleaner
fastai offers several widgets to support the workflow of a deep learning practitioner. The purpose of the widgets are to help you organize, clean, and prepare your data for your model. Widgets are separated by data type.
```
from fastai.vision import *
from fastai.widgets import DatasetFormatter, ImageCleaner, ImageDownloader, download_google_images
from fastai.gen_doc.nbdoc import *
%reload_ext autoreload
%autoreload 2
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = create_cnn(data, models.resnet18, metrics=error_rate)
learn.fit_one_cycle(2)
learn.save('stage-1')
```
We create a databunch with all the data in the training set and no validation set (DatasetFormatter uses only the training set)
```
# Rebuild a databunch over *all* the data as the training set -- no
# validation split, since DatasetFormatter only inspects the training set.
db = (ImageList.from_folder(path)
      .no_split()
      .label_from_folder()
      .databunch())
# Fresh learner on the new databunch, restoring the weights saved above.
learn = create_cnn(db, models.resnet18, metrics=[accuracy])
learn.load('stage-1');
show_doc(DatasetFormatter)
```
The [`DatasetFormatter`](/widgets.image_cleaner.html#DatasetFormatter) class prepares your image dataset for widgets by returning a formatted [`DatasetTfm`](/vision.data.html#DatasetTfm) based on the [`DatasetType`](/basic_data.html#DatasetType) specified. Use `from_toplosses` to grab the most problematic images directly from your learner. Optionally, you can restrict the formatted dataset returned to `n_imgs`.
```
show_doc(DatasetFormatter.from_similars)
from fastai.gen_doc.nbdoc import *
from fastai.widgets.image_cleaner import *
show_doc(DatasetFormatter.from_toplosses)
show_doc(ImageCleaner)
```
[`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) is for cleaning up images that don't belong in your dataset. It renders images in a row and gives you the opportunity to delete the file from your file system. To use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) we must first use `DatasetFormatter().from_toplosses` to get the suggested indices for misclassified images.
```
ds, idxs = DatasetFormatter().from_toplosses(learn)
ImageCleaner(ds, idxs, path)
```
[`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) does not change anything on disk (neither labels nor existence of images). Instead, it creates a 'cleaned.csv' file in your data path from which you need to load your new databunch for the changes to be applied.
```
# 'cleaned.csv' (written by ImageCleaner) holds the corrected file list;
# rebuild the databunch from it so the cleaning edits take effect.
df = pd.read_csv(path/'cleaned.csv', header='infer')
# We create a databunch from our csv. We include the data in the training set and we don't use a validation set (DatasetFormatter uses only the training set)
np.random.seed(42)
db = (ImageList.from_df(df, path)
      .no_split()
      .label_from_df()
      .databunch(bs=64))
learn = create_cnn(db, models.resnet18, metrics=error_rate)
learn = learn.load('stage-1')
```
You can then use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) again to find duplicates in the dataset. To do this, you can specify `duplicates=True` while calling ImageCleaner after getting the indices and dataset from `.from_similars`. Note that if you are using a layer's output which has dimensions <code>(n_batches, n_features, 1, 1)</code> then you don't need any pooling (this is the case with the last layer). The suggested use of `.from_similars()` with resnets is using the last layer and no pooling, like in the following cell.
```
ds, idxs = DatasetFormatter().from_similars(learn, layer_ls=[0,7,1], pool=None)
ImageCleaner(ds, idxs, path, duplicates=True)
show_doc(ImageDownloader)
```
[`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) widget gives you a way to quickly bootstrap your image dataset without leaving the notebook. It searches and downloads images that match the search criteria and resolution / quality requirements and stores them on your filesystem within the provided `path`.
Images for each search query (or label) are stored in a separate folder within `path`. For example, if you populate `tiger` with a `path` setup to `./data`, you'll get a folder `./data/tiger/` with the tiger images in it.
[`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) will automatically clean up and verify the downloaded images with [`verify_images()`](/vision.data.html#verify_images) after downloading them.
```
path = Config.data_path()/'image_downloader'
os.makedirs(path, exist_ok=True)
ImageDownloader(path)
```
#### Downloading images in python scripts outside Jupyter notebooks
```
path = Config.data_path()/'image_downloader'
files = download_google_images(path, 'aussie shepherd', size='>1024*768', n_images=30)
len(files)
show_doc(download_google_images)
```
After populating images with [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader), you can get an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) by calling `ImageDataBunch.from_folder(path, size=size)`, or using the data block API.
```
# Setup path and labels to search for
path = Config.data_path()/'image_downloader'
labels = ['boston terrier', 'french bulldog']
# Download images
for label in labels:
download_google_images(path, label, size='>400*300', n_images=50)
# Build a databunch and train!
src = (ImageList.from_folder(path)
.random_split_by_pct()
.label_from_folder()
.transform(get_transforms(), size=224))
db = src.databunch(bs=16, num_workers=0)
learn = create_cnn(db, models.resnet34, metrics=[accuracy])
learn.fit_one_cycle(3)
```
#### Downloading more than a hundred images
To fetch more than a hundred images, [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) uses `selenium` and `chromedriver` to scroll through the Google Images search results page and scrape image URLs. They're not required as dependencies by default. If you don't have them installed on your system, the widget will show you an error message.
To install `selenium`, just `pip install selenium` in your fastai environment.
**On a mac**, you can install `chromedriver` with `brew cask install chromedriver`.
**On Ubuntu**
Take a look at the latest Chromedriver version available, then something like:
```
wget https://chromedriver.storage.googleapis.com/2.45/chromedriver_linux64.zip
unzip chromedriver_linux64.zip
```
Note that downloading under 100 images doesn't require any dependencies other than fastai itself, however downloading more than a hundred images [uses `selenium` and `chromedriver`](/widgets.image_cleaner.html#Downloading-more-than-a-hundred-images).
`size` can be one of:
```
'>400*300'
'>640*480'
'>800*600'
'>1024*768'
'>2MP'
'>4MP'
'>6MP'
'>8MP'
'>10MP'
'>12MP'
'>15MP'
'>20MP'
'>40MP'
'>70MP'
```
## Methods
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
# Render documentation stubs for internal helper methods that are
# intentionally kept out of the documented API surface above.
show_doc(ImageCleaner.make_dropdown_widget)
show_doc(ImageCleaner.next_batch)
show_doc(DatasetFormatter.sort_idxs)
show_doc(ImageCleaner.make_vertical_box)
show_doc(ImageCleaner.relabel)
show_doc(DatasetFormatter.largest_indices)
show_doc(ImageCleaner.delete_image)
show_doc(ImageCleaner.empty)
show_doc(ImageCleaner.empty_batch)
show_doc(DatasetFormatter.comb_similarity)
show_doc(ImageCleaner.get_widgets)
show_doc(ImageCleaner.write_csv)
show_doc(ImageCleaner.create_image_list)
show_doc(ImageCleaner.render)
show_doc(DatasetFormatter.get_similars_idxs)
show_doc(ImageCleaner.on_delete)
show_doc(ImageCleaner.make_button_widget)
show_doc(ImageCleaner.make_img_widget)
show_doc(DatasetFormatter.get_actns)
show_doc(ImageCleaner.batch_contains_deleted)
show_doc(ImageCleaner.make_horizontal_box)
show_doc(DatasetFormatter.get_toplosses_idxs)
show_doc(DatasetFormatter.padded_ds)
## New Methods - Please document or move to the undocumented section
| github_jupyter |
# WalMart Trip Type
```
import pandas as pd
import numpy as np
import scipy.stats as stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels as sm
import math
import tools
plt.rcParams["figure.figsize"] = (10, 8)
mpl.style.use('bmh')
%matplotlib inline
df = pd.read_csv('input/train.csv')
u = df.groupby('VisitNumber')
```
## Look at a visit
```
u.get_group(8)
```
## How many unique items of each column are there?
```
[(x, len(df[x].unique())) for x in ['TripType', 'Upc', 'Weekday', 'DepartmentDescription', 'FinelineNumber']]
```
## What are the DepartmentDescription Factors?
```
dds = [repr(x) for x in list(set(df['DepartmentDescription']))]
dds.sort()
for d in dds:
print(d)
df['ScanCount'].describe()
df['ScanCount'].hist(bins=100)
```
## How many NA's are there by column?
```
df.isnull().sum()
```
### What is the overlap between missing NAs in different columns?
```
len(df[df['DepartmentDescription'].isnull() & df['Upc'].isnull()])
len(df[df['DepartmentDescription'].isnull() & df['FinelineNumber'].notnull()])
len(df[df['FinelineNumber'].isnull() & df['Upc'].notnull()])
```
### When finelineNumber or Upc is NA, what departments do they come from (when not also NA)?
```
df[df['FinelineNumber'].isnull() & df['DepartmentDescription'].notnull()]['DepartmentDescription'].value_counts()
df[df['Upc'].isnull() & df['DepartmentDescription'].notnull()]['DepartmentDescription'].value_counts()
```
### When Upc is NA, what are the scan counts?
```
df[df['Upc'].isnull() & df['DepartmentDescription'].notnull()]['ScanCount'].value_counts()
df[df['FinelineNumber'].isnull() & df['DepartmentDescription'].notnull()]['ScanCount'].value_counts()
```
## TripType by FineLineNumber
```
pd.crosstab(index=df['FinelineNumber'], columns=df['TripType']).idxmax()
```
## Most common DepartmentDescription for each TripType
```
pd.crosstab(index=df['DepartmentDescription'], columns=df['TripType']).idxmax()
```
## Most common Weekday for each TripType
```
pd.crosstab(index=df['Weekday'], columns=df['TripType']).idxmax()
```
## Most common TripType for each weekday
```
pd.crosstab(index=df['TripType'], columns=df['Weekday']).hist(figsize=(20,10))
```
# Clean data
```
dd = (df.pivot_table('ScanCount', ['VisitNumber'], ['DepartmentDescription']))
fln = df.pivot_table('ScanCount', ['VisitNumber'], ['FinelineNumber'])
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
wd = df[['VisitNumber', 'Weekday']].drop_duplicates(subset='VisitNumber')
wd['Weekday'] = wd['Weekday'].apply(lambda x: weekdays.index(x))
trip_type = df[['VisitNumber', 'TripType']].drop_duplicates(subset='VisitNumber')
dd = df[['VisitNumber', 'TripType']].drop_duplicates()
dd['TripType'].value_counts()
result = trip_type.join(dd, on='VisitNumber')
result = result.join(fln, on='VisitNumber')
result['Weekday'] = wd['Weekday']
result2 = result.fillna(0.0)
result2
df['Returns'] = df['ScanCount'].apply(lambda x: 1 if x < 0 else 0)
rtns = df.pivot_table('Returns', ['VisitNumber'], aggfunc=sum)
rtns.apply(lambda x: 1 if x > 0 else 0)
dd = list(set(df['DepartmentDescription'].fillna('')))
dd.sort()
dd
vcs = df['Upc'].value_counts()
for x in [int(x) for x in list(vcs.head(2000).index)]:
print('{}, '.format(x))
```
| github_jupyter |
# Masked vs cropped implementation for Gated PixelCNN
Hi all, in this notebook we will compare the masked implementation of the convolutions from the Gated PixelCNN against the alternative suggested in the paper: the use of convolution operations with appropriate cropping and padding to achieve the same result.
Let's check out!
First, we will check whether both implementations produce the same result. For this we will create a 5x5 matrix filled with ones as our input example.
```
import math
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow import nn
from tensorflow.keras import initializers
test_ones_2d = np.ones([1, 5, 5, 1], dtype='float32')
print(test_ones_2d[0,:,:,0].squeeze())
```
Now, let's copy the masked implementation that we have been using for our Gated PixelCNN models.
# Masked convolutions
```
class MaskedConv2D(keras.layers.Layer):
    """Convolutional layer with masks extended to work with the Gated PixelCNN.

    Convolutional layer with a simple implementation of masks of type A and B for
    autoregressive models. Extended version to work with the vertical and horizontal
    stacks from the Gated PixelCNN model.

    Arguments:
        mask_type: one of `"V"`, `"A"` or `"B"`. `"V"` masks everything strictly
            below the kernel centre (vertical stack); `"A"`/`"B"` additionally
            mask the centre row, with `"A"` also masking the centre pixel itself.
        filters: Integer, the dimensionality of the output space (i.e. the number of output
            filters in the convolution).
        kernel_size: An integer or tuple/list of 2 integers, specifying the height and width
            of the 2D convolution window.
            Can be a single integer to specify the same value for all spatial dimensions.
        strides: An integer or tuple/list of 2 integers, specifying the strides of the
            convolution along the height and width.
            Can be a single integer to specify the same value for all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying any
            `dilation_rate` value != 1.
        padding: one of `"valid"` or `"same"` (case-insensitive).
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
    """

    def __init__(self,
                 mask_type,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros'):
        super(MaskedConv2D, self).__init__()

        assert mask_type in {'A', 'B', 'V'}
        self.mask_type = mask_type

        self.filters = filters

        # Normalize an int kernel size to a (height, width) tuple.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        self.kernel_size = kernel_size

        self.strides = strides
        # tf.nn.conv2d expects upper-case padding strings ('SAME'/'VALID').
        self.padding = padding.upper()
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

    def build(self, input_shape):
        kernel_h, kernel_w = self.kernel_size

        # Weights have the standard conv2d layout: (h, w, in_channels, filters).
        self.kernel = self.add_weight('kernel',
                                      shape=(kernel_h,
                                             kernel_w,
                                             int(input_shape[-1]),
                                             self.filters),
                                      initializer=self.kernel_initializer,
                                      trainable=True)
        self.bias = self.add_weight('bias',
                                    shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    trainable=True)

        # Start from an all-ones mask and zero out the "future" positions.
        mask = np.ones(self.kernel.shape, dtype=np.float32)

        # Get centre of the filter for even or odd dimensions
        if kernel_h % 2 != 0:
            center_h = kernel_h // 2
        else:
            center_h = (kernel_h - 1) // 2

        if kernel_w % 2 != 0:
            center_w = kernel_w // 2
        else:
            center_w = (kernel_w - 1) // 2

        if self.mask_type == 'V':
            # Vertical stack: keep the centre row and everything above it.
            mask[center_h + 1:, :, :, :] = 0.
        else:
            # Horizontal stack: zero rows above the centre, the centre pixel's
            # right-hand neighbours (and the centre itself for type 'A'), and
            # all rows below the centre.
            mask[:center_h, :, :] = 0.
            mask[center_h, center_w + (self.mask_type == 'B'):, :, :] = 0.
            mask[center_h + 1:, :, :] = 0.

        self.mask = tf.constant(mask, dtype=tf.float32, name='mask')

    def call(self, input):
        # Apply the mask elementwise to the weights before convolving, so the
        # blocked positions contribute nothing to the output.
        masked_kernel = tf.math.multiply(self.mask, self.kernel)
        x = nn.conv2d(input,
                      masked_kernel,
                      strides=[1, self.strides, self.strides, 1],
                      padding=self.padding)
        x = nn.bias_add(x, self.bias)
        return x
```
With this implementation, we will recreate all convolutional operation that occur inside of the Gated Block. These operations are:
- Vertical stack
- Vertical to horizontal stack
- Horizontal stack - convolution layer with mask type "A"
- Horizontal stack - convolution layer with mask type "B"
IMAGE GATED BLOCK
## Vertical stack
```
mask_type = 'V'
kernel_size = (3, 3)
conv = MaskedConv2D(mask_type=mask_type,
filters=1,
kernel_size=kernel_size,
padding='same',
kernel_initializer='ones',
bias_initializer='zeros')
result_v = conv(test_ones_2d)
print('MASK')
print(conv.mask.numpy().squeeze())
print('')
print('OUTPUT')
print(result_v.numpy().squeeze())
```
## Vertical to horizontal stack
```
padding = keras.layers.ZeroPadding2D(padding=((1, 0), 0))
cropping = keras.layers.Cropping2D(cropping=((0, 1), 0))
x = padding(result_v)
result = cropping(x)
print('INPUT')
print(result_v.numpy().squeeze())
print('')
print('OUTPUT')
print(result.numpy().squeeze())
```
## Horizontal stack - convolution layer with mask type "A"
```
mask_type = 'A'
kernel_size = (1, 3)
conv = MaskedConv2D(mask_type=mask_type,
filters=1,
kernel_size=kernel_size,
padding='same',
kernel_initializer='ones',
bias_initializer='zeros')
result = conv(test_ones_2d)
print('MASK')
print(conv.mask.numpy().squeeze())
print('')
print('OUTPUT')
print(result.numpy().squeeze())
```
## Horizontal stack - convolution layer with mask type "B"
```
mask_type = 'B'
kernel_size = (1, 3)
conv = MaskedConv2D(mask_type=mask_type,
filters=1,
kernel_size=kernel_size,
padding='same',
kernel_initializer='ones',
bias_initializer='zeros')
result = conv(test_ones_2d)
print('MASK')
print(conv.mask.numpy().squeeze())
print('')
print('OUTPUT')
print(result.numpy().squeeze())
```
Using the results of the masked approach as reference, let's check the cropped method.
# Cropped and padded convolutions
## Vertical stack
First, let's check out this operation, which uses some strategic padding and applies the convolution in "valid" mode to achieve the same result as the masked version.
```
kernel_h = 2
kernel_w = 3
kernel_size = (kernel_h, kernel_w)
padding = keras.layers.ZeroPadding2D(padding=((kernel_h - 1, 0), (int((kernel_w - 1) / 2), int((kernel_w - 1) / 2))))
res = padding(test_ones_2d)
conv = keras.layers.Conv2D(filters=1,
kernel_size=kernel_size,
strides=1,
padding='valid',
kernel_initializer='ones',
bias_initializer='zeros')
result_v = conv(res)
print('INPUT')
print(test_ones_2d.squeeze())
print('')
print('PADDED INPUT')
print(res.numpy().squeeze())
print('')
print('CONV FILTER')
print(conv.weights[0].numpy().squeeze())
print('')
print('OUTPUT')
print(result_v.numpy().squeeze())
```
Now, let's implement a layer that encapsulates all the previous operations.
```
class VerticalConv2D(keras.layers.Conv2D):
    """Vertical-stack convolution built from cropping/padding instead of masks.

    Adapted from
    https://github.com/JesseFarebro/PixelCNNPP/blob/master/layers/VerticalConv2D.py
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 **kwargs):
        # An integer kernel size describes the full square receptive field;
        # keep only the rows at or above the current pixel.
        if not isinstance(kernel_size, tuple):
            kernel_size = (kernel_size // 2 + 1, kernel_size)
        super(VerticalConv2D, self).__init__(filters, kernel_size, **kwargs)
        top_pad = kernel_size[0] - 1
        side_pad = kernel_size[1] // 2
        # Pad on top so the kernel never sees rows below the current one,
        # and symmetrically left/right to preserve the spatial width.
        self.pad = tf.keras.layers.ZeroPadding2D(
            (
                (top_pad, 0),        # Top, Bottom
                (side_pad, side_pad),  # Left, Right
            )
        )

    def call(self, inputs):
        padded = self.pad(inputs)
        return super(VerticalConv2D, self).call(padded)
kernel_h = 2
kernel_w = 3
kernel_size = (kernel_h, kernel_w)
conv = VerticalConv2D(filters=1,
kernel_size=kernel_size,
strides=1,
padding='valid',
kernel_initializer='ones',
bias_initializer='zeros')
result_v = conv(test_ones_2d)
print('INPUT')
print(test_ones_2d.squeeze())
print('')
print('CONV FILTER')
print(conv.weights[0].numpy().squeeze())
print('')
print('OUTPUT')
print(result_v.numpy().squeeze())
```
## Vertical to horizontal stack
In this operation, the implementation remains the same.
```
padding = keras.layers.ZeroPadding2D(padding=((1, 0), 0))
cropping = keras.layers.Cropping2D(cropping=((0, 1), 0))
x = padding(result_v)
result = cropping(x)
print('INPUT')
print(result_v.numpy().squeeze())
print('')
print('OUTPUT')
print(result.numpy().squeeze())
```
## Horizontal stack - convolution layer with mask type "A"
Again, let's check each operation step by step.
```
kernel_size = (1, 1)
conv = keras.layers.Conv2D(filters=1,
kernel_size=kernel_size,
strides=1,
kernel_initializer='ones',
bias_initializer='zeros')
padding = keras.layers.ZeroPadding2D(padding=(0, (1, 0)))
cropping = keras.layers.Cropping2D(cropping=(0, (0, 1)))
res = conv(test_ones_2d)
res_2 = padding(res)
res_3 = cropping(res_2)
print('INPUT')
print(test_ones_2d.squeeze())
print('')
print('CONV FILTER')
print(conv.weights[0].numpy().squeeze())
print('')
print('CONVOLUTION RESULT')
print(res.numpy().squeeze())
print('')
print('PADDED RESULT')
print(res_2.numpy().squeeze())
print('')
print('CROPPED RESULT')
print(res_3.numpy().squeeze())
```
Note: since our input test has just one channel, the 1x1 convolution appears not to change anything.
## Horizontal stack - convolution layer with mask type "B"
The step by step of the mask type "B" convolution layer is a little different.
```
kernel_size = (1, 2)
kernel_h, kernel_w = kernel_size
padding = keras.layers.ZeroPadding2D(padding=((int((kernel_h - 1) / 2), int((kernel_h - 1) / 2)), (kernel_w - 1, 0)))
conv = keras.layers.Conv2D(filters=1,
kernel_size=kernel_size,
strides=1,
padding='valid',
kernel_initializer='ones',
bias_initializer='zeros')
res = padding(test_ones_2d)
result = conv(res)
print('INPUT')
print(test_ones_2d.squeeze())
print('')
print('PADDED INPUT')
print(res.numpy().squeeze())
print('')
print('CONV FILTER')
print(conv.weights[0].numpy().squeeze())
print('')
print('RESULT')
print(result.numpy().squeeze())
```
In this case, we also implemented a layer version encapsulating these operations.
```
class HorizontalConv2D(keras.layers.Conv2D):
    """Horizontal-stack convolution (mask type "B") built from cropping/padding.

    Pads the input on the left by (kernel width - 1) so each output pixel only
    sees the current pixel and the pixels to its left in the same row.

    Arguments:
        filters: number of output filters.
        kernel_size: (height, width) tuple, or an int ``k`` describing the full
            receptive-field width, converted to ``(1, k // 2 + 1)``.
        **kwargs: forwarded to ``keras.layers.Conv2D`` (intended to be used with
            ``padding='valid'``).
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 **kwargs):
        if not isinstance(kernel_size, tuple):
            # Bug fix: the horizontal stack convolves a single row, so an int
            # kernel size must become (1, k // 2 + 1). The previous code
            # produced a square (k // 2 + 1, k // 2 + 1) kernel, which would
            # also look at rows above the current pixel.
            kernel_size = (1, kernel_size // 2 + 1)
        super(HorizontalConv2D, self).__init__(filters, kernel_size, **kwargs)
        self.pad = tf.keras.layers.ZeroPadding2D(
            (
                (kernel_size[0] - 1, 0),  # (Top, Bottom)
                (kernel_size[1] - 1, 0),  # (Left, Right)
            )
        )

    def call(self, inputs):
        inputs = self.pad(inputs)
        outputs = super(HorizontalConv2D, self).call(inputs)
        return outputs
kernel_size = (1, 2)
conv = HorizontalConv2D(filters=1,
kernel_size=kernel_size,
strides=1,
kernel_initializer='ones',
bias_initializer='zeros')
result = conv(test_ones_2d)
print('INPUT')
print(test_ones_2d.squeeze())
print('')
print('CONV FILTER')
print(conv.weights[0].numpy().squeeze())
print('')
print('RESULT')
print(result.numpy().squeeze())
```
# Execution time
Now we will compare the time that takes to perform each convolutional operation.
```
import time
def measure_time(conv_fn, n_iter=100, input_shape=(128, 256, 256, 1)):
    """Time repeated invocations of a convolution function on random input.

    Args:
        conv_fn: callable taking a single float32 array argument.
        n_iter: number of timed iterations (default 100, as before).
        input_shape: shape of the random input tensor fed to ``conv_fn``.

    Returns:
        Tuple ``(mean, std)`` of per-call execution times in seconds.
    """
    # Warm-up call outside the timed loop: the first call of a tf.function
    # includes graph tracing/compilation, which would skew the statistics.
    conv_fn(np.random.rand(*input_shape).astype('float32'))
    exec_time = []
    for _ in range(n_iter):
        # Input generation happens before the timer starts, so it is excluded.
        test_input = np.random.rand(*input_shape).astype('float32')
        start = time.time()
        conv_fn(test_input)
        exec_time.append(time.time() - start)
    exec_time = np.array(exec_time, dtype='float32')
    return exec_time.mean(), exec_time.std()
```
## Vertical stack
```
mask_type = 'V'
kernel_size = (3, 3)
masked_conv = MaskedConv2D(mask_type=mask_type,
filters=32,
kernel_size=kernel_size,
padding='same',
kernel_initializer='ones',
bias_initializer='zeros')
@tf.function
def test_masked_fn(x):
_ = masked_conv(x)
masked_time = measure_time(test_masked_fn)
# ----------------------------------------------------------------
kernel_size = (2, 3)
cropped_conv = VerticalConv2D(filters=32,
kernel_size=kernel_size,
strides=1,
padding='valid',
kernel_initializer='ones',
bias_initializer='zeros')
@tf.function
def test_cropped_fn(x):
_ = cropped_conv(x)
cropped_time = measure_time(test_cropped_fn)
# ----------------------------------------------------------------
print("Vertical stack")
print(f"Masked convolution: {masked_time[0]:.8f} +- {masked_time[1]:.8f} seconds")
print(f"Cropped padded convolution: {cropped_time[0]:.8f} +- {cropped_time[1]:.8f} seconds")
```
## Horizontal stack - convolution layer with mask type "A"
```
mask_type = 'A'
kernel_size = (1, 3)
masked_conv = MaskedConv2D(mask_type=mask_type,
filters=1,
kernel_size=kernel_size,
padding='same',
kernel_initializer='ones',
bias_initializer='zeros')
@tf.function
def test_masked_fn(x):
_ = masked_conv(x)
masked_time = measure_time(test_masked_fn)
# ----------------------------------------------------------------
kernel_size = (1, 1)
conv = keras.layers.Conv2D(filters=1,
kernel_size=kernel_size,
strides=1,
kernel_initializer='ones',
bias_initializer='zeros')
padding = keras.layers.ZeroPadding2D(padding=(0, (1, 0)))
cropping = keras.layers.Cropping2D(cropping=(0, (0, 1)))
@tf.function
def test_cropped_fn(x):
x = conv(x)
x = padding(x)
x = cropping(x)
cropped_time = measure_time(test_cropped_fn)
# ----------------------------------------------------------------
print("Horizontal stack - convolution layer with mask type 'A'")
print(f"Masked convolution: {masked_time[0]:.8f} +- {masked_time[1]:.8f} seconds")
print(f"Cropped padded convolution: {cropped_time[0]:.8f} +- {cropped_time[1]:.8f} seconds")
```
## Horizontal stack - convolution layer with mask type "B"
```
mask_type = 'B'
kernel_size = (1, 3)
masked_conv = MaskedConv2D(mask_type=mask_type,
filters=1,
kernel_size=kernel_size,
padding='same',
kernel_initializer='ones',
bias_initializer='zeros')
@tf.function
def test_masked_fn(x):
_ = masked_conv(x)
masked_time = measure_time(test_masked_fn)
# ----------------------------------------------------------------
kernel_size = (1, 2)
cropped_conv = HorizontalConv2D(filters=1,
kernel_size=kernel_size,
strides=1,
kernel_initializer='ones',
bias_initializer='zeros')
@tf.function
def test_cropped_fn(x):
_ = cropped_conv(x)
cropped_time = measure_time(test_cropped_fn)
# ----------------------------------------------------------------
print("Horizontal stack - convolution layer with mask type 'B'")
print(f"Masked convolution: {masked_time[0]:.8f} +- {masked_time[1]:.8f} seconds")
print(f"Cropped padded convolution: {cropped_time[0]:.8f} +- {cropped_time[1]:.8f} seconds")
```
Although it looks like the cropped version is faster for the vertical convolution, the difference does not look very significant.
# REFERENCES
https://wiki.math.uwaterloo.ca/statwiki/index.php?title=STAT946F17/Conditional_Image_Generation_with_PixelCNN_Decoders#Gated_PixelCNN
https://www.slideshare.net/suga93/conditional-image-generation-with-pixelcnn-decoders
https://www.youtube.com/watch?v=1BURwCCYNEI
| github_jupyter |
# Comparing two Counters
Today we will look at a way of scoring the significance of differences between frequency distributions, based on a method called "Fightin' Words" by Monroe, Colaresi, and Quinn.
```
import re, sys, glob, math
import numpy
from collections import Counter
from matplotlib import pyplot
```
1. What is the encoding of the files? How are they structured? What do we need to do to separate text from non-textual words like speakers and stage directions?
2. Look at the most frequent words in the counters for comedy and tragedy. What is different? Is this view informative about differences between these two genres?
3. There is a problem calculating `log_rank`. What is it, and how can we fix it?
4. What does the `generate_scores` function do? What is the effect of the `smoothing` parameter?
5. Look at the plot showing "Fightin' Words" scores for comedy vs. tragedy. What stands out? What does this tell you about these genres in Shakespeare? What if any changes might you make to how we tokenize or otherwise pre-process the documents?
6. Create the same plot for tragedy vs. history and comedy vs. history. What is different? What words would you want to look at in their original context and why?
```
genre_directories = { "tragedy" : "shakespeare/tragedies", "comedy" : "shakespeare/comedies", "history" : "shakespeare/historical" }
word_pattern = re.compile("\w[\w\-\'’]*\w|\w")
# This counter will store the total frequency of each word type across all plays
all_counts = Counter()
# This dictionary will have one counter for each genre
genre_counts = {}
# This dictionary will have one dictionary for each genre, each containing one Counter for each play in that genre
genre_play_counts = {}
# Read the plays from files
for genre in genre_directories.keys():
genre_play_counts[genre] = {}
genre_counts[genre] = Counter()
for filename in glob.glob("{}/*.txt".format(genre_directories[genre])):
play_counter = Counter()
genre_play_counts[genre][filename] = play_counter
with open(filename, encoding="utf-8") as file: ## What encoding?
## This block reads a file line by line.
for line in file:
line = line.rstrip()
tokens = word_pattern.findall(line)
play_counter.update(tokens)
genre_counts[genre] += play_counter
all_counts += play_counter
genre_counts.keys()
genre_play_counts.keys()
genre_play_counts["comedy"].keys()
genre_play_counts["comedy"]["shakespeare/comedies/The Merry Wives of Windsor.txt"].most_common(30)
genre_counts["comedy"].most_common(15)
genre_counts["tragedy"].most_common(15)
vocabulary = [w for w, c in all_counts.most_common()]
vocabulary_size = len(vocabulary)
total_word_counts = numpy.array([all_counts[w] for w in vocabulary])
log_counts = numpy.log(total_word_counts)
word_ranks = numpy.arange(len(vocabulary))
log_ranks = numpy.log(word_ranks)
genres = genre_play_counts.keys()
pyplot.scatter(log_ranks, log_counts, alpha = 0.2)
pyplot.show()
def generate_scores(counter, smoothing = 0.0):
    """Map the global vocabulary to a float array of (optionally smoothed) counts."""
    # Missing words yield 0 from the Counter, so every vocabulary slot is filled.
    return numpy.array(
        [counter[word] + smoothing for word in vocabulary],
        dtype=numpy.float64,
    )
def count_difference(counter_a, counter_b, smoothing):
    """Compute "Fightin' Words" log-odds-ratio z-scores comparing two counters."""
    smoothed_a = generate_scores(counter_a, smoothing)
    smoothed_b = generate_scores(counter_b, smoothing)

    # Odds of each word versus all other words within its own corpus.
    odds_a = smoothed_a / (numpy.sum(smoothed_a) - smoothed_a)
    odds_b = smoothed_b / (numpy.sum(smoothed_b) - smoothed_b)

    # Approximate variance of the log-odds-ratio estimate.
    variance = (1.0 / smoothed_a) + (1.0 / smoothed_b)

    return numpy.log(odds_a / odds_b) / numpy.sqrt(variance)
comedy_tragedy_scores = count_difference(genre_counts["comedy"], genre_counts["tragedy"], 0.0)
sorted_words = sorted(zip(comedy_tragedy_scores, vocabulary))
print(sorted_words[:10])
print(sorted_words[-10:])
pyplot.figure(figsize=(20, 20))
pyplot.xlim(3, 11)
pyplot.scatter(log_counts, comedy_tragedy_scores, alpha = 0.2)
for word_id, word in enumerate(vocabulary):
if numpy.abs(comedy_tragedy_scores[word_id]) + log_counts[word_id] > 7.5:
pyplot.text(log_counts[word_id], comedy_tragedy_scores[word_id], word)
pyplot.show()
```
| github_jupyter |
# 1-Getting Started
Always run this statement first, when working with this book:
```
from scipy import *
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
```
## Numbers
```
2 ** (2 + 2)
1j ** 2 # A complex number
1. + 3.0j # Another complex number
```
## Strings
```
'valid string'
"string with double quotes"
"you shouldn't forget comments"
'these are double quotes: ".." '
"""This is
a long,
long string"""
```
## Variables
```
x = [3, 4] # a list object is created
y = x # this object now has two labels: x and y
del x # we delete one of the labels
del y # both labels are removed: the object is deleted
x = [3, 4] # a list object is created
print(x)
```
## Lists
```
L1 = [5, 6]
L1[0] # 5
L1[1] # 6
L1[2] # raises IndexError
L2 = ['a', 1, [3, 4]]
L2[0] # 'a'
L2[2][0] # 3
L2[-1] # last element: [3,4]
L2[-2] # second to last: 1
print(list(range(5)))
len(['a', 1, 2, 34])
L = ['a', 'b', 'c']
L[-1] # 'c'
L.append('d')
L # L is now ['a', 'b', 'c', 'd']
L[-1] # 'd'
```
### Operations on Lists
```
L1 = [1, 2]
L2 = [3, 4]
L = L1 + L2 # [1, 2, 3, 4]
L
L = [1, 2]
3 * L # [1, 2, 1, 2, 1, 2]
```
## Boolean Expressions
```
2 >= 4 # False
2 < 3 < 4 # True
2 < 3 and 3 < 2 # False
2 != 3 < 4 or False # True
2 <= 2 and 2 >= 2 # True
not 2 == 3 # True
not False or True and False # True!
```
## Repeating statements by loops
```
L = [1, 2, 10]
for s in L:
print(s * 2) # output: 2 4 20
```
### Repeating a task
```
n = 30
k=0
for iteration in range(n):
k+= iteration #do_something(this gets executed n times)
k
```
### Break and else
```
threshold=30
x_values=range(20)
for x in x_values:
if x > threshold:
break
print(x)
for x in x_values:
if x > threshold:
break
else:
print("all the x are below the threshold")
```
## Conditional Statements
```
# The absolute value
x=-25
if x >= 0:
print(x)
else:
print(-x)
```
## Encapsulating code by functions
Example:
$$x \mapsto f(x) := 2x + 1$$
```
def f(x):
return 2*x + 1
```
Calling this function:
```
f(2) # 5
f(1) # 3
```
## Scripts and modules
```
def f(x):
return 2*x + 1
z = []
for x in range(10):
if f(x) > pi:
z.append(x)
else:
z.append(-1)
print(z)
exec(open('smartscript.py').read())
%run smartscript
```
## Simple modules - collecting Functions
For the next example to work, you need a file `smartfunctions.py`in the same folder as this notebook:
```
def f(x):
return 2*x + 1
def g(x):
return x**2 + 4*x - 5
def h(x):
return 1/f(x)
```
### Using modules and namespaces
```
import smartfunctions
print(smartfunctions.f(2))
from smartfunctions import g #import just this one function
print(g(1))
from smartfunctions import * #import all
print(h(2)*f(2))
```
## Interpreter
```
def f(x):
return y**2
a = 3 # here both a and f are defined
f(2) # error, y is not defined
```
| github_jupyter |
# Road Follower - Train Model
In this notebook we will train a neural network to take an input image, and output a set of x, y values corresponding to a target.
We will be using PyTorch deep learning framework to train ResNet18 neural network architecture model for road follower application.
```
import torch
import torch.optim as optim
import torch.nn.functional as F
import torchvision
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
import glob
import PIL.Image
import os
import numpy as np
```
### Download and extract data
Before you start, you should upload the ``road_following_<Date&Time>.zip`` file that you created in the ``data_collection.ipynb`` notebook on the robot.
> If you're training on the JetBot you collected data on, you can skip this!
You should then extract this dataset by calling the command below:
```
!unzip -q road_following.zip
```
You should see a folder named ``dataset_all`` appear in the file browser.
### Create Dataset Instance
Here we create a custom ``torch.utils.data.Dataset`` implementation, which implements the ``__len__`` and ``__getitem__`` functions. This class
is responsible for loading images and parsing the x, y values from the image filenames. Because we implement the ``torch.utils.data.Dataset`` class,
we can use all of the torch data utilities :)
We hard coded some transformations (like color jitter) into our dataset. We made random horizontal flips optional (in case you want to follow a non-symmetric path, like a road
where we need to 'stay right'). If it doesn't matter whether your robot follows some convention, you could enable flips to augment the dataset.
```
def get_x(path):
    """Gets the x value from the image filename"""
    # Filenames look like "xy_XXX_YYY_<uuid>.jpg"; chars 3-5 hold the x value
    # in [0, 100], which is rescaled to [-1, 1].
    encoded = int(path[3:6])
    return (encoded - 50.0) / 50.0
def get_y(path):
    """Gets the y value from the image filename"""
    # Chars 7-9 of "xy_XXX_YYY_<uuid>.jpg" hold the y value in [0, 100],
    # rescaled to [-1, 1].
    encoded = int(path[7:10])
    return (encoded - 50.0) / 50.0
class XYDataset(torch.utils.data.Dataset):
    """Dataset pairing road images with (x, y) steering targets parsed from filenames.

    Args:
        directory: folder containing ``xy_XXX_YYY_<uuid>.jpg`` images.
        random_hflips: when True, each sample is horizontally flipped with
            probability 0.5 (negating x). Leave False for non-symmetric paths.
    """

    def __init__(self, directory, random_hflips=False):
        self.directory = directory
        self.random_hflips = random_hflips
        self.image_paths = glob.glob(os.path.join(self.directory, '*.jpg'))
        self.color_jitter = transforms.ColorJitter(0.3, 0.3, 0.3, 0.3)

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        image_path = self.image_paths[idx]
        image = PIL.Image.open(image_path)
        x = float(get_x(os.path.basename(image_path)))
        y = float(get_y(os.path.basename(image_path)))

        # Bug fix: honor the random_hflips flag. Previously the flag was
        # stored but never checked, so images were flipped unconditionally
        # with probability 0.5 even when random_hflips=False.
        if self.random_hflips and float(np.random.rand(1)) > 0.5:
            image = transforms.functional.hflip(image)
            x = -x

        image = self.color_jitter(image)
        image = transforms.functional.resize(image, (224, 224))
        image = transforms.functional.to_tensor(image)
        # Swap channel order (RGB -> BGR) before normalizing.
        image = image.numpy()[::-1].copy()
        image = torch.from_numpy(image)
        image = transforms.functional.normalize(image, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        return image, torch.tensor([x, y]).float()
dataset = XYDataset('dataset_xy', random_hflips=False)
```
### Split dataset into train and test sets
Once we read dataset, we will split data set in train and test sets. In this example we split train and test a 90%-10%. The test set will be used to verify the accuracy of the model we train.
```
test_percent = 0.1
num_test = int(test_percent * len(dataset))
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len(dataset) - num_test, num_test])
```
### Create data loaders to load data in batches
We use ``DataLoader`` class to load data in batches, shuffle data and allow using multi-subprocesses. In this example we use batch size of 64. Batch size will be based on memory available with your GPU and it can impact accuracy of the model.
```
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=8,
shuffle=True,
num_workers=0
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=8,
shuffle=True,
num_workers=0
)
```
### Define Neural Network Model
We use ResNet-18 model available on PyTorch TorchVision.
In a process called transfer learning, we can repurpose a pre-trained model (trained on millions of images) for a new task that has possibly much less data available.
More details on ResNet-18 : https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
More Details on Transfer Learning: https://www.youtube.com/watch?v=yofjFQddwHE
```
model = models.resnet18(pretrained=True)
```
ResNet model has fully connect (fc) final layer with 512 as ``in_features`` and we will be training for regression thus ``out_features`` as 1
Finally, we transfer our model for execution on the GPU
```
model.fc = torch.nn.Linear(512, 2)
device = torch.device('cuda')
model = model.to(device)
```
### Train Regression:
We train for 70 epochs and save the best model whenever the test loss improves.
```
NUM_EPOCHS = 70
BEST_MODEL_PATH = 'best_steering_model_xy.pth'
best_loss = 1e9
optimizer = optim.Adam(model.parameters())
for epoch in range(NUM_EPOCHS):
model.train()
train_loss = 0.0
for images, labels in iter(train_loader):
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(images)
loss = F.mse_loss(outputs, labels)
train_loss += float(loss)
loss.backward()
optimizer.step()
train_loss /= len(train_loader)
model.eval()
test_loss = 0.0
for images, labels in iter(test_loader):
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
loss = F.mse_loss(outputs, labels)
test_loss += float(loss)
test_loss /= len(test_loader)
print('%f, %f' % (train_loss, test_loss))
if test_loss < best_loss:
torch.save(model.state_dict(), BEST_MODEL_PATH)
best_loss = test_loss
```
Once the model is trained, it will generate ``best_steering_model_xy.pth`` file which you can use for inferencing in the live demo notebook.
If you trained on a different machine other than JetBot, you'll need to upload this to the JetBot to the ``road_following`` example folder.
| github_jupyter |
<h1 align="center"> Logistic Regression (Preloaded Dataset) </h1>
scikit-learn comes with a few small datasets that do not require to download any file from some external website. The digits dataset we will use is one of these small standard datasets. These datasets are useful to quickly illustrate the behavior of the various algorithms implemented in the scikit. They are however often too small to be representative of real world machine learning tasks. After learning the basics of logisitic regression, we will use the MNIST Handwritten digit database
<b>Each datapoint is a 8x8 image of a digit.</b>
Parameters | Number
--- | ---
Classes | 10
Samples per class | ~180
Samples total | 1797
Dimensionality | 64
Features | integers 0-16
```
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Used for Confusion Matrix
from sklearn import metrics
%matplotlib inline
digits = load_digits()
digits.data.shape
digits.target.shape
```
## Showing the Images and Labels
```
plt.figure(figsize=(20,4))
for index, (image, label) in enumerate(zip(digits.data[0:5], digits.target[0:5])):
plt.subplot(1, 5, index + 1)
plt.imshow(np.reshape(image, (8,8)), cmap=plt.cm.gray)
plt.title('Training: %i\n' % label, fontsize = 20)
```
## Splitting Data into Training and Test Sets
```
# test_size: what proportion of original data is used for test set
x_train, x_test, y_train, y_test = train_test_split(
digits.data, digits.target, test_size=0.25, random_state=0)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
```
## Scikit-learn 4-Step Modeling Pattern
<b>Step 1: </b> Import the model you want to use
In sklearn, all machine learning models are implemented as Python classes
```
from sklearn.linear_model import LogisticRegression
```
<b>Step 2:</b> Make an instance of the Model
```
# Instantiate the classifier with scikit-learn's default hyperparameters.
logisticRegr = LogisticRegression()
```
<b>Step 3:</b> Training the model on the data, storing the information learned from the data
Model is learning the relationship between x (digits) and y (labels)
```
# Learn the mapping from 64-pixel feature vectors to digit labels.
logisticRegr.fit(x_train, y_train)
```
<b>Step 4</b>: Predict the labels of new data (new images)
Uses the information the model learned during the model training process
```
# Returns a NumPy Array
# Predict for One Observation (image)
# reshape(1, -1) turns the flat 64-pixel vector into a single-row 2-D array,
# since predict() expects shape (n_samples, n_features).
logisticRegr.predict(x_test[0].reshape(1,-1))
# Predict for Multiple Observations (images) at Once
logisticRegr.predict(x_test[0:10])
# Make predictions on entire test data
predictions = logisticRegr.predict(x_test)
# One predicted label per test sample.
predictions.shape
```
## Measuring Model Performance
accuracy (fraction of correct predictions): correct predictions / total number of data points
Basically, how the model performs on new data (test set)
```
# Use score method to get accuracy of model
# score() returns mean accuracy: correct predictions / total test samples.
score = logisticRegr.score(x_test, y_test)
print(score)
```
## Confusion Matrix (Matplotlib)
A confusion matrix is a table that is often used to describe the performance of a classification model (or "classifier") on a set of test data for which the true values are known.
```
def plot_confusion_matrix(cm, title='Confusion matrix', cmap='Pastel1'):
    """Render a confusion matrix as an annotated matplotlib image.

    Parameters
    ----------
    cm : array of shape (n_classes, n_classes)
        Confusion matrix, e.g. from ``metrics.confusion_matrix``
        (rows = actual labels, columns = predicted labels).
    title : str
        Figure title.
    cmap : str or Colormap
        Colormap passed to ``plt.imshow``.
    """
    plt.figure(figsize=(9,9))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, size = 15)
    plt.colorbar()
    tick_marks = np.arange(10)
    plt.xticks(tick_marks, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], rotation=45, size = 10)
    plt.yticks(tick_marks, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], size = 10)
    plt.tight_layout()
    plt.ylabel('Actual label', size = 15)
    plt.xlabel('Predicted label', size = 15)
    width, height = cm.shape
    # BUG FIX: `xrange` is Python 2 only; on the Python 3 interpreter this
    # notebook uses, the original raised NameError here. Use `range`.
    for x in range(width):
        for y in range(height):
            # imshow draws row x on the vertical axis, so the annotation
            # position is (col=y, row=x).
            plt.annotate(str(cm[x][y]), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center')
# confusion matrix
# Rows are actual labels, columns are predicted labels (sklearn convention).
confusion = metrics.confusion_matrix(y_test, predictions)
print('Confusion matrix')
print(confusion)
plt.figure()
plot_confusion_matrix(confusion);
plt.show();
```
## Confusion Matrix (Seaborn)
<b>Note: Seaborn needs to be installed for this portion </b>
```
# !conda install seaborn -y
# Make predictions on test data
predictions = logisticRegr.predict(x_test)
cm = metrics.confusion_matrix(y_test, predictions)
#cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(9,9))
# annot=True writes each cell count into the heatmap; fmt=".3f" renders the
# integer counts with three decimals (e.g. 37.000) — presumably intentional here.
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
# Reuse the accuracy computed earlier as the figure title.
all_sample_title = 'Accuracy Score: {0}'.format(score)
plt.title(all_sample_title, size = 15);
```
## Display Misclassified images with Predicted Labels
```
# Collect indices where the model's prediction disagrees with the true label.
# (Idiom fix: replaces a manual `index` counter loop with enumerate/zip in a
# comprehension; the resulting list is identical.)
misclassifiedIndex = [i for i, (predict, actual) in enumerate(zip(predictions, y_test))
                      if predict != actual]
# Show five misclassified test images (the 11th-15th) with predicted vs. actual labels.
plt.figure(figsize=(20,4))
for plotIndex, wrong in enumerate(misclassifiedIndex[10:15]):
    plt.subplot(1, 5, plotIndex + 1)
    plt.imshow(np.reshape(x_test[wrong], (8,8)), cmap=plt.cm.gray)
    plt.title('Predicted: {}, Actual: {}'.format(predictions[wrong], y_test[wrong]), fontsize = 20)
```
Part 2 of the tutorial is located here: [MNIST Logistic Regression](https://github.com/mGalarnyk/Python_Tutorials/blob/master/Sklearn/Logistic_Regression/LogisticRegression_MNIST.ipynb)
<b>if this tutorial doesn't cover what you are looking for, please leave a comment on the youtube video and I will try to cover what you are interested in. </b>
[youtube video](https://www.youtube.com/watch?v=71iXeuKFcQM)
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.