code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
# ResNet-101 on CIFAR-10
### Imports
```
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
# Make cuDNN deterministic so repeated runs with the same seed reproduce
# results exactly (may cost some speed).
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
```
### Settings and Dataset
```
# Device: use GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 10
batch_size = 128
torch.manual_seed(random_seed)

# Architecture
# NOTE(review): num_features = 784 looks like an MNIST leftover (28*28) —
# it is never used below; CIFAR-10 images are 3x32x32. TODO confirm/remove.
num_features = 784
num_classes = 10

# Data: CIFAR-10, images converted to float tensors in [0, 1].
train_dataset = datasets.CIFAR10(root='data',
                                 train=True,
                                 transform=transforms.ToTensor(),
                                 download=True)

test_dataset = datasets.CIFAR10(root='data',
                                train=False,
                                transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)  # reshuffle training data each epoch

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)

# Sanity-check one batch: expect images (128, 3, 32, 32), labels (128,).
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
```
### Model
```
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1.

    At stride 1 the spatial size is preserved; at stride 2 it is halved.
    """
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     padding=1,
                     stride=stride,
                     bias=False)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + skip.

    `downsample`, when given, projects the skip path so shapes match.
    """

    expansion = 4  # channel expansion factor of the last 1x1 conv

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # NOTE: keep layer creation order stable — random init reproducibility
        # under a fixed seed depends on it.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # optional projection on the skip path
        self.stride = stride

    def forward(self, x):
        """Return relu(main_branch(x) + skip(x))."""
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        # Skip path: identity, or projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)

        return self.relu(y + shortcut)
class ResNet(nn.Module):
    """ImageNet-style ResNet assembled from `block` (here: Bottleneck).

    Args:
        block: residual block class; must expose a class attr `expansion`.
        layers: number of blocks per stage, e.g. [3, 4, 23, 3] for ResNet-101.
        num_classes: output size of the final fully-connected layer.
        grayscale: if True expect 1 input channel, else 3 (RGB).

    forward() returns (logits, softmax probabilities).
    """

    def __init__(self, block, layers, num_classes, grayscale):
        self.inplanes = 64  # channels entering the next stage (mutated by _make_layer)
        if grayscale:
            in_dim = 1
        else:
            in_dim = 3
        super(ResNet, self).__init__()
        # Stem: 7x7/2 conv + 3x3/2 max-pool (ImageNet-style). On 32x32 CIFAR
        # inputs this already reduces the maps to 8x8 before the stages.
        self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial size.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # NOTE(review): self.avgpool is defined but never used in forward();
        # for 32x32 inputs layer4 already emits 1x1 maps and forward()
        # flattens straight into the 2048-wide fc. TODO confirm/remove.
        self.avgpool = nn.AvgPool2d(7, stride=1, padding=2)
        self.fc = nn.Linear(2048, num_classes)

        # He-style init for convs; BatchNorm starts as identity (w=1, b=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n)**.5)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first may downsample."""
        downsample = None
        # Projection shortcut when spatial size or channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion))

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (logits, probas) for a batch of images."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Flatten; assumes layer4 output is (N, 2048, 1, 1) — TODO confirm
        # for input sizes other than 32x32.
        x = x.view(x.size(0), -1)
        logits = self.fc(x)
        probas = F.softmax(logits, dim=1)
        return logits, probas
def ResNet101(num_classes):
    """Build a ResNet-101 (Bottleneck blocks, stage depths 3-4-23-3) for RGB input."""
    return ResNet(block=Bottleneck,
                  layers=[3, 4, 23, 3],
                  num_classes=num_classes,
                  grayscale=False)
# Instantiate ResNet-101 for the 10 CIFAR classes and move it to the device.
model = ResNet101(num_classes)
model.to(device)

# Adam over all parameters with the learning rate chosen above.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
### Training
```
def compute_accuracy(model, data_loader):
    """Return the model's accuracy (percent) over `data_loader`.

    The model is expected to return (logits, probas); batches are moved to
    the module-level `device`.

    Fixes vs. original: gradients are now disabled (the old version built an
    autograd graph for every evaluation batch, wasting memory), and the
    prediction uses argmax over logits — identical to argmax over softmax,
    since softmax is monotone, but skips a pointless pass over `probas`.
    """
    correct_pred, num_examples = 0, 0
    with torch.no_grad():  # inference only
        for i, (features, targets) in enumerate(data_loader):
            features = features.to(device)
            targets = targets.to(device)
            logits, probas = model(features)
            _, predicted_labels = torch.max(logits, 1)
            num_examples += targets.size(0)
            correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float() / num_examples * 100
# Training loop: one full pass over train_loader per epoch, logging the cost
# every 50 batches and the training accuracy at the end of each epoch.
for epoch in range(num_epochs):
    model.train()  # enable batch-norm training behavior
    for batch_idx, (features, targets) in enumerate(train_loader):
        features = features.to(device)
        targets = targets.to(device)

        # Forward and Backprop
        logits, probas = model(features)
        # cross_entropy expects raw logits (applies log-softmax internally)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()
        cost.backward()

        # update model parameters
        optimizer.step()

        # Logging
        if not batch_idx % 50:
            print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                   %(epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    model.eval()  # switch batch-norm to running statistics
    with torch.set_grad_enabled(False):  # save memory during the accuracy pass
        print('Epoch: %03d/%03d | Train: %.3f%% ' %(
              epoch+1, num_epochs,
              compute_accuracy(model, train_loader)))
```
### Evaluation
```
# Final evaluation on the held-out test set (gradients disabled).
with torch.set_grad_enabled(False):
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import torch
```
$\textbf{Definitions:}$ $\\$
$\mbox{---Gradient---}$ $\\$ $\\$
Vector formed by partial derivatives of scalar function, f(x) in which $x = \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\x_n \end{bmatrix}$ $\\$
The scalar function $f$ maps $\mathbb{R}^n \rightarrow \mathbb{R}$; its gradient maps $\mathbb{R}^n \rightarrow \mathbb{R}^n$ $\\$
$$\nabla f(\mathbf{x})=\frac{\partial f(\mathbf{x})}{\partial x_1}\hat{x}_1+\frac{\partial f(\mathbf{x})}{\partial x_2}\hat{x}_2+\ldots+\frac{\partial f(\mathbf{x})}{\partial x_n}\hat{x}_n$$
$$\nabla f(x) = \left[\frac{\partial f}{\partial x_1}\frac{\partial f}{\partial x_2}\dots\frac{\partial f}{\partial x_n}\right]$$
Note: Input is a column vector, outputs a row vector. $\\$
Gradient is the rate of change wrt each dimension/component and corresponds to steepest slope due to linear independence used for gradient descent. $\\$
$\mbox{---Jacobian---}$ $\\$
Matrix formed by partial derivatives of vector function of scalar functions, maps $\mathbb{R}^n \rightarrow \mathbb{R}^m$ $\\$
$$J_\mathbf{f} = \frac{\partial (f_1,\ldots,f_m)}{\partial(x_1,\ldots,x_n)} = \left[
\begin{matrix}
\frac{\partial f_1}{\partial x_1} & \cdots & \frac{\partial f_1}{\partial x_n} \\
\vdots & \ddots & \vdots \\
\frac{\partial f_m}{\partial x_1} & \cdots & \frac{\partial f_m}{\partial x_n}
\end{matrix}
\right]$$
The Jacobian is the gradient applied to multiple rows, commonly used as a change of basis/unit conversion:
$$\iiint_R f(x,y,z) \,dx\,dy\,dz = \iiint_S f(x(u,v,w),y(u,v,w),z(u,v,w))\left|\frac{\partial (x,y,z)}{\partial(u,v,w)}\right|\,du\,dv\,dw$$
Note: Gradient = Jacobian if $m = 1$.
$\mbox{---Hessian---}$ $\\$
Gradient applied to Gradient, Double Gradient: $\\$
$$\begin{align}D[\nabla f(\mathbf x)] &= D[D[f(\mathbf x)]]\\
&=\left(D\left[\frac{\partial f}{\partial x_1}\right]^T, \ldots, D\left[\frac{\partial f}{\partial x_n}\right]^T\right)\end{align}$$
Which expands to give us the Hessian matrix:
$$D^2[f(\mathbf x)]=\left(\begin{matrix}\frac{\partial^2 f}{\partial x_1^2} & \ldots & \frac{\partial^2 f}{\partial x_1\partial x_n}\\
\vdots & \ddots & \vdots \\
\frac{\partial^2 f}{\partial x_n\partial x_1}& \ldots & \frac{\partial^2 f}{\partial x_n^2}\end{matrix}\right)$$
Note: Inputs are column vectors, outputs are row vectors. (Transposed first because the first gradient outputs a row vector) $\\$
The Hessian represents the rate of change of gradient, analogous to curvature. Used to computationally determine the position of a min/max point in optimization, which is darn impossible to visualize past 2 dimensions.
$\textbf{Analytic Gradient:}$ $\\$
$\mbox{---Linear Form---}$ $\\$
$f(x) = a^T x$ $\\$
Component-wise differentiation yields the corresponding dot-product coefficient of each component $k$. $\\$
Assemble the partial derivatives into a vector: $\\$
$\nabla f(x) = \begin{bmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{bmatrix} = a$
-General Linear Form: $\\$
$f(x) = a^Tx + b$ $\\$
$\nabla f(x) = a$
$\mbox{---Quadratic Form---}$ $\\$
$f(x) = x^T A x$ $\\$
Tracing through 2x2 example: $\\$
$\nabla f(x) = (A + A^T)x$ $\\$
For symmetric matrices (e.g. symmetric positive definite $A$), $A = A^T$, so: $\\$
$\nabla f(x) = 2Ax$
-General Quadratic Form, which builds from gradient of general linear form: $\\$
$f(x) = \frac{1}{2}x^T A x + b^Tx + c$ $\\$
$\nabla f(x) = \frac{1}{2}(A^T + A)x + b$ $\\$
For symmetric matrix A: $\\$
$\nabla f(x) = Ax + b$
-Mixed Quadratic Form: $\\$
$f(x,y) = x^T A y$ $\\$
Wrt x: $\nabla_x f(x,y) = Ay$ $\\$
Wrt y: $\nabla_y f(x,y) = A^Tx$ $\\$
Taking the right partial derivative, transpose. $\\$
If $A$ is symmetric ($A = A^T$), then wrt y: $\nabla_y f(x,y) = Ax$
$\textbf{Analytic Hessian:}$ $\\$
Tracing through 2x2 example again: $\\$
$\mbox{---Linear Form---}$ $\\$
$f(x) = a^T x$ $\\$
$\nabla f(x)$ does not depend on x, so $\nabla^2 f(x) = 0$.
$\mbox{---Quadratic Form---}$ $\\$
$f(x) = x^T A x$ $\\$
$\nabla^2 f(x) = A + A^T$ $\\$
For symmetric matrix A: $\\$
$\nabla^2 f(x) = 2A$
Mixed Quadratic Form: $\\$
$f(x,y) = x^T A y$ $\\$
Wrt xx, yy: $H_{xx} = H_{yy} = 0$ $\\$
Wrt xy, yx: $H_{xy} = A$, $H_{yx} = A^T$ (equal when $A$ is symmetric)
Simultaneous gradient descent (continuous time):
$\dot x = -D_1f_1(x,y),\ \dot y = -D_2f_2(x,y)$, simgrad Jacobian
$J(x,y) = \begin{bmatrix} D_1^2f_1(x,y) & D_{12}f_1(x,y) \\ D_{21}f_2(x,y) & D_2^2f_2(x,y) \end{bmatrix}$
(discrete time):
$x^+ = x - \gamma_x D_1f_1(x,y),\
y^+ = y - \gamma_y D_2f_2(x,y)$
```
# Dimensions of the two players' decision variables.
m = 2
n = 2

# Random pd matrices, Cholesky/Gram form (A^T A is symmetric and, for a
# generic random A, positive definite):
np.random.seed(0)
A1 = np.random.randn(n,n)
A1 = A1.T @ A1
A2 = np.random.randn(n,n)
A2 = A2.T @ A2

# Random matrices, not pd (and not symmetric):
B1 = np.random.randn(n,m)
B2 = np.random.randn(n,m)
C1 = np.random.randn(m,m)
C2 = np.random.randn(m,m)

# Define e,h vectors (linear-term coefficients):
e1 = np.random.randn(n)
e2 = np.random.randn(m)
h1 = np.random.randn(m)
h2 = np.random.randn(n)

# Convert matrices into float32 torch tensors
A1 = torch.tensor(A1, dtype = torch.float)
A2 = torch.tensor(A2, dtype = torch.float)
B1 = torch.tensor(B1, dtype = torch.float)
B2 = torch.tensor(B2, dtype = torch.float)
C1 = torch.tensor(C1, dtype = torch.float)
C2 = torch.tensor(C2, dtype = torch.float)
e1 = torch.tensor(e1, dtype = torch.float)
e2 = torch.tensor(e2, dtype = torch.float)
h1 = torch.tensor(h1, dtype = torch.float)
h2 = torch.tensor(h2, dtype = torch.float)

# Decision variables as column vectors with autograd enabled.
x1 = torch.ones((n, 1), requires_grad=True)
x2 = torch.ones((m, 1), requires_grad=True)
#Generic Quadratic Cost:
#B_ij, C_ij still rather vague
def f1(x1, x2):
    """Player 1's cost: 0.5 x1'A1 x1 + x1'B1 x2 + 0.5 x2'C1 x2 + e1'x1 + h1'x2."""
    total = 0.5 * x1.t() @ A1 @ x1
    total = total + x1.t() @ B1 @ x2
    total = total + 0.5 * x2.t() @ C1 @ x2
    total = total + e1.t() @ x1
    total = total + h1.t() @ x2
    return total
def f2(x1, x2):
    """Player 2's cost: 0.5 x2'A2 x2 + x2'B2 x1 + 0.5 x1'C2 x1 + e2'x2 + h2'x1."""
    total = 0.5 * x2.t() @ A2 @ x2
    total = total + x2.t() @ B2 @ x1
    total = total + 0.5 * x1.t() @ C2 @ x1
    total = total + e2.t() @ x2
    total = total + h2.t() @ x1
    return total
#Analytical Gradient:
#D wrt x1:
def D1f1(x1, x2):
    """Analytic gradient of f1 wrt x1: A1 x1 + B1 x2 + e1 (column vector).

    Fixes vs. original:
      - `A` was an undefined name (NameError); the matrix is A1.
      - d/dx1 of the bilinear term x1'B1 x2 is B1 x2 — no 1/2 factor.
      - e1 is 1-D; adding it to a (n,1) column broadcast to (n,n), so it is
        reshaped to a column to keep the result (n,1).
    (The 0.5 x1'A1 x1 term gives A1 x1 because A1 is symmetric.)
    """
    return (A1 @ x1) + (B1 @ x2) + e1.reshape(-1, 1)
def D1f2(x1, x2):
    """Analytic gradient of f2 wrt x1: B2'x2 + 0.5 (C2 + C2') x1 + h2.

    Fixes vs. original:
      - d/dx1 of x2'B2 x1 is B2' x2 — no 1/2 factor.
      - C2 is NOT symmetric, so d/dx1 of 0.5 x1'C2 x1 is the symmetrized
        0.5 (C2 + C2') x1, not C2 x1.
      - h2 reshaped to a column so the result stays (n, 1).
    """
    return (B2.t() @ x2) + 0.5 * ((C2 + C2.t()) @ x1) + h2.reshape(-1, 1)
#D wrt x2:
def D2f1(x1, x2):
    """Analytic gradient of f1 wrt x2: B1'x1 + 0.5 (C1 + C1') x2 + h1.

    Fixes vs. original: no 1/2 on the bilinear term B1'x1; C1 is not
    symmetric so the quadratic term differentiates to 0.5 (C1 + C1') x2;
    h1 reshaped to a column to keep the result (m, 1).
    """
    return (B1.t() @ x1) + 0.5 * ((C1 + C1.t()) @ x2) + h1.reshape(-1, 1)
def D2f2(x1, x2):
    """Analytic gradient of f2 wrt x2: A2 x2 + B2 x1 + e2.

    Fixes vs. original: no 1/2 on the bilinear term B2 x1; e2 reshaped to
    a column so the result stays (m, 1). A2 x2 is correct as-is since A2
    is symmetric by construction (A2 = A'A).
    """
    return (A2 @ x2) + (B2 @ x1) + e2.reshape(-1, 1)
#Analytical Hessian:
#H wrt x1:
def H11f1(x1, x2):
    """Hessian of f1 wrt x1: A1 (0.5(A1 + A1') = A1 since A1 is symmetric)."""
    return A1
def H11f2(x1, x2):
    """Hessian of f2 wrt x1: 0.5 (C2 + C2').

    Fix vs. original: C2 is not symmetric, so the Hessian of 0.5 x1'C2 x1
    is the symmetrized 0.5 (C2 + C2'), not C2 itself.
    """
    return 0.5 * (C2 + C2.t())
#H wrt x2:
def H22f1(x1, x2):
    """Hessian of f1 wrt x2: 0.5 (C1 + C1').

    Fix vs. original: C1 is not symmetric, so the Hessian of 0.5 x2'C1 x2
    is the symmetrized 0.5 (C1 + C1'), not C1 itself.
    """
    return 0.5 * (C1 + C1.t())
def H22f2(x1, x2):
    """Hessian of f2 wrt x2: A2 (correct since A2 = A'A is symmetric)."""
    return A2
#Computational Gradient:
# Fix vs. original: `print(f1(x1, x2).grad)` always printed None — `.grad`
# is populated only on *leaf* tensors (x1, x2) after a backward pass, never
# on a freshly computed output. torch.autograd.grad differentiates the
# scalar cost wrt each player's variable directly; create_graph=True keeps
# the graph so we can differentiate again for the Hessian.
g1 = torch.autograd.grad(f1(x1, x2).sum(), x1, create_graph=True)[0]
g2 = torch.autograd.grad(f2(x1, x2).sum(), x2, create_graph=True)[0]
print(g1)  # should match the analytic D1f1(x1, x2)
print(g2)  # should match the analytic D2f2(x1, x2)

#Computational Hessian:
# Differentiate each component of g1 wrt x1 again, stacking rows.
h11_rows = [torch.autograd.grad(g1[i, 0], x1, retain_graph=True)[0].reshape(1, -1)
            for i in range(g1.shape[0])]
h11 = torch.cat(h11_rows, dim=0)
print(h11)  # should match the analytic H11f1 (= A1)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Meghababu1999/sserd/blob/main/Untitled2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import pandas as pd
import numpy as np
import math as m
# Build three hand-entered pulsar catalogues and save each to CSV.
# Columns: po = rotation period (s), pdot = period derivative,
# D in Kpc = distance, Age = characteristic age (yr), B_s = surface dipole
# field (G), Edot = spin-down luminosity (erg/s), Edot2 = energy flux at
# the Sun, B_Lc = field at the light cylinder (G).

# High-energy (radio-loud) pulsars.
G={'Name of pulsar':['J0205+6449','J0218+4232','J0437-4715','J0534+2200','J1105-6107','J1124-5916','J1617-5055','J1930+1852','J2124-3358','J2229+6114'],'po':[0.06571592849324,0.00232309053151224,0.005757451936712637,0.0333924123,0.0632021309179,0.13547685441,0.069356847,0.136855046957,0.00493111494309662,0.05162357393],'pdot':[1.93754256e-13,7.73955e-20,5.729215e-20,4.20972e-13,1.584462e-14,7.52566e-13,1.351e-13,7.5057e-13,2.05705e-20,7.827e-14],'D in Kpc':[3.200,3.150,0.157,2.000,2.360, 5.000, 4.743, 7.000, 0.410, 3.000],'Age':[5.37e+03,4.76e+08,1.59e+09,1.26e+03,6.32e+04,2.85e+03, 8.13e+03, 2.89e+03, 3.8e+09, 1.05e+04],'B_s':[3.61e+12,4.29e+08,4.29e+08,3.79e+12,1.01e+12,1.02e+13,3.1e+12,1.03e+13,3.22e+08,2.03e+12],
'Edot':[2.7e+37,2.4e+35,1.2e+34,4.5e+38,2.5e+36,1.2e+37,1.6e+37,1.2e+37,6.8e+33,2.2e+37],
'Edot2':[2.6e+36,2.5e+34,4.8e+35,1.1e+38,4.4e+35,4.8e+35,7.1e+35,2.4e+35,2.4e+35,2.5e+36],
'B_Lc':[1.19e+05,3.21e+05,2.85e+04,9.55e+05,3.76e+04,3.85e+04, 8.70e+04, 3.75e+04, 2.52e+04, 1.39e+05]}
l=pd.DataFrame(G)
l.to_csv('High_energy_pulsar.csv')
l

# Magnetars.
# NOTE(review): missing values are entered as the *string* 'NaN', which
# forces the affected columns to object dtype; np.nan would keep them
# numeric. TODO confirm and fix upstream.
R={'Name of pulsar':['J0100-7211','J0525-6607','J1708-4008','J1808-2024','J1809-1943','J1841-0456','J1907+0919','J2301+5852','J1745-2900','J0525-6607'],'po':[8.020392, 0.35443759451370,11.0062624,7.55592,5.540742829,11.7889784, 5.198346,6.9790709703,3.763733080,8.0470],'pdot':[1.88e-11, 7.36052e-17,1.960e-11,5.49e-10,2.8281e-12,4.092e-11,9.2e-11,4.7123e-13,1.756e-11,6.5e-11],'D in Kpc':[59.700, 1.841,3.800,13.000,3.600,9.600,'NaN',3.300,8.300,'NaN'],'Age':[6.76e+03,7.63e+07,8.9e+03,218,3.1e+04,4.57e+03,895,2.35e+05,3.4e+03,1.96e+03],'B_s':[3.93e+14,1.63e+11,4.7e+14,2.06e+15,1.27e+14,7.03e+14,7e+14,5.8e+13,2.6e+14,7.32e+14],'Edot':[1.4e+33,6.5e+31,5.8e+32,5.0e+34,6.6e+32,9.9e+32,2.6e+34,5.5e+31,1.3e+34,4.9e+33],'Edot2':[4.0e+29,1.9e+31,4.0e+31,3.0e+32,5.1e+31,1.1e+31,'NaN',5.0e+30,1.9e+32,'NaN'],'B_Lc':[7.14e+00,3.44e+01,3.30e+00,4.48e+01,6.98e+00,4.02e+00,4.67e+01,1.60e+00,4.57e+01,1.32e+01]}
c=pd.DataFrame(R)
c.to_csv('magnetor_pulsar.csv')
c

# Non-radio pulsars.
A={'Name of pulsar':['J0537-6910','J0633+1746','J0543+2329','J1811-1925','J1846-0258','J0628+0909','J0633+1746','J0636-4549','J1811-4930','J1812-1718'],'po':[0.0161222220245,0.2370994416923,0.245983683333,0.06466700,0.32657128834,3.763733080,0.2370994416923,1.98459736713,1.4327041968,1.20537444137],'pdot':[5.1784338e-14,1.097087e-14,1.541956e-14,4.40e-14,7.107450e-12,0.5479e-15,1.097087e-14,3.1722e-15,2.254e-15,1.9077e-14],'D in Kpc':[49.700,0.190,1.565,5.000,5.800,1.771,0.190,0.383,1.447,3.678],'Age':[4.93e+03,3.42e+05,2.53e+05,2.33e+04,728,3.59e+07,3.42e+05,9.91e+06,1.01e+07,1e+06],'B_s':[9.25e+11,1.63e+12,1.97e+12,1.71e+12,4.88e+13,8.35e+11,1.63e+12,2.54e+12,1.82e+12,4.85e+12],'Edot':[4.9e+38,3.2e+34,4.1e+34,6.4e+36,8.1e+36,1.1e+31,3.2e+34,1.6e+31,3.0e+31,4.3e+32],'Edot2':[2.0e+35,9.0e+35,1.7e+34,2.6e+35,2.4e+35,3.6e+30,9.0e+35,1.1e+32,1.4e+31,3.2e+31],'B_Lc':[2.07e+06,1.15e+03,1.24e+03,5.92e+04,1.31e+04,4.09e+00,1.15e+03,3.05e+00,5.80e+00,2.60e+01]}
p=pd.DataFrame(A)
p.to_csv('Non_Radio_pulsar.csv')
p

# Combined catalogue of all three populations.
o=pd.concat([l,p,c],ignore_index=True)
o.to_csv('Combined_data.csv')
o
# Pull out named columns of each catalogue for plotting.
age_comb = o['Age'] # characteristic age (yr) #comb - combined data
dist_comb = o['D in Kpc'] # distance in kpc
p_0_comb = o['po'] # period of rotation (s)
pdot_comb = o['pdot'] # time derivative of period
# radio luminosity at 400 MHz (mJy kpc**2)
b_s_comb = o['B_s'] # surface dipole magnetic field (Gauss)
e_dot_comb = o['Edot'] # spin down energy loss rate (erg s**-1)
e_dot2_comb = o['Edot2'] # energy flux at sun (ergs s**-1 kpc**-2)
# surface magnetic dipole from P_1_i (period derivative corrected for Shklovskii effect) (Gauss)
b_lc_comb = o['B_Lc'] # Magnetic field at light cylinder (Gauss)

#Radio high energy pulsars
age_r = l['Age'] # characteristic age (yr) # r - radio
dist_r = l['D in Kpc'] # distance in kpc
p_0_r = l['po'] # period of rotation (s)
pdot_r = l['pdot'] # time derivative of period # radio luminosity at 400 MHz (mJy kpc**2)
b_s_r = l['B_s'] # surface dipole magnetic field (Gauss)
e_dot_r = l['Edot'] # spin down energy loss rate (erg s**-1)
e_dot2_r = l['Edot2'] # energy flux at sun (ergs s**-1 kpc**-2)
# surface magnetic dipole from P_1_i (period derivative corrected for Shklovskii effect) (Gauss)
b_lc_r = l['B_Lc'] # Magnetic field at light cylinder (Gauss)

# Non radio Pulsars
age_nr = p['Age'] # characteristic age (yr) #nr - non radio
dist_nr = p['D in Kpc'] # distance in kpc
p_0_nr = p['po'] # period of rotation (s)
pdot_nr = p['pdot'] # time derivative of period
# radio luminosity at 400 MHz (mJy kpc**2)
b_s_nr = p['B_s'] # surface dipole magnetic field (Gauss)
e_dot_nr = p['Edot'] # spin down energy loss rate (erg s**-1)
e_dot2_nr = p['Edot2'] # energy flux at sun (ergs s**-1 kpc**-2)
# surface magnetic dipole from P_1_i (period derivative corrected for Shklovskii effect) (Gauss)
b_lc_nr = p['B_Lc'] # Magnetic field at light cylinder (Gauss)

# Magnetars
age_m = c['Age'] # characteristic age (yr) #m - magnetars
dist_m = c['D in Kpc'] # distance in kpc
p_0_m = c['po'] # period of rotation (s)
pdot_m = c['pdot'] # time derivative of period
# radio luminosity at 400 MHz (mJy kpc**2)
b_s_m = c['B_s'] # surface dipole magnetic field (Gauss)
e_dot_m = c['Edot'] # spin down energy loss rate (erg s**-1)
e_dot2_m = c['Edot2'] # energy flux at sun (ergs s**-1 kpc**-2)
# surface magnetic dipole from P_1_i (period derivative corrected for Shklovskii effect) (Gauss)
b_lc_m = c['B_Lc'] # Magnetic field at light cylinder (Gauss)
# Quick look at pairwise relationships across all columns.
import seaborn as sns
sns.pairplot(o)

import matplotlib.pyplot as plt
# Moment of inertia from the spin-down relation Edot = 4 pi^2 I Pdot / P^3,
# rearranged to I = Edot * P^3 / (4 pi^2 Pdot).  Radio high-energy pulsars:
Ir=e_dot_r*p_0_r**3/(4*np.pi**2*pdot_r)
print(Ir)
plt.scatter(e_dot_r,Ir)
plt.title('Radio high energy pulsar')
plt.xlabel('spin down luminosity e_dot (ergs s^-1)')
plt.ylabel('moment of inertia I (g.cm^2)')

import matplotlib.pyplot as plt
# Same quantity for the non-radio pulsars.
Inr=e_dot_nr*p_0_nr**3/(4*np.pi**2*pdot_nr)
plt.scatter(e_dot_nr,Inr,marker='d')
plt.title('non Radio pulsar')
plt.xlabel('spin down luminosity e_dot (ergs s^-1)')
plt.ylabel('moment of inertia I (g.cm^2)')
Inr

import matplotlib.pyplot as plt
# Same quantity for the magnetars.
Im=e_dot_m*p_0_m**3/(4*np.pi**2*pdot_m)
plt.scatter(e_dot_m,Im,marker='d')
plt.title('MAGNETARS')
plt.xlabel('spin down luminosity e_dot (ergs s^-1)')
plt.ylabel('moment of inertia I (g.cm^2)')
Im
import matplotlib.pyplot as plt
# Moment of inertia for the *combined* catalogue,
# I = Edot * P^3 / (4 pi^2 Pdot).
Ic = e_dot_comb * p_0_comb**3 / (4 * np.pi**2 * pdot_comb)
plt.scatter(e_dot_comb, Ic, marker='d')
# Fix: this panel plots the combined data, but the title said 'MAGNETARS'
# (copy-paste from the previous cell).
plt.title('Combined data')
plt.xlabel('spin down luminosity e_dot (ergs s^-1)')
plt.ylabel('moment of inertia I (g.cm^2)')
Ic
# Overlay the three populations on one scatter plot.
plt.title('combined data')
plt.scatter(e_dot_m,Im,marker='s')
plt.scatter(e_dot_nr,Inr,marker='d')
plt.scatter(e_dot_r,Ir)
plt.legend(['magnetars','non radio','radio'])
plt.xlabel('spin down luminosity e_dot (ergs s^-1)')
plt.ylabel('moment of inertia I (g.cm^2)')

# 2x3 panel of moment-of-inertia histograms: one per population, plus a
# combined panel.
plt.figure(figsize = (16,11))
plt.subplot(231)
plt.hist(Ir)
plt.title('Radio Pulsars - High Energy')
plt.xlabel('Moment of inertia I (g.cm^2)')
plt.ylabel('Numer of Pulsars in that range')
plt.subplot(232)
plt.hist(Inr, color='k')
plt.title('Non Radio Pulsars')
plt.xlabel('Moment of inertia I (g.cm^2)')
plt.ylabel('Numer of Pulsars in that range')
plt.subplot(233)
plt.hist(Im, color='r')
plt.title('Magnetars')
plt.xlabel('Moment of inertia I (g.cm^2)')
plt.ylabel('Numer of Pulsars in that range')
plt.subplot(235)
plt.hist([Ir, Inr, Im])
plt.legend(['Radio', 'Non Radio', 'Magnetars'])
plt.title('Combined data')
plt.xlabel('Moment of inertia I (g.cm^2)')
plt.ylabel('Numer of Pulsars in that range')
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import itertools
import pymc3 as pm
%matplotlib inline
#Seismic displacement in three-dimensions
#Defining the main parameters of the model
rho0 = 1800 #Density of the homogeneous medium in kg/m^3, as taken from Beker's thesis paper, pg. 84
alpha = 0.31 #Parameter used in Beker's paper to determine primary wave speed pg. 84
beta = 0.25 #Parameter used in Beker's paper to determine primary wave speed pg 84.
nu = 0.25 #Poisson ratio as taken from Beker's thesis paper pg. 84

#Calculating the wave speeds using above paramters
CP = ((rho0/1000) / alpha)**(1.0 / beta) #Primary (compressional) wave speed, Beker pg. 84
CS = np.sqrt((1-2*nu)/(2-2*nu)) * CP #Secondary (shear) wave speed, Beker pg. 84
Root = np.roots([1, -8, 8 * ((2 - nu)/(1 - nu)), -8 / (1 - nu)]) #Roots of the Rayleigh cubic in (C_R/C_S)^2, Beker pg. 20
# Keep the physical root, i.e. the one in (0, 1), and derive the Rayleigh
# wave speed from it.
# NOTE(review): for nu = 0.25 all three roots are real, so `0<i<1` is safe;
# for other nu, np.roots may return complex values and the comparison would
# raise — TODO confirm if nu is ever changed.
for i in Root:
    if 0<i<1:
        CR = np.sqrt(CS**2 *i) #Rayleigh wave speed

x2_list = np.linspace(0, 500, 24) #x-values to be evaluated
y2_list = np.linspace(0, 500, 24) #y-values to be evaluated
z2_list = np.array([0]) #z-values to be evaluated (surface only)
t2_list = np.linspace(2, 4, 50) #time interval to be evaluated over
#Calculating seismic displacement using equation from Harm's paper "Terrestial Gravity Fluctuations", pg. 31
def xi_horiz(x, y, z, t, f, theta, phi):
    """Horizontal Rayleigh-wave displacement at (x, y, z) and time t.

    f is the frequency in Hz, theta the propagation azimuth and phi a phase
    offset.  Uses the module-level wave speeds CR, CS, CP.
    """
    omega = 2 * np.pi * f                     # angular frequency
    k_r = omega / CR                          # horizontal Rayleigh wavenumber
    k_vec = [np.cos(theta) * k_r,             # Rayleigh wave vector in x-y plane
             np.sin(theta) * k_r]
    k_s = omega / CS                          # secondary (shear) wavenumber
    k_p = omega / CP                          # primary (compressional) wavenumber
    q_s = np.sqrt(k_r**2 - k_s**2)            # vertical decay constant, shear part
    q_p = np.sqrt(k_r**2 - k_p**2)            # vertical decay constant, compressional part
    zeta = np.sqrt(q_p / q_s)                 # amplitude-ratio parameter (Harms pg. 32)
    depth_profile = k_r * np.exp(q_p * z) - zeta * np.exp(q_s * z)
    phase = np.dot(k_vec, [x, y]) - omega * t + phi
    return depth_profile * np.sin(phase)
def xi_vert(x, y, z, t, f, theta, phi):
    """Vertical Rayleigh-wave displacement at (x, y, z) and time t (Harms pg. 31).

    Same parameters and module-level wave speeds (CR, CS, CP) as xi_horiz.
    """
    omega = 2 * np.pi * f                     # angular frequency
    k_r = omega / CR                          # horizontal Rayleigh wavenumber
    k_vec = [np.cos(theta) * k_r,             # Rayleigh wave vector in x-y plane
             np.sin(theta) * k_r]
    k_s = omega / CS                          # secondary (shear) wavenumber
    k_p = omega / CP                          # primary (compressional) wavenumber
    q_s = np.sqrt(k_r**2 - k_s**2)            # vertical decay constant, shear part
    q_p = np.sqrt(k_r**2 - k_p**2)            # vertical decay constant, compressional part
    zeta = np.sqrt(q_p / q_s)                 # amplitude-ratio parameter (Harms pg. 32)
    depth_profile = q_p * np.exp(q_p * z) - zeta * k_r * np.exp(q_s * z)
    phase = np.dot(k_vec, [x, y]) - omega * t + phi
    return depth_profile * np.cos(phase)
#Defining displacement vectors to be used in the Newtonian Noise calculation, according to Harm's definition in "Terrestial Gravity Fluctuations" on pg.32
def xi_vect(x, y, z, t, f, theta, phi):
    """Full 3-D displacement vector [xi_x, xi_y, xi_z] of the Rayleigh wave."""
    horizontal = xi_horiz(x, y, z, t, f, theta, phi)
    vertical = xi_vert(x, y, z, t, f, theta, phi)
    return np.array([np.cos(theta) * horizontal,
                     np.sin(theta) * horizontal,
                     vertical])
def xi_horiz_vect(x, y, z, t, f, theta, phi):
    """Horizontal (x-y) components of the Rayleigh-wave displacement vector."""
    horizontal = xi_horiz(x, y, z, t, f, theta, phi)
    return np.array([np.cos(theta) * horizontal,
                     np.sin(theta) * horizontal])
#Newtonian noise from an impulse of a Rayleigh wave in three dimensions
#Defining constants
G = 6.67e-11 #Newton's constant of gravitation
rho0 = 1800 #Density of the medium
x2_list = np.linspace(0, 500, 31) #x-values to be evaluated
y2_list = np.linspace(0, 500, 31) #y-values to be evaluated
z2_list = np.array([0]) #z-values to be evaluated (surface only)
t2_list = np.linspace(0, 1, 200) #time interval to be evaluated over
# Volume element represented by each grid point: a 500 x 500 x 3 m region
# divided evenly among the grid points.
V = (500 * 500 * 3) / (len(x2_list) * len(y2_list) * len(z2_list))
#Calculating the Seismic Newtonian Noise contribution of a single point using equation (4.13) from Beker's thesis paper on pg. 92
def seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0):
    """Newtonian-noise contribution of the mass element at (x, y, z) acting
    on a test mass at (x0, y0, z0), per Beker eq. (4.13)."""
    separation = [x - x0, y - y0, z - z0]     # vector from test mass to mass element
    distance = np.linalg.norm(separation)     # |r|
    direction = separation / distance         # unit vector r-hat
    displacement = xi_vect(x, y, z, t, f, theta, phi)
    return G * rho0 * V * (1.0 / distance**3) * (
        displacement - 3 * np.dot(direction, displacement) * direction)
grid = itertools.product(x2_list, y2_list, z2_list) #Cartesian product of the coordinate lists
gridlist = list(grid) #materialized list of the grid points
points = len(x2_list) * len(y2_list) * len(z2_list) #number of points in the array
nnlist = np.zeros((1,1,points,3)) #buffer for per-point Newtonian-noise vectors (mutated below)
#Defining a spiral array (11 fixed sensor positions, shape (1, 11, 3)) to use for the Newtonian Noise calculation
pointlist = np.array([[[0,0,0], [np.pi * np.sqrt(3) / 12, np.pi / 12, 0], [np.pi * np.sqrt(2) / 8, np.pi * np.sqrt(2) / 8, 0], [-np.pi / 3, np.pi * np.sqrt(3) / 3, 0], [-np.pi, 0, 0], [- 5 * np.pi * np.sqrt(2) / 8, -5 * np.pi * np.sqrt(2) / 8, 0], [0, -3 * np.pi / 2, 0], [11 * np.pi * np.sqrt(3)/12, -11*np.pi/12,0], [2*np.pi, 0, 0], [7 * np.pi/6, 7*np.pi*np.sqrt(3)/6,0], [17*np.pi*np.sqrt(3)/12, 17*np.pi/2, 0]]])
#Defining function to calculate total Seismic Newtonian Noise at a single point in time
def seisnn3d_total(t, f, theta, phi, x0, y0, z0):
    """Magnitude of the summed Newtonian-noise vector over the spiral array
    at time t, counting only points inside the travelling wavefront band.

    Mutates the module-level `nnlist` buffer in place.
    Fix vs. original: `enumerate(pointlist)` iterated the outer axis of the
    (1, 11, 3) array, so the body only ever ran for i = 0 and a single
    spiral point contributed; iterate the 11 points via pointlist[0].
    """
    for i, point in enumerate(pointlist[0]):
        x = point[0]  # x-coordinate of spiral point i
        y = point[1]  # y-coordinate
        z = point[2]  # z-coordinate
        # Include the point only when it lies within a band of width CR/2
        # behind the wavefront propagating at speed CR in direction theta.
        lower = (-1.0 / np.tan(theta)) * x + (CR * t - (CR / 2)) / np.cos(theta)
        upper = (-1.0 / np.tan(theta)) * x + (CR * t) / np.cos(theta)
        if lower <= y <= upper:
            nnlist[0, 0, i] = seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0)
        else:
            nnlist[0, 0, i] = np.array([0, 0, 0])
    nntotal = np.sum(nnlist, 2)      # sum the per-point contributions
    nntotal_mag = np.linalg.norm(nntotal)
    return nntotal_mag
# Evaluate the total NN time series for f = 2 Hz, theta = pi/4, phi = 0,
# with the test mass at (250, 250, 1), then plot it.
T_list = np.zeros(len(t2_list))
for i, tn in enumerate(t2_list):
    T_list[i] = seisnn3d_total(tn, 2, np.pi/4, 0, 250, 250, 1)
fig = plt.figure(figsize =(20,10))
ax = fig.add_subplot(111, xlabel = 'Time (Sec.)', ylabel = 'Total Seismic NN', title = 'Seismic NN due to Propagating Rayleigh Wave')
ax.plot(t2_list, T_list)
#Newtonian noise from a Rayleigh wave in three dimensions
#Defining constants
G = 6.67e-11 #Newton's constant of gravitation
x2_list = np.linspace(0, 500, 201) #x-values to be evaluated (finer grid than above)
y2_list = np.linspace(0, 500, 201) #y-values to be evaluated
z2_list = np.array([0]) #z-values to be evaluated (surface only)
t2_list = np.linspace(3, 5, 50) #time interval to be evaluated over
# Volume element per grid point: a 500 x 500 x 100 m region split evenly.
V = (500 * 500 * 100)/(len(x2_list) * len(y2_list) * len(z2_list))
#Calculating the Seismic Newtonian Noise contribution of a single point using equation (4.13) from Beker's thesis paper on pg. 92
def seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0):
    """Newtonian-noise contribution of the mass element at (x, y, z) acting
    on a test mass at (x0, y0, z0), per Beker eq. (4.13).

    (Re-definition identical to the version above, kept because this
    notebook cell re-uses the updated G / V constants.)
    """
    separation = [x - x0, y - y0, z - z0]     # vector from test mass to mass element
    distance = np.linalg.norm(separation)     # |r|
    direction = separation / distance         # unit vector r-hat
    displacement = xi_vect(x, y, z, t, f, theta, phi)
    return G * rho0 * V * (1.0 / distance**3) * (
        displacement - 3 * np.dot(direction, displacement) * direction)
grid = itertools.product(x2_list, y2_list, z2_list) #Cartesian product of the coordinate lists
gridlist = list(grid) #materialized list of the grid points
points = len(x2_list) * len(y2_list) * len(z2_list) #number of points in the array
nnlist = np.zeros((1,1,points,3)) #buffer for per-point Newtonian-noise vectors (mutated below)
#Defining a spiral array to use for the Newtonian Noise calculation
# NOTE(review): pointlist is redefined here but the total below iterates
# gridlist, not the spiral array — this literal appears unused in this cell.
pointlist = np.array([[[0,0,0], [np.pi * np.sqrt(3) / 12, np.pi / 12, 0], [np.pi * np.sqrt(2) / 8, np.pi * np.sqrt(2) / 8, 0], [-np.pi / 3, np.pi * np.sqrt(3) / 3, 0], [-np.pi, 0, 0], [- 5 * np.pi * np.sqrt(2) / 8, -5 * np.pi * np.sqrt(2) / 8, 0], [0, -3 * np.pi / 2, 0], [11 * np.pi * np.sqrt(3)/12, -11*np.pi/12,0], [2*np.pi, 0, 0], [7 * np.pi/6, 7*np.pi*np.sqrt(3)/6,0], [17*np.pi*np.sqrt(3)/12, 17*np.pi/2, 0]]])
#Defining function to calculate total Seismic Newtonian Noise at a single point in time
def seisnn3d_total(t, f, theta, phi, x0, y0, z0):
    """Magnitude of the Newtonian-noise sum over the full grid at time t.

    Unlike the impulse version above, every grid point contributes (no
    wavefront gating).  Mutates the module-level `nnlist` buffer.
    """
    for i, p in enumerate(gridlist):
        x = gridlist[i][0] #x-coordinate of grid point i
        y = gridlist[i][1] #y-coordinate
        z = gridlist[i][2] #z-coordinate
        # NOTE(review): if (x, y, z) ever equals (x0, y0, z0) the distance in
        # seisnn3d is zero and this divides by zero; the call below avoids it
        # only because z0 = 1 is off the z = 0 grid. TODO confirm.
        nnlist[0,0,i] = seisnn3d(x, y, z, t, f, theta, phi, x0, y0, z0)
    nntotal = np.sum(nnlist, 2)  # sum per-point contributions
    nntotal_mag = np.linalg.norm(nntotal)
    return nntotal_mag
# Evaluate and plot the total NN time series (f = 2 Hz, theta = pi/4,
# phi = 0, test mass at (250, 250, 1)).
T_list = np.zeros(len(t2_list))
for i, tn in enumerate(t2_list):
    T_list[i] = seisnn3d_total(tn, 2, np.pi/4, 0, 250,250,1)
fig = plt.figure(figsize =(20,10))
ax = fig.add_subplot(111, xlabel = 'Time (Sec.)', ylabel = 'Total Seismic NN', title = 'Seismic NN due to Propagating Rayleigh Wave')
ax.plot(t2_list, T_list)
# Time-averaged NN magnitude (duplicated line in the original notebook).
np.sum(T_list) / len(T_list)
np.sum(T_list) / len(T_list)
```
| github_jupyter |
```
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from pathlib import Path
import glob
import numpy as np
import tensorflow as tf
import pickle
import matplotlib.pyplot as plt
import random
import pickle
import os
import config
import data
import random
from natsort import natsorted
import lfp
import gym
# Dataset / environment configuration.
arm = 'UR5'
TEST_DATASET = "UR5_slow_gripper_test"
print('Using local setup')
WORKING_PATH = Path().absolute().parent
print(f'Working path: {WORKING_PATH}')
os.chdir(WORKING_PATH)
STORAGE_PATH = WORKING_PATH
print(f'Storage path: {STORAGE_PATH}')
# Three teleop datasets recorded with the UR5 arm.
TRAIN_DATA_PATHS = [STORAGE_PATH/'data'/x for x in ["pybullet/UR5" , "pybullet/UR5_high_transition" ,"pybullet/UR5_slow_gripper"]]
TEST_DATA_PATH = STORAGE_PATH/'data'/TEST_DATASET

# Create the playroom environment with a GUI window.
import roboticsPlayroomPybullet
env = gym.make('UR5PlayAbsRPY1Obj-v0')
env.render('human')
_ = env.reset()
env.render('playback')
# First 7 observation entries — presumably the arm pose; TODO confirm layout.
env.instance.calc_state()['observation'][0:7]
# Take one small step with an absolute RPY action.
env.step(np.array([ -1.91859640e-02, 1.93180365e-01, 0.2, 0.0,
                    0.0, 0.0, -7.02553025e-06]))
plt.figure(figsize = (20,20))
plt.imshow(env.instance.calc_state()['img'][:,:,:])
```
# Replays the teleop data
- This little loop of code replays the teleop data, and optionally saves the images to create an image dataset
- Every 30 steps it resets state, because minor errors in the physics compound
```
TRAIN_DATA_PATHS
# Load every demo of every training dataset, collecting observations (o),
# actions (a) and achieved goals (ag).
# NOTE(review): o, a, ag are re-initialized at the top of each DIR
# iteration, so the final vstack only contains the last dataset — TODO
# confirm this is intended.
for DIR in TRAIN_DATA_PATHS:
    DIR = str(DIR)
    # DIR = str(TRAIN_DATA_PATHS[0]) # glob/natsorted prefer strings
    obs_act_path = DIR+'/obs_act_etc/'
    o, a, ag = [], [], []
    for demo in natsorted(os.listdir(obs_act_path)):
        traj = np.load(obs_act_path+demo+'/data.npz')
        print(demo, len(traj['obs']))
        o.append(traj['obs']), a.append(traj['acts']), ag.append(traj['achieved_goals'])
    print('________________________', len(np.vstack(o)))
o, a, ag = np.vstack(o), np.vstack(a), np.vstack(ag)
import time
# Kinematic replay of the last-loaded demo: reset the arm joints and the
# object pose at every recorded timestep.
jp = traj['joint_poses']
ag = traj['achieved_goals']
for i in range(0, len(jp)):
    time.sleep(0.02)
    env.instance.reset_arm_joints(env.instance.arm, jp[i, :])
    # Fix: the original `env.instance.reset_object_pos(ag[])` was a syntax
    # error; reset the object to the achieved goal at the current timestep
    # (TODO confirm reset_object_pos accepts this row layout).
    env.instance.reset_object_pos(ag[i, :])
o.shape
# Reset the environment to the first recorded observation.
env.reset(o[0,:])
# Histogram each action dimension.
d = a
for i in range(0, d.shape[1]):
    plt.hist(d[:,i], bins=1000)
    #plt.xlim(-0.2,0.2)
    plt.show()
# Histogram the per-dimension difference between actions and the first 7
# observation entries.
d = a - o[:, :7]
for i in range(0, d.shape[1]):
    plt.hist(d[:,i], bins=1000)
    plt.xlim(-0.2,0.2)
    plt.show()
d = d[1:] - d[:-1]
# Overlay a short window of observations against actions per dimension.
d = o[150000:150020]
f = a[150000:150020]
for i in range(0, d.shape[1]):
    plt.plot(np.linspace(0,len(d),len(d)), d[:,i])
    plt.plot(np.linspace(0,len(d),len(d)), f[:,i])
    plt.show()
import scipy.misc
from IPython.display import display, clear_output
# Trajectory keys stored in each demo's data.npz.
keys = ['obs', 'acts', 'achieved_goals', 'joint_poses', 'target_poses', 'acts_quat', 'acts_rpy_rel', 'velocities', 'obs_quat']
#
# Replay every demo through the physics engine in 30-step chunks: restore a
# saved .bullet state at each chunk boundary (physics errors compound over
# longer horizons), then step through the recorded actions.
for DIR in TRAIN_DATA_PATHS:
    DIR = str(DIR) # glob/natsorted prefer strings
    obs_act_path = DIR+'/obs_act_etc/'
    for demo in natsorted(os.listdir(obs_act_path)):
        print(demo)
        start_points = natsorted(glob.glob(DIR+'/states_and_ims/'+str(demo)+'/env_states/*.bullet'))
        traj = np.load(obs_act_path+demo+'/data.npz')
        d = {k:traj[k] for k in keys}
        acts = d['acts']
        set_len = len(acts)
        start = 0
        end= min(start+30, set_len)
        print(DIR+'/states_and_ims/'+str(demo)+'/ims')
        try:
            os.makedirs(DIR+'/states_and_ims/'+str(demo)+'/ims')
        except:
            pass  # directory already exists
        for start_point in start_points:
            env.p.restoreState(fileName=start_point)
            env.instance.updateToggles() # need to do it when restoring, colors not carried over
            for i in range(start, end):
                o,r,_,_ = env.step(acts[i])
            start += 30
            end = min(start+30, set_len)
import scipy.misc
from IPython.display import display, clear_output

keys = ['obs', 'acts', 'achieved_goals', 'joint_poses', 'target_poses', 'acts_quat', 'acts_rpy_rel', 'velocities', 'obs_quat', 'gripper_proprioception']

# Replay every demo in each training directory from its saved .bullet start
# states, stepping the recorded actions in windows of 30 frames.
for DIR in TRAIN_DATA_PATHS:
    # BUG FIX: entries may be pathlib.Path objects. The original joined with
    # Path '/' on one line and str '+' on the next, which raises TypeError;
    # normalise to str once and use str concatenation throughout.
    DIR = str(DIR)
    obs_act_path = DIR + '/obs_act_etc/'
    obs_act_path2 = DIR + '/obs_act_etc2/'  # BUG FIX: '/' separator was missing
    for demo in natsorted(os.listdir(obs_act_path)):
        print(demo)
        start_points = natsorted(glob.glob(DIR + '/states_and_ims/' + str(demo) + '/env_states/*.bullet'))
        traj = np.load(obs_act_path + demo + '/data.npz')
        d = {k: traj[k] for k in keys}
        acts = d['acts']
        set_len = len(acts)
        start = 0
        end = min(start + 30, set_len)
        print(DIR + '/states_and_ims/' + str(demo) + '/ims')
        try:
            os.makedirs(DIR + '/states_and_ims/' + str(demo) + '/ims')
        except:
            pass  # directory already exists; best-effort creation
        for start_point in start_points:
            env.p.restoreState(fileName=start_point)
            env.panda.updateToggles()  # need to do it when restoring, colors not carried over
            for i in range(start, end):
                o, r, _, _ = env.step(acts[i])
            start += 30
            end = min(start + 30, set_len)
# try:
# os.makedirs(obs_act_path2+demo)
# except:
# pass
# np.savez(obs_act_path2+demo+'/data', obs=d['obs'], acts=d['acts'], achieved_goals=d['achieved_goals'],
# joint_poses=d['joint_poses'],target_poses=d['target_poses'], acts_quat=d['acts_quat'],
# acts_rpy_rel=d['acts_rpy_rel'], velocities = d['velocities'],
# obs_quat=d['obs_quat'], gripper_proprioception=d['gripper_proprioception'])
env.p.restoreState(fileName=path)
vid_path = 'output/videos/trajectory.mp4'
with imageio.get_writer(vid_path, mode='I') as writer:
for i in range(start, start+WINDOW_SIZE):
o ,r, d, _ = env.step(actions[i,:])
writer.append_data(o['img'])
clear_output(wait=True)
fig = plt.imshow(o['img'])
plt.show()
keys = ['obs', 'acts', 'achieved_goals', 'joint_poses', 'target_poses', 'acts_quat', 'acts_rpy_rel', 'velocities', 'obs_quat', 'gripper_proprioception']
for DIR in [TRAIN_DATA_PATHS[1]]:
obs_act_path = os.path.join(DIR, 'obs_act_etc/')
for demo in natsorted(os.listdir(obs_act_path)):
if int(demo)>18:
print(demo)
start_points = natsorted(glob.glob(str(DIR/'states_and_ims'/str(demo)/'env_states/*.bullet')))
traj = np.load(obs_act_path+demo+'/data.npz')
d = {k:traj[k] for k in keys}
acts = d['acts']
set_len = len(acts)
start = 0
end= min(start+30, set_len)
gripper_proprioception = []
for start_point in start_points:
env.p.restoreState(fileName=start_point)
for i in range(start, end):
o,r,_,_ = env.step(acts[i])
#print(d['gripper_proprioception'][i])
time.sleep(0.015)
start += 30
end = min(start+30, set_len)
#dataset, cnt = data.create_single_dataset(dataset_path)
def load_data(path, keys):
    """Load every demo under ``path``/obs_act_etc into one flat dataset.

    Each demo directory contains a ``data.npz``. For each requested key the
    per-demo arrays are stacked into one (N_total, D) array; per-timestep
    ``sequence_index`` / ``sequence_id`` columns are added so individual
    trajectories can be recovered after stacking.

    Args:
        path: root directory containing an ``obs_act_etc/`` folder of demos.
        keys: npz keys to extract from every demo.

    Returns:
        dict mapping each key (plus 'sequence_index', 'sequence_id') to a
        2-D numpy array stacked across all demos.
    """
    dataset = {k: [] for k in keys + ['sequence_index', 'sequence_id']}
    obs_act_path = os.path.join(path, 'obs_act_etc/')
    for demo in natsorted(os.listdir(obs_act_path)):
        print(demo)  # progress: demos can be numerous
        traj = np.load(os.path.join(obs_act_path, demo, 'data.npz'))
        for k in keys:
            d = traj[k]
            if len(d.shape) < 2:
                d = np.expand_dims(d, axis=1)  # was (N,), needs (N, 1) so vstack aligns
            dataset[k].append(d.astype(np.float32))
        timesteps = len(traj['obs'])
        dataset['sequence_index'].append(np.arange(timesteps, dtype=np.int32).reshape(-1, 1))
        dataset['sequence_id'].append(np.full(timesteps, fill_value=int(demo), dtype=np.int32).reshape(-1, 1))
    # convert the per-demo lists to single stacked arrays
    for k in keys + ['sequence_index', 'sequence_id']:
        dataset[k] = np.vstack(dataset[k])
    return dataset
keys = ['obs', 'acts', 'achieved_goals', 'joint_poses', 'target_poses', 'acts_rpy', 'acts_rpy_rel', 'velocities', 'obs_rpy', 'obs_rpy_inc_obj', 'gripper_proprioception']
dataset = load_data(UR5, keys)
#transition_dataset = load_data(UR5_25, keys)
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
tfpl = tfp.layers
scaling = np.array([256.0/4, 256.0/2]).astype(np.float32)
def logistic_mixture(inputs, quantized = True):
    """Build a (optionally quantized) mixture-of-logistics distribution.

    Args:
        inputs: tuple ``(weightings, mu, scale)`` — mixture logits, component
            means and component scales. ``mu`` is multiplied column-wise by
            the module-level ``scaling`` array, so shapes must broadcast
            against it.
        quantized: if True, each logistic component is wrapped in a
            QuantizedDistribution clamped to [-128, 128], and the final
            mixture is rescaled by 1/scaling.

    Returns:
        A tensorflow_probability distribution (MixtureSameFamily, possibly
        wrapped in a TransformedDistribution).

    NOTE(review): depends on the module-level ``scaling`` global and prints
    debug output on every call.
    """
    weightings, mu, scale = inputs
    print(mu.shape, scaling.shape, scale.shape, weightings.shape)  # debug
    # Means are produced in normalised units; rescale into quantised units.
    mu = mu*np.expand_dims(scaling,1)
    print(mu)  # debug
    dist = tfd.Logistic(loc=mu, scale=scale)

    if quantized:
        # Shift by -0.5 before quantising so each integer bin is centred,
        # then clamp all probability mass to [-128, 128].
        dist = tfd.QuantizedDistribution(
            distribution=tfd.TransformedDistribution(
                distribution=dist,
                bijector=tfb.Shift(shift=-0.5)),
            low=-128.,
            high=128.
        )
    mixture_dist = tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(logits=weightings),
        components_distribution=dist,
        validate_args=True
    )
    print(mixture_dist)  # debug
    if quantized:
        # Map the integer-valued mixture back to the original value range.
        quantized_scale = 1/scaling
        mixture_dist = tfd.TransformedDistribution(
            distribution=mixture_dist,
            bijector=tfb.Scale(scale=quantized_scale)
        )
    return mixture_dist
mu = np.array([[[-1.5, 0.4, 0.4],[-0.2, 0.3, 0.3]]]).astype(np.float32)
std = np.array([[[1.0,1.0,1],[1.0,1.0,1]]]).astype(np.float32)
weights = np.array([[[1,1,1],[1,1,1]]]).astype(np.float32)
m = logistic_mixture((weights,mu,std))
#m = logistic_mixture(([1], [0.06], [1]))
m.sample()
samples = np.array([m.sample().numpy() for i in range(0,100)])
samples.shape
samples[:,0]
plt.hist(np.array(samples[:,:,0]), bins=100)
plt.plot(np.linspace(-0.5, 0.5, 100),m.log_prob(np.linspace(-0.5, 0.5, 100)))
# Coverage analysis
np.set_printoptions(suppress=True)
ag = dataset['achieved_goals']
t_ag = transition_dataset['achieved_goals']
def see_diff(ag):
    """Plot the per-step change in achieved goals and print how many steps are static."""
    step_change = abs(np.sum(ag[1:] - ag[:-1], axis=-1))
    n_static = sum(step_change == 0)
    print(n_static)
    plt.plot(step_change)
see_diff(ag[:150000])
see_diff(t_ag[:150000])
mins = np.min(dataset['achieved_goals'], axis = 0)
maxes = np.max(dataset['achieved_goals'], axis = 0)
bins = np.linspace(mins,maxes+0.01, 11)
idx = 0
qs = []
for idx in range(0,ag.shape[1]):
quantiles = np.digitize(dataset['achieved_goals'][:,idx], bins[:,idx])
qs.append(quantiles)
qs = np.array(qs).T
qs.shape
np.unique(qs, axis=0).shape[0]
from tqdm import tqdm
step2 = []
count2 = []
for i in tqdm(np.linspace(1, len(qs), 10)):
i = int(i)
step2.append(i)
count2.append(np.unique(qs[:i], axis=0).shape[0])
import matplotlib.pyplot as plt
#plt.plot(step, count)
plt.plot(step2, count2)
import matplotlib.pyplot as plt
plt.plot(step, count)
d['']
print(obs_act_path2+demo)
try:
os.makedirs(obs_act_path2+demo)
except:
pass
np.savez(obs_act_path2+demo+'/data', obs=d['obs'], acts=d['acts'], achieved_goals=d['achieved_goals'],
joint_poses=d['joint_poses'],target_poses=d['target_poses'], acts_rpy=d['acts_rpy'],
acts_rpy_rel=d['acts_rpy_rel'], velocities = d['velocities'],
obs_rpy=d['obs_rpy'], gripper_proprioception=d['gripper_proprioception'])
d['obs']
np.load(obs_act_path2+demo+'/data.npz', allow_pickle=True)['obs']
os.makedirs(obs_act_path2, exist_ok=True)  # BUG FIX: `os.make_dirs` does not exist; exist_ok matches the try/except-pass intent used elsewhere in this notebook
env.step(acts[i])
print(start_points)
rpy_obs = 'obs_rpy' #'rpy_obs'
def load_data(path, keys):
    """Load every demo under ``path``/obs_act_etc into one flat dataset.

    Stacks each requested npz key across demos and appends per-timestep
    ``sequence_index`` / ``sequence_id`` columns.

    FIX (consistency): the sibling version of this loader promotes 1-D
    arrays to (N, 1) before stacking; without that guard ``np.vstack``
    misaligns any scalar-per-timestep key. The same guard is applied here.
    """
    dataset = {k: [] for k in keys + ['sequence_index', 'sequence_id']}
    obs_act_path = os.path.join(path, 'obs_act_etc/')
    for demo in natsorted(os.listdir(obs_act_path)):
        print(demo)  # progress
        traj = np.load(os.path.join(obs_act_path, demo, 'data.npz'))
        for k in keys:
            d = traj[k]
            if len(d.shape) < 2:
                d = np.expand_dims(d, axis=1)  # was (N,), needs (N, 1)
            dataset[k].append(d.astype(np.float32))
        timesteps = len(traj['obs'])
        dataset['sequence_index'].append(np.arange(timesteps, dtype=np.int32).reshape(-1, 1))
        dataset['sequence_id'].append(np.full(timesteps, fill_value=int(demo), dtype=np.int32).reshape(-1, 1))
    # convert the per-demo lists to single stacked arrays
    for k in keys + ['sequence_index', 'sequence_id']:
        dataset[k] = np.vstack(dataset[k])
    return dataset
keys = ['obs', 'acts', 'achieved_goals', 'joint_poses', 'target_poses', 'acts_rpy', 'acts_rpy_rel', 'velocities', 'obs_rpy']
dataset = load_data(PYBULLET_DATA_DIR, keys)
obs_act_path = os.path.join(path, 'obs_act_etc/')
starts = []
idxs = []
fs = []
for f in natsorted(os.listdir(obs_act_path)):
potential_start_points = glob.glob(TEST_DIR+'/states_and_ims/'+str(f)+'/env_states/*.bullet')
potential_start_idxs = [int(x.replace('.bullet','').replace(f"{TEST_DIR}/states_and_ims/{str(f)}/env_states/", "")) for x in potential_start_points]
folder = [f]*len(potential_start_idxs)
[starts.append(x) for x in potential_start_points], [idxs.append(x) for x in potential_start_idxs], [fs.append(x) for x in folder]
descriptions = {
1: 'lift up',
2: 'take down',
3: 'door left',
4: 'door right',
5: 'drawer in',
6: 'drawer out',
7: 'pick place',
8: 'press button',
9: 'dial on',
10: 'dial off',
11: 'rotate block left',
12: 'rotate block right',
13: 'stand up block',
14: 'knock down block',
15: 'block in cupboard right',
16: 'block in cupboard left',
17: 'block in drawer',
18: 'block out of drawer',
19: 'block out of cupboard right',
20: 'block out of cupboard left',
}
trajectory_labels = {}
done = []
import time
for i in range(0,len(starts)):
if starts[i] not in done:
data = np.load(TEST_DIR+'obs_act_etc/'+str(fs[i])+'/data.npz')
traj_len = 40#random.randint(40,50)
end = min(len(data['acts'])-1,idxs[i]+traj_len )
acts = data['acts_rpy'][idxs[i]:end]
value = "r"
while value == "r":
env.p.restoreState(fileName=starts[i])
for a in range(0, len(acts)):
env.step(acts[a])
time.sleep(0.01)
value = input("Label:")
if value == 's':
break
elif value == 'r':
pass
else:
trajectory_labels[starts[i]] = descriptions[int(value)]
done.append(starts[i])
np.savez("trajectory_labels", trajectory_labels=trajectory_labels, done=done)
len(starts)
for k,v in trajectory_labels.items():
if v == 'knock':
trajectory_labels[k] = 'knock down block'
starts[i]
left = np.load(TEST_DIR+'left_right.npz')['left']
right = np.load(TEST_DIR+'left_right.npz')['right']
left_complete = []
right_complete = []
for pth in left:
f = pth.split('/')[7]
i = pth.split('/')[9].replace('.bullet', '')
data = np.load(TEST_DIR+'obs_act_etc/'+f+'/data.npz')
o = data['obs'][int(i):int(i)+40]
a = data['acts_rpy'][int(i):int(i)+40]
pth = pth.replace('/content/drive/My Drive/Robotic Learning/UR5_25Hz_test_suite/', TEST_DIR)
left_complete.append((pth, o, a))
for pth in right:
f = pth.split('/')[7]
i = pth.split('/')[9].replace('.bullet', '')
data = np.load(TEST_DIR+'obs_act_etc/'+f+'/data.npz')
o = data['obs'][int(i):int(i)+40]
a = data['acts_rpy'][int(i):int(i)+40]
pth = pth.replace('/content/drive/My Drive/Robotic Learning/UR5_25Hz_test_suite/', TEST_DIR)
right_complete.append((pth, o, a))
for i in range(0,50):
pth, obs, acts = left_complete[np.random.choice(len(left_complete))]
env.p.restoreState(fileName=pth)
for a in range(0, len(acts)):
env.step(acts[a])
time.sleep(0.001)
for i in range(0,50):
pth, obs, acts = right_complete[np.random.choice(len(right_complete))]
env.p.restoreState(fileName=pth)
for a in range(0, len(acts)):
env.step(acts[a])
time.sleep(0.001)
obs_left = np.array([x[1] for x in left_complete])
obs_right = np.array([x[1] for x in right_complete])
import seaborn as sns
fig, axs = plt.subplots(ncols=4, nrows=5,figsize=(20, 20),)
for x in range(0, obs_left.shape[2]):
shape = obs_left.shape
sns.distplot(np.reshape(obs_left[:], [shape[0] * shape[1], shape[2]])[:,x], hist=True, kde=True,
bins=int(180/5), color = 'darkblue',
hist_kws={'edgecolor':'black'},
kde_kws={'linewidth': 4}, ax=axs[mapping[x][0], mapping[x][1]])
shape = obs_right.shape
sns.distplot(np.reshape(obs_right[:], [shape[0] * shape[1], shape[2]])[:,x], hist=True, kde=True,
bins=int(180/5), color = 'orange',
hist_kws={'edgecolor':'orange'},
kde_kws={'linewidth': 4}, ax=axs[mapping[x][0], mapping[x][1]])
plt.show()
acts_left = np.array([x[2] for x in left_complete])
acts_right = np.array([x[2] for x in right_complete])
import seaborn as sns
fig, axs = plt.subplots(ncols=4, nrows=2,figsize=(20, 20),)
for x in range(0, acts_left.shape[2]):
shape = acts_left.shape
sns.distplot(np.reshape(acts_left[:], [shape[0] * shape[1], shape[2]])[:,x], hist=True, kde=True,
bins=int(180/5), color = 'darkblue',
hist_kws={'edgecolor':'black'},
kde_kws={'linewidth': 4}, ax=axs[mapping[x][0], mapping[x][1]])
shape = acts_right.shape
sns.distplot(np.reshape(acts_right[:], [shape[0] * shape[1], shape[2]])[:,x], hist=True, kde=True,
bins=int(180/5), color = 'orange',
hist_kws={'edgecolor':'orange'},
kde_kws={'linewidth': 4}, ax=axs[mapping[x][0], mapping[x][1]])
plt.show()
mapping = []
for i in range(0,5):
for j in range(0,4):
mapping.append([i,j])
mapping
obs_left.shape[2]-1
arm_pos = [0.29, -0.01, 0.51]
b= [0.25, 0.11, 0.02]
realsense_y= translation[2] - bb[0]
realsense_x = translation[1] - bb[1]
realsense_z = translation[0] - bb[2]
# Testing camera transforms
camera_coord = (20,20)
plt.scatter(camera_coord[0], 480-camera_coord[1], s=40)
plt.xlim(0,480)
plt.ylim(0,480)
import math
def gripper_frame_to_robot_frame(x, y, angle):
    """Rotate a 2-D point from the gripper frame into the robot frame.

    The gripper frame's y axis points the opposite way to the robot
    frame's, so y is negated before applying the rotation by ``angle``.
    """
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    y_flipped = -y
    robot_x = x * cos_a - y_flipped * sin_a
    robot_y = x * sin_a + y_flipped * cos_a
    return robot_x, robot_y
current_angle = 0.22
gripper_frame_to_robot_frame(0.02,-0.02, math.pi/2)
path = os.getcwd() + '/sapien_simulator/config/ur5e.srdf'  # '/ocrtoc_task/urdf/ur5e.urdf'
p.loadURDF(path)
# FIX: the original line was the bare statement `height =` (a SyntaxError);
# the intended value was lost. TODO: restore the real height before use.
height = None
os.path.exists(path)
# Testing that diversity does increase with more training data
t_it = iter(train_dataset)
mins = np.min(dataset['obs_rpy'], axis = 0)
maxes = np.max(dataset['obs_rpy'], axis = 0)
shape = dataset['obs_rpy'].shape[1]
bins = np.linspace(mins,maxes+0.01, 11)
def get_quantisation(ags, bins):
    """Digitize each column of ``ags`` against the matching column of ``bins``.

    Args:
        ags: (N, D) array of values to quantise.
        bins: (B, D) array; column ``d`` holds the bin edges for feature ``d``.

    Returns:
        (N, D) integer array of bin indices.

    BUG FIX: the original iterated ``range(0, shape)`` over a *global*
    ``shape`` variable; the column count is now derived from ``ags`` itself
    so the function is self-contained (identical result for the callers in
    this notebook, where the global equalled ags.shape[1]).
    """
    qs = []
    for idx in range(0, ags.shape[1]):
        quantiles = np.digitize(ags[:, idx], bins[:, idx])
        qs.append(quantiles)
    return np.array(qs).T
batch = t_it.next()
o = tf.reshape(batch['obs'][:,:,:], (-1, OBS_DIM))
coverage = get_quantisation(o, bins)
shapes = []
for i in range(0,10):
batch = t_it.next()
o = tf.reshape(batch['obs'][:,:,:], (-1, OBS_DIM))
c = get_quantisation(o, bins)
coverage = np.unique(np.concatenate([coverage, c], 0), axis = 0)
shapes.append(coverage.shape[0])
np.unique(get_quantisation(dataset['obs_rpy'], bins), axis = 0).shape
plt.plot([120215]*11)
plt.plot(old)
plt.plot(shapes)
plt.plot(one)
plt.title("Unique states observed in batches with shuffle size N")
plt.legend(['Unique values', 40, 10, 1])
```
| github_jupyter |
# Optimization of CNN - TPE
In this notebook, we will optimize the hyperparameters of a CNN using the define-by-run model from Optuna.
```
# For reproducible results.
# See:
# https://keras.io/getting_started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
import os
os.environ['PYTHONHASHSEED'] = '0'
import numpy as np
import tensorflow as tf
import random as python_random
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(123)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
python_random.seed(123)
# The below set_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/random/set_seed
tf.random.set_seed(1234)
import itertools
from functools import partial
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from keras.optimizers import Adam, RMSprop
import optuna
```
# Data Preparation
The dataset contains information about images, each image is a hand-written digit. The aim is to have the computer predict which digit was written by the person, automatically, by "looking" at the image.
Each image is 28 pixels in height and 28 pixels in width (28 x 28), making a total of 784 pixels. Each pixel value is an integer between 0 and 255, indicating the darkness in a gray-scale of that pixel.
The data is stored in a dataframe where each pixel is a column (so it is flattened and not in the 28 x 28 format).
The data set has 785 columns. The first column, called "label", is the digit that was drawn by the user. The rest of the columns contain the pixel-values of the associated image.
```
# Load the data
data = pd.read_csv("../mnist.csv")
# first column is the target, the rest of the columns
# are the pixels of the image
# each row is 1 image
data.head()
# split dataset into a train and test set
X_train, X_test, y_train, y_test = train_test_split(
data.drop(['label'], axis=1), # the images
data['label'], # the target
test_size = 0.1,
random_state=0)
X_train.shape, X_test.shape
# number of images for each digit
g = sns.countplot(x=y_train)
plt.xlabel('Digits')
plt.ylabel('Number of images')
```
There are roughly the same amount of images for each of the 10 digits.
## Image re-scaling
We re-scale data for the CNN, between 0 and 1.
```
# Re-scale the data
# 255 is the maximum value a pixel can take
X_train = X_train / 255
X_test = X_test / 255
```
## Reshape
The images were stored in a pandas dataframe as 1-D vectors of 784 values. For a CNN with Keras, we need tensors with the following dimensions: width x height x channel.
Thus, we reshape all data to 28 x 28 x 1, 3-D matrices.
The 3rd dimension corresponds to the channel. RGB images have 3 channels. MNIST images are in gray-scale, thus they have only one channel in the 3rd dimension.
```
# Reshape image in 3 dimensions:
# height: 28px X width: 28px X channel: 1
X_train = X_train.values.reshape(-1,28,28,1)
X_test = X_test.values.reshape(-1,28,28,1)
```
## Target encoding
```
# the target is 1 variable with the 10 different digits
# as values
y_train.unique()
# For Keras, we need to create 10 dummy variables,
# one for each digit
# Encode labels to one hot vectors (ex : digit 2 -> [0,0,1,0,0,0,0,0,0,0])
y_train = to_categorical(y_train, num_classes = 10)
y_test = to_categorical(y_test, num_classes = 10)
# the new target
y_train
```
Let's print some example images.
```
# Some image examples
g = plt.imshow(X_train[0][:,:,0])
# Some image examples
g = plt.imshow(X_train[10][:,:,0])
```
# Define-by-Run design
We create the CNN and add the sampling space for the hyperparameters as we go. This is the define-by-run concept.
```
# we will save the model with this name
path_best_model = 'cnn_model_2.h5'
# starting point for the optimization
best_accuracy = 0
# function to create the CNN
def objective(trial):
    """Build, train, and evaluate one CNN configuration sampled by Optuna.

    The search space is declared inline (define-by-run): number of conv
    layers with their filters/kernel/stride/activation, number of dense
    layers with their units/activation, and the optimizer with its
    hyperparameters.

    Returns:
        Validation accuracy after the last epoch (the study maximizes it).

    Side effects:
        Saves the best model seen so far to ``path_best_model`` and updates
        the global ``best_accuracy``.
    """
    # Start construction of a Keras Sequential model.
    model = Sequential()

    # Convolutional layers; each gets its own filters/kernel/stride/activation.
    num_conv_layers = trial.suggest_int('num_conv_layers', 1, 3)
    for i in range(num_conv_layers):
        model.add(Conv2D(
            filters=trial.suggest_categorical('filters_{}'.format(i), [16, 32, 64]),
            kernel_size=trial.suggest_categorical('kernel_size{}'.format(i), [3, 5]),
            strides=trial.suggest_categorical('strides{}'.format(i), [1, 2]),
            # BUG FIX: this parameter was named 'activation{}', the same name
            # used in the dense-layer loop below, so Optuna silently tied the
            # conv and dense activations together. Give it a unique name.
            activation=trial.suggest_categorical(
                'conv_activation{}'.format(i), ['relu', 'tanh']),
            padding='same',
        ))

    # we could also optimize these parameters if we wanted:
    model.add(MaxPool2D(pool_size=2, strides=2))

    # Flatten the 4-rank output of the convolutional layers
    # to 2-rank that can be input to a fully-connected Dense layer.
    model.add(Flatten())

    # Fully-connected Dense layers; their count, units and activation are
    # all part of the search space.
    num_dense_layers = trial.suggest_int('num_dense_layers', 1, 3)
    for i in range(num_dense_layers):
        model.add(Dense(
            units=trial.suggest_int('units{}'.format(i), 5, 512),
            activation=trial.suggest_categorical(
                'activation{}'.format(i), ['relu', 'tanh']),
        ))

    # Last fully-connected dense layer with softmax-activation
    # for use in classification.
    model.add(Dense(10, activation='softmax'))

    # Choose and configure the optimizer.
    # NOTE(review): the learning rate is sampled uniformly; log-uniform
    # (log=True) is usually preferred for learning rates — changing it
    # would alter the search space, so confirm before doing so.
    optimizer_name = trial.suggest_categorical(
        'optimizer_name', ['Adam', 'RMSprop'])
    if optimizer_name == 'Adam':
        optimizer = Adam(lr=trial.suggest_float('learning_rate', 1e-6, 1e-2))
    else:
        optimizer = RMSprop(
            lr=trial.suggest_float('learning_rate', 1e-6, 1e-2),
            momentum=trial.suggest_float('momentum', 0.1, 0.9),
        )

    # In Keras we need to compile the model so it can be trained.
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Train for only 3 epochs so the notebook runs in reasonable time;
    # the epoch count could itself be a tunable parameter.
    history = model.fit(
        x=X_train,
        y=y_train,
        epochs=3,
        batch_size=128,
        validation_split=0.1,
    )

    # Classification accuracy on the validation set after the last epoch.
    accuracy = history.history['val_accuracy'][-1]

    # Save the model if it improves on the best-found performance.
    # Training CNNs is costly, so we persist the best network as we search
    # instead of re-training it afterwards. `global` so the module-level
    # tracker is updated.
    global best_accuracy
    if accuracy > best_accuracy:
        model.save(path_best_model)
        best_accuracy = accuracy

    # Delete the Keras model with these hyper-parameters from memory.
    del model

    # The study is created with direction='maximize', so the raw accuracy
    # is returned. (A stale comment here claimed the value had to be
    # negated for scikit-optimize; that does not apply to Optuna.)
    return accuracy
# we need this to store the search
# we will use it in the following notebook
study_name = "cnn_study_2" # unique identifier of the study.
storage_name = "sqlite:///{}.db".format(study_name)
study = optuna.create_study(
direction='maximize',
study_name=study_name,
storage=storage_name,
load_if_exists=True,
)
study.optimize(objective, n_trials=30)
```
# Analyze results
```
study.best_params
study.best_value
results = study.trials_dataframe()
results['value'].sort_values().reset_index(drop=True).plot()
plt.title('Convergence plot')
plt.xlabel('Iteration')
plt.ylabel('Accuracy')
results.head()
```
# Evaluate the model
```
# load best model
model = load_model(path_best_model)
model.summary()
# make predictions in test set
result = model.evaluate(x=X_test,
y=y_test)
# print evaluation metrics
for name, value in zip(model.metrics_names, result):
print(name, value)
```
## Confusion matrix
```
# Predict the values from the validation dataset
y_pred = model.predict(X_test)
# Convert predictions classes to one hot vectors
y_pred_classes = np.argmax(y_pred, axis = 1)
# Convert validation observations to one hot vectors
y_true = np.argmax(y_test, axis = 1)
# compute the confusion matrix
cm = confusion_matrix(y_true, y_pred_classes)
cm
# let's make it more colourful
classes = 10
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(classes)
plt.xticks(tick_marks, range(classes), rotation=45)
plt.yticks(tick_marks, range(classes))
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > 100 else "black",
)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
```
Here we can see that our CNN performs very well on all digits.
| github_jupyter |
## Programming exercises##
```
# 1. Find all the numbers between 2000 and 3200 (inclusive) that are
#    divisible by 7 but are not multiples of 5.
# BUG FIX: the original accumulated the SUM of the qualifying numbers and
# printed a single total, which does not answer "find all the numbers".
numbers = [x for x in range(2000, 3200 + 1) if x % 7 == 0 and x % 5 != 0]
print(numbers)
# 2. Determine whether a number is prime.
# BUG FIX: the original tested `n >= 1`, so 1 (and with no else-branch,
# nothing at all for n < 1) was reported as prime; primes start at 2.
n = int(input("enter a number: "))
if n >= 2:
    flag = True
    for i in range(2, n):
        if (n % i) == 0:
            flag = False
            break
else:
    flag = False  # 0, 1 and negative numbers are not prime
if flag:
    print('n is prime')
else:
    print('n is not prime')
#Another method
# divisor = 0
# for i in range(1, n + 1):
# if n % i == 0:
# divisores += 1
# if divisores == 2:
# print('n is prime')
# else:
# print('n is not prime')
# 3. Count how many numbers in a list are multiples of 2.
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
m = [x for x in l if x % 2 == 0]  # the even values themselves
count = len(m)
print(count)
# 4. Invert a text string.
name = input('chain text: ')
inverted = name[::-1]  # slice with step -1 reverses the string
print(inverted)
#5. """ An example program that illustrates the use of docstrings """
def nand(bool1, bool2):
    """Return the logical NAND of bool1 and bool2.

    True unless both arguments are truthy.
    """
    return not (bool1 and bool2)
#6. Calculate the number of days since you have born
"""One way in which to determine the number of
days in a month is to subtract the first of
the given month from the first of the next month.
The result should be the number of days in the given month."""
import datetime
def days_in_month(year, month):
    """
    Inputs:
      year - an integer between datetime.MINYEAR and datetime.MAXYEAR
             representing the year
      month - an integer between 1 and 12 representing the month
    Returns:
      The number of days in the input month, or False if the year/month
      pair is out of range.

    The day count is the difference between the first of the given month
    and the first of the following month.
    """
    if not (datetime.MINYEAR <= year <= datetime.MAXYEAR and 1 <= month <= 12):
        return False
    # FIX: the December branch previously used datetime.datetime while the
    # others used datetime.date; unified on datetime.date. Also avoid
    # constructing year MAXYEAR+1, which would raise ValueError.
    if month == 12:
        if year == datetime.MAXYEAR:
            return 31  # December always has 31 days; can't build next year's date
        nxt = datetime.date(year + 1, 1, 1)
    else:
        nxt = datetime.date(year, month + 1, 1)
    first = datetime.date(year, month, 1)
    return (nxt - first).days
def is_valid_date(year, month, day):
    """
    Inputs:
      year - an integer representing the year
      month - an integer representing the month
      day - an integer representing the day
    Returns:
      True if year-month-day is a valid date and False otherwise
    """
    month_length = days_in_month(year, month)
    year_ok = datetime.MINYEAR <= year <= datetime.MAXYEAR
    month_ok = 1 <= month <= 12
    return bool(year_ok and month_ok and 0 < day <= month_length)
is_valid_date(2021, 8, 43)
def days_between(year1, month1, day1, year2, month2, day2):
    """
    Inputs:
      year1/month1/day1 - integers representing the first date
      year2/month2/day2 - integers representing the second date
    Returns:
      The number of days from the first date to the second date, as an int.
      Returns 0 if either date is invalid or the second date is not after
      the first date.
    """
    if not (is_valid_date(year1, month1, day1) and is_valid_date(year2, month2, day2)):
        return 0
    date1 = datetime.date(year1, month1, day1)
    date2 = datetime.date(year2, month2, day2)
    if date2 <= date1:
        return 0
    # BUG FIX: the original returned the raw timedelta object here, while the
    # docstring (and the 0 fallback) promise an integer day count.
    return (date2 - date1).days
def age_in_days(year, month, day):
    """
    Inputs:
      year - an integer representing the birthday year
      month - an integer representing the birthday month
      day - an integer representing the birthday day
    Returns:
      The age of a person with the input birthday as of today.
      Returns 0 if the input date is invalid or if the input
      date is not in the past.
    """
    if not is_valid_date(year, month, day):
        return 0
    today = datetime.date.today()
    birthday = datetime.date(year, month, day)
    if birthday >= today:
        return 0
    return days_between(year, month, day, today.year, today.month, today.day)
print(age_in_days(1995, 2, 18))
#7. Format
mood1 = "happy"
mood2 = "Exited"
sentence1 = "I feel {1}, do you feel {0}? Or are you {0}? I'm not sure if we should be {1}.".format(mood1, mood2)
print(sentence1)
#8. Format
name1 = "Pierre"
age1 = 7
name2 = "May"
age2 = 13
line1 = "{0:^7} {1:>3}".format(name1, age1)
line2 = "{0:^7} {1:>3}".format(name2, age2)
print(line1)
print(line2)
```
| github_jupyter |
# QCoDeS Example with Tektronix Keithley 7510 Multimeter
In this example we will show how to use a few basic functions of the Keithley 7510 DMM. We attached the 1k Ohm resistor to the front terminals, with no source current or voltage.
For more detail about the 7510 DMM, please see the User's Manual: https://www.tek.com/digital-multimeter/high-resolution-digital-multimeters-manual/model-dmm7510-75-digit-graphical-sam-0, or Reference Manual: https://www.tek.com/digital-multimeter/high-resolution-digital-multimeters-manual-9
```
from qcodes.instrument_drivers.tektronix.keithley_7510 import Keithley7510
dmm = Keithley7510("dmm_7510", 'USB0::0x05E6::0x7510::04450363::INSTR')
```
# To reset the system to default settings:
```
dmm.reset()
```
# To perform measurement with different sense functions:
When first turned on, the default sense function is for DC voltage
```
dmm.sense.function()
```
to perform the measurement:
```
dmm.sense.voltage()
```
There'll be an error if try to call other functions, such as current:
```
try:
dmm.sense.current()
except AttributeError as err:
print(err)
```
To switch between functions, do the following:
```
dmm.sense.function('current')
dmm.sense.function()
dmm.sense.current()
```
And of course, once the sense function is changed to 'current', the user can't make voltage measurement
```
try:
dmm.sense.voltage()
except AttributeError as err:
print(err)
```
The available functions in the driver now are 'voltage', 'current', 'Avoltage', 'Acurrent', 'resistance', and 'Fresistance', where 'A' means 'AC', and 'F' means 'Four-wire'
```
try:
dmm.sense.function('ac current')
except ValueError as err:
print(err)
```
# To set measurement range (positive full-scale measure range):
By default, the auto range is on
```
dmm.sense.auto_range()
```
We can change it to 'off' as following
```
dmm.sense.auto_range(0)
dmm.sense.auto_range()
```
Note: this auto range setting is for the sense function at this moment, which is 'current'
```
dmm.sense.function()
```
If switch to another function, the auto range is still on, by default
```
dmm.sense.function('voltage')
dmm.sense.function()
dmm.sense.auto_range()
```
to change the range, use the following
```
dmm.sense.range(10)
dmm.sense.range()
```
This will also automatically turn off the auto range:
```
dmm.sense.auto_range()
```
the allowed range (upper limit) value is a set of discrete numbers, for example, 100mV, 1V, 10V, 100V, 1000V. If a value other than those allowed values is input, the system will just use the "closest" one:
```
dmm.sense.range(150)
dmm.sense.range()
dmm.sense.range(105)
dmm.sense.range()
```
The driver will not give any error messages for the example above, but if the value is too large or too small, there'll be an error message:
```
try:
dmm.sense.range(0.0001)
except ValueError as err:
print(err)
```
# To set the NPLC (Number of Power Line Cycles) value for measurements:
By default, the NPLC is 1 for each sense function
```
dmm.sense.nplc()
```
To set the NPLC value:
```
dmm.sense.nplc(.1)
dmm.sense.nplc()
```
Same as the 'range' variable, each sense function has its own NPLC value:
```
dmm.sense.function('resistance')
dmm.sense.function()
dmm.sense.nplc()
```
# To set the delay:
By default, the auto delay is enabled. According to the guide, "When this is enabled, a delay is added after a range or function change to allow the instrument to settle." But it's unclear how much the delay is.
```
dmm.sense.auto_delay()
```
To turn off the auto delay:
```
dmm.sense.auto_delay(0)
dmm.sense.auto_delay()
```
To turn the auto delay back on:
```
dmm.sense.auto_delay(1)
dmm.sense.auto_delay()
```
There is also an "user_delay", but it is designed for rigger model, please see the user guide for detail.
To set the user delay time:
First to set a user number to relate the delay time with: (default user number is empty, so an user number has to be set before setting the delay time)
```
dmm.sense.user_number(1)
dmm.sense.user_number()
```
By default, the user delay is 0s:
```
dmm.sense.user_delay()
```
Then to set the user delay as following:
```
dmm.sense.user_delay(0.1)
dmm.sense.user_delay()
```
The user delay is tied to user number:
```
dmm.sense.user_number(2)
dmm.sense.user_number()
dmm.sense.user_delay()
```
For the record, the auto delay here is still on:
```
dmm.sense.auto_delay()
```
# To set auto zero (automatic updates to the internal reference measurements):
By default, the auto zero is on
```
dmm.sense.auto_zero()
```
To turn off auto zero:
```
dmm.sense.auto_zero(0)
dmm.sense.auto_zero()
```
The auto zero setting is also tied to each function, not universal:
```
dmm.sense.function('current')
dmm.sense.function()
dmm.sense.auto_zero()
```
There is way to ask the system to do auto zero once:
```
dmm.sense.auto_zero_once()
```
See P487 of the Reference Manual for how to use auto zero ONCE. Note: it's not funtion-dependent.
# To set averaging filter for measurements, including average count, and filter type:
By default, averaging is off:
```
dmm.sense.average()
```
To turn it on:
```
dmm.sense.average(1)
dmm.sense.average()
```
Default average count value is 10, **remember to turn average on**, or it will not work:
```
dmm.sense.average_count()
```
To change the average count:
```
dmm.sense.average_count(23)
dmm.sense.average_count()
```
The range for average count is 1 to 100:
```
try:
dmm.sense.average_count(200)
except ValueError as err:
print(err)
```
There are two average types, repeating (default) or moving filter:
```
dmm.sense.average_type()
```
To make changes:
```
dmm.sense.average_type('MOV')
dmm.sense.average_type()
```
| github_jupyter |
```
from __future__ import print_function
%matplotlib inline
import numpy as np
from scipy import stats
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
import seaborn as sns
from patsy import dmatrices
import os
sns.set_style('whitegrid')
# Step-2. Import data
os.chdir('/Users/pauline/Documents/Python')
df = pd.read_csv("Tab-Morph.csv")
df = df.dropna()
nsample = 25
#x = np.linspace(0, 25, nsample)
x = df.sedim_thick
X = np.column_stack((x, (x - 5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, -0.01]
sig = 0.5
w = np.ones(nsample)
w[nsample * 6//10:] = 3
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + sig * w * e
X = X[:,[0,1]]
# Step-3.
mod_wls = sm.WLS(y, X, weights=1./(w ** 2))
res_wls = mod_wls.fit()
print(res_wls.summary())
# Step-4.
res_ols = sm.OLS(y, X).fit()
print(res_ols.params)
print(res_wls.params)
# Step-5.
se = np.vstack([[res_wls.bse], [res_ols.bse], [res_ols.HC0_se],
[res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se]])
se = np.round(se,4)
colnames = ['x1', 'const']
rownames = ['WLS', 'OLS', 'OLS_HC0', 'OLS_HC1', 'OLS_HC3', 'OLS_HC3']
tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)
print(tabl)
# Step-6.
covb = res_ols.cov_params()
prediction_var = res_ols.mse_resid + (X * np.dot(covb,X.T).T).sum(1)
prediction_std = np.sqrt(prediction_var)
tppf = stats.t.ppf(0.975, res_ols.df_resid)
# Step-7.
prstd_ols, iv_l_ols, iv_u_ols = wls_prediction_std(res_ols)
# Step-8.
prstd, iv_l, iv_u = wls_prediction_std(res_wls)
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(x, y, 'o', label="Bathymetric \nObservations", linewidth=.7, c='#0095d9')
ax.plot(x, y_true, '-', c='#1e50a2', label="True", linewidth=.9)
# OLS
ax.plot(x, res_ols.fittedvalues, 'r--', linewidth=.7)
ax.plot(x, iv_u_ols, 'r--', label="Ordinary Least Squares", linewidth=.7)
ax.plot(x, iv_l_ols, 'r--', linewidth=.7)
# WLS
ax.plot(x, res_wls.fittedvalues, '--.', c='#65318e', linewidth=.7, )
ax.plot(x, iv_u, '--', c='#65318e', label="Weighted Least Squares", linewidth=.7)
ax.plot(x, iv_l, '--', c='#65318e', linewidth=.7)
ax.legend(loc="best");
ax.set_xlabel('Sediment thickness, m', fontsize=10)
plt.title("Weighted Least Squares \nof sediment thickness at Mariana Trench by 25 bathymetric profiles", fontsize=14)
plt.annotate('D', xy=(-0.01, 1.06), xycoords="axes fraction", fontsize=18,
bbox=dict(boxstyle='round, pad=0.3', fc='w', edgecolor='grey', linewidth=1, alpha=0.9))
plt.show()
```
| github_jupyter |
# Applying Customizations
```
import pandas as pd
import numpy as np
import holoviews as hv
from holoviews import opts
hv.extension('bokeh', 'matplotlib')
```
As introduced in the [Customization](../getting_started/2-Customization.ipynb) section of the 'Getting Started' guide, HoloViews maintains a strict separation between your content (your data and declarations about your data) and its presentation (the details of how this data is represented visually). This separation is achieved by maintaining sets of keyword values ("options") that specify how elements are to appear, stored outside of the element itself. Option keywords can be specified for individual element instances, for all elements of a particular type, or for arbitrary user-defined sets of elements that you give a certain ``group`` and ``label`` (see [Annotating Data](../user_guide/01-Annotating_Data.ipynb)).
The options system controls how individual plots appear, but other important settings are made more globally using the "output" system, which controls HoloViews plotting and rendering code (see the [Plots and Renderers](Plots_and_Renderers.ipynb) user guide). In this guide we will show how to customize the visual styling with the options and output systems, focusing on the mechanisms rather than the specific choices available (which are covered in other guides such as [Style Mapping](04-Style_Mapping.ipynb)).
## Core concepts
This section offers an overview of some core concepts for customizing visual representation, focusing on how HoloViews keeps content and presentation separate. To start, we will revisit the simple introductory example in the [Customization](../getting_started/2-Customization.ipynb) getting-started guide (which might be helpful to review first).
```
spike_train = pd.read_csv('../assets/spike_train.csv.gz')
curve = hv.Curve(spike_train, 'milliseconds', 'Hertz')
spikes = hv.Spikes(spike_train, 'milliseconds', [])
```
And now we display the ``curve`` and a ``spikes`` elements together in a layout as we did in the getting-started guide:
```
curve = hv.Curve( spike_train, 'milliseconds', 'Hertz')
spikes = hv.Spikes(spike_train, 'milliseconds', [])
layout = curve + spikes
layout.opts(
opts.Curve( height=200, width=900, xaxis=None, line_width=1.50, color='red', tools=['hover']),
opts.Spikes(height=150, width=900, yaxis=None, line_width=0.25, color='grey')).cols(1)
```
This example illustrates a number of key concepts, as described below.
### Content versus presentation
In the getting-started guide [Introduction](../getting_started/1-Introduction.ipynb), we saw that we can print the string representation of HoloViews objects such as `layout`:
```
print(layout)
```
In the [Customization](../getting_started/2-Customization.ipynb) getting-started guide, the `.opts.info()` method was introduced that lets you see the options *associated* with (though not stored on) the objects:
```
layout.opts.info()
```
If you inspect all the state of the `Layout`, `Curve`, or `Spikes` objects you will not find any of these keywords, because they are stored in an entirely separate data structure. HoloViews assigns a unique ID per HoloViews object that lets arbitrarily specific customization be associated with that object if needed, while also making it simple to define options that apply to entire classes of objects by type (or group and label if defined). The HoloViews element is thus *always* a thin wrapper around your data, without any visual styling information or plotting state, even though it *seems* like the object includes the styling information. This separation between content and presentation is by design, so that you can work with your data and with its presentation entirely independently.
If you wish to clear the options that have been associated with an object `obj`, you can call `obj.opts.clear()`.
## Option builders
The [Customization](../getting_started/2-Customization.ipynb) getting-started guide also introduces the notion of *option builders*. One of the option builders in the visualization shown above is:
```
opts.Curve( height=200, width=900, xaxis=None, line_width=1.50, color='red', tools=['hover'])
```
An *option builder* takes a collection of keywords and returns an `Options` object that stores these keywords together. Why should you use option builders and how are they different from a vanilla dictionary?
1. The option builder specifies which type of HoloViews object the options are for, which is important because each type accepts different options.
2. Knowing the type, the options builder does *validation* against that type for the currently loaded plotting extensions. Try introducing a typo into one of the keywords above; you should get a helpful error message. Separately, try renaming `line_width` to `linewidth`, and you'll get a different message because the latter is a valid matplotlib keyword.
3. The option builder allows *tab-completion* in the notebook. This is useful for discovering available keywords for that type of object, which helps prevent mistakes and makes it quicker to specify a set of keywords.
In the cell above, the specified options are applicable to `Curve` elements, and different validation and tab completion will be available for other types.
The returned `Options` object is different from a dictionary in the following ways:
1. An optional *spec* is recorded, where this specification is normally just the element name. Above this is simply 'Curve'. Later, in section [Using `group` and `label`](#Using-group-and-label), we will see how this can also specify the `group` and `label`.
2. The keywords are alphanumerically sorted, making it easier to compare `Options` objects.
## Inlining options
When customizing a single element, the use of an option builder is not mandatory. If you have a small number of keywords that are common (e.g `color`, `cmap`, `title`, `width`, `height`) it can be clearer to inline them into the `.opts` method call if tab-completion and validation isn't required:
```
np.random.seed(42)
array = np.random.random((10,10))
im1 = hv.Image(array).opts(opts.Image(cmap='Reds')) # Using an option builder
im2 = hv.Image(array).opts(cmap='Blues') # Without an option builder
im1 + im2
```
You cannot inline keywords for composite objects such as `Layout` or `Overlay` objects. For instance, the `layout` object is:
```
print(layout)
```
To customize this layout, you need to use an option builder to associate your keywords with either the `Curve` or the `Spikes` object, or else you would have had to apply the options to the individual elements before you built the composite object. To illustrate setting by type, note that in the first example, both the `Curve` and the `Spikes` have different `height` values provided.
You can also target options by the `group` and `label` as described in section on [using `group` and `label`](#Using-group-and-label).
## Session-specific options
One other common need is to set some options for a Python session, whether using Jupyter notebook or not. For this you can set the default options that will apply to all objects created subsequently:
```
opts.defaults(
opts.HeatMap(cmap='Summer', colorbar=True, toolbar='above'))
```
The `opt.defaults` method has now set the style used for all `HeatMap` elements used in this session:
```
data = [(chr(65+i), chr(97+j), i*j) for i in range(5) for j in range(5) if i!=j]
heatmap = hv.HeatMap(data).sort()
heatmap
```
## Discovering options
Using tab completion in the option builders is one convenient and easy way of discovering the available options for an element. Another approach is to use `hv.help`.
For instance, if you run `hv.help(hv.Curve)` you will see a list of the 'style' and 'plot' options applicable to `Curve`. The distinction between these two types of options can often be ignored for most purposes, but the interested reader is encouraged to read more about them in more detail [below](#Split-into-style,-plot-and-norm-options).
For the purposes of discovering the available options, the keywords listed under the 'Style Options' section of the help output is worth noting. These keywords are specific to the active plotting extension and are part of the API for that plotting library. For instance, running `hv.help(hv.Curve)` in the cell below would give you the keywords in the Bokeh documentation that you can reference for customizing the appearance of `Curve` objects.
## Maximizing readability
There are many ways to specify options in your code using the above tools, but for creating readable, maintainable code, we recommend making the separation of content and presentation explicit. Someone reading your code can then understand your visualizations in two steps 1) what your data *is* in terms of the applicable elements and containers 2) how this data is to be presented visually.
The following guide details the approach we have used throughout the examples and guides on holoviews.org. We have found that following these rules makes code involving HoloViews easier to read and more consistent.
The core principle is as follows: ***avoid mixing declarations of data, elements and containers with details of their visual appearance***.
### Two contrasting examples
One of the best ways to do this is to declare all your elements, compose them and then apply all the necessary styling with the `.opts` method before the visualization is rendered to disk or to the screen. For instance, the example from the getting-started guide could have been written sub-optimally as follows:
***Sub-optimal***
```python
curve = hv.Curve( spike_train, 'milliseconds', 'Hertz').opts(
height=200, width=900, xaxis=None, line_width=1.50, color='red', tools=['hover'])
spikes = hv.Spikes(spike_train, 'milliseconds', vdims=[]).opts(
height=150, width=900, yaxis=None, line_width=0.25, color='grey')
(curve + spikes).cols(1)
```
Code like that is very difficult to read because it mixes declarations of the data and its dimensions with details about how to present it. The recommended version declares the `Layout`, then separately applies all the options together where it's clear that they are just hints for the visualization:
***Recommended***
```python
curve = hv.Curve( spike_train, 'milliseconds', 'Hertz')
spikes = hv.Spikes(spike_train, 'milliseconds', [])
layout = curve + spikes
layout.opts(
opts.Curve( height=200, width=900, xaxis=None, line_width=1.50, color='red', tools=['hover']),
opts.Spikes(height=150, width=900, yaxis=None, line_width=0.25, color='grey')).cols(1)
```
By grouping the options in this way and applying them at the end, you can see the definition of `layout` without being distracted by visual concerns declared later. Conversely, you can modify the visual appearance of `layout` easily without needing to know exactly how it was defined. The [coding style guide](#Coding-style-guide) section below offers additional advice for keeping things readable and consistent.
### When to use multiple `.opts` calls
The above coding style applies in many cases, but sometimes you have multiple elements of the same type that you need to distinguish visually. For instance, you may have a set of curves where using the `dim` or `Cycle` objects (described in the [Style Mapping](04-Style_Mapping.ipynb) user guide) is not appropriate and you want to customize the appearance of each curve individually. Alternatively, you may be generating elements in a list comprehension for use in `NdOverlay` and have a specific style to apply to each one.
In these situations, it is often appropriate to use the inline style of `.opts` locally. In these instances, it is often best to give the individually styled objects a suitable named handle as illustrated by the [legend example](../gallery/demos/bokeh/legend_example.ipynb) of the gallery.
### General advice
As HoloViews is highly compositional by design, you can always build long expressions mixing the data and element declarations, the composition of these elements, and their customization. Even though such expressions can be terse they can also be difficult to read.
The simplest way to avoid long expressions is to keep some level of separation between these stages:
1. declaration of the data
2. declaration of the elements, including `.opts` to distinguish between elements of the same type if necessary
3. composition with `+` and `*` into layouts and overlays, and
4. customization of the composite object, either with a final call to the `.opts` method, or by declaring such settings as the default for your entire session as described [above](#Session-specific-options).
When stages are simple enough, it can be appropriate to combine them. For instance, if the declaration of the data is simple enough, you can fold in the declaration of the element. In general, any expression involving three or more of these stages will benefit from being broken up into several steps.
These general principles will help you write more readable code. Maximizing readability will always require some level of judgement, but you can maximize consistency by consulting the [coding style guide](#Coding-style-guide) section for more tips.
# Customizing display output
The options system controls most of the customizations you might want to do, but there are a few settings that are controlled at a more general level that cuts across all HoloViews object types: the active plotting extension (e.g. Bokeh or Matplotlib), the output display format (PNG, SVG, etc.), the output figure size, and other similar options. The `hv.output` utility allows you to modify these more global settings, either for all subsequent objects or for one particular object:
* `hv.output(**kwargs)`: Customize how the output appears for the rest of the notebook session.
* `hv.output(obj, **kwargs)`: Temporarily affect the display of an object `obj` using the keyword `**kwargs`.
The `hv.output` utility only has an effect in contexts where HoloViews objects can be automatically displayed, which currently is limited to the Jupyter Notebook (in either its classic or JupyterLab variants). In any other Python context, using `hv.output` has no effect, as there is no automatically displayed output; see the [hv.save() and hv.render()](Plots_and_Renderers.ipynb#Saving-and-rendering) utilities for explicitly creating output in those other contexts.
To start with `hv.output`, let us define a `Path` object:
```
lin = np.linspace(0, np.pi*2, 200)
def lissajous(t, a, b, delta):
    """Return the (x, y, t) coordinates of a Lissajous curve sampled at t."""
    x_coord = np.sin(a * t + delta)
    y_coord = np.sin(b * t)
    return (x_coord, y_coord, t)
path = hv.Path([lissajous(lin, 3, 5, np.pi/2)])
path.opts(opts.Path(color='purple', line_width=3, line_dash='dotted'))
```
Now, to illustrate, let's use `hv.output` to switch our plotting extension to matplotlib:
```
hv.output(backend='matplotlib', fig='svg')
```
We can now display our `path` object with some option customization:
```
path.opts(opts.Path(linewidth=2, color='red', linestyle='dotted'))
```
Our plot is now rendered with Matplotlib, in SVG format (try right-clicking the image in the web browser and saving it to disk to confirm). Note that the `opts.Path` option builder now tab completes *Matplotlib* keywords because we activated the Matplotlib plotting extension beforehand. Specifically, `linewidth` and `linestyle` don't exist in Bokeh, where the corresponding options are called `line_width` and `line_dash` instead.
You can see the custom output options that are currently active using `hv.output.info()`:
```
hv.output.info()
```
The info method will always show which backend is active as well as any other custom settings you have specified. These settings apply to the subsequent display of all objects unless you customize the output display settings for a single object.
To illustrate how settings are kept separate, let us switch back to Bokeh in this notebook session:
```
hv.output(backend='bokeh')
hv.output.info()
```
With Bokeh active, we can now declare options on `path` that we want to apply only to matplotlib:
```
path = path.opts(
opts.Path(linewidth=3, color='blue', backend='matplotlib'))
path
```
Now we can supply `path` to `hv.output` to customize how it is displayed, while activating matplotlib to generate that display. In the next cell, we render our path at 50% size as an SVG using matplotlib.
```
hv.output(path, backend='matplotlib', fig='svg', size=50)
```
Passing `hv.output` an object will apply the specified settings only for the subsequent display. If you were to view `path` now in the usual way, you would see that it is still being displayed with Bokeh with purple dotted lines.
One thing to note is that when we set the options with `backend='matplotlib'`, the active plotting extension was Bokeh. This means that `opts.Path` will tab complete *bokeh* keywords, and not the matplotlib ones that were specified. In practice you will want to set the backend appropriately before building your options settings, to ensure that you get the most appropriate tab completion.
### Available `hv.output` settings
You can see the available settings using `help(hv.output)`. For reference, here are the most commonly used ones:
* **backend**: *The backend used by HoloViews*. If the necessary libraries are installed this can be `'bokeh'`, `'matplotlib'` or `'plotly'`.
* **fig** : *The static figure format*. The most common options are `'svg'` and `'png'`.
* **holomap**: *The display type for holomaps*. With matplotlib and the necessary support libraries, this may be `'gif'` or `'mp4'`. The JavaScript `'scrubber'` widgets as well as the regular `'widgets'` are always supported.
* **fps**: *The frames per second used for animations*. This setting is used for GIF output and by the scrubber widget.
* **size**: *The percentage size of displayed output*. Useful for making all display larger or smaller.
* **dpi**: *The rendered dpi of the figure*. This setting affects raster output such as PNG images.
In `help(hv.output)` you will see a few other, less common settings. The `filename` setting in particular is not recommended and will be deprecated in favor of `hv.save` in future.
## Coding style guide
Using `hv.output` plus option builders with the `.opts` method and `opts.defaults` covers the functionality required for most HoloViews code written by users. In addition to these recommended tools, HoloViews supports [Notebook Magics](Notebook_Magics.ipynb) (not recommended because they are Jupyter-specific) and literal (nested dictionary) formats useful for developers, as detailed in the [Extending HoloViews](#Extending-HoloViews) section.
This section offers further recommendations for how users can structure their code. These are generally tips based on the important principles described in the [maximizing readability](#Maximizing-readability) section that are often helpful but optional.
* Use as few `.opts` calls as necessary to style the object the way you want.
* You can inline keywords without an option builder if you only have a few common keywords. For instance, `hv.Image(...).opts(cmap='Reds')` is clearer to read than `hv.Image(...).opts(opts.Image(cmap='Reds'))`.
* Conversely, you *should* use an option builder if you have more than four keywords.
* When you have multiple option builders, it is often clearest to list them on separate lines with a single indentation in both `.opts` and `opts.defaults`:
**Not recommended**
```
layout.opts(opts.VLine(color='white'), opts.Image(cmap='Reds'), opts.Layout(width=500), opts.Curve(color='blue'))
```
**Recommended**
```
layout.opts(
opts.Curve(color='blue'),
opts.Image(cmap='Reds'),
opts.Layout(width=500),
opts.VLine(color='white'))
```
* The latter is recommended for another reason: if possible, list your element option builders in alphabetical order, before your container option builders in alphabetical order.
* Keep the expression before the `.opts` method simple so that the overall expression is readable.
* Don't mix `hv.output` and use of the `.opts` method in the same expression.
## What is `.options`?
If you tab complete a HoloViews object, you'll notice there is an `.options` method as well as a `.opts` method. So what is the difference?
The `.options` method was introduced in HoloViews 1.10 and was the first time HoloViews allowed users to ignore the distinction between 'style', 'plot' and 'norm' options described in the next section. It is largely equivalent to the `.opts` method except that it applies the options on a returned clone of the object.
In other words, you have `clone = obj.options(**kwargs)` where `obj` is unaffected by the keywords supplied while `clone` will be customized. Both `.opts` and `.options` support an explicit `clone` keyword, so:
* `obj.opts(**kwargs, clone=True)` is equivalent to `obj.options(**kwargs)`, and conversely
* `obj.options(**kwargs, clone=False)` is equivalent to `obj.opts(**kwargs)`
For this reason, users only ever need to use `.opts` and occasionally supply `clone=True` if required. The only other difference between these methods is that `.opts` supports the full literal specification that allows splitting into [style, plot and norm options](#Split-into-style,-plot-and-norm-options) (for developers) whereas `.options` does not.
## When should I use `clone=True`?
The 'Persistent styles' section of the [customization](../getting_started/2-Customization.ipynb) user guide shows how HoloViews remembers options set for an object (per plotting extension). For instance, we never customized the `spikes` object defined at the start of the notebook but we did customize it when it was part of a `Layout` called `layout`. Examining this `spikes` object, we see the options were applied to the underlying object, not just a copy of it in the layout:
```
spikes
```
This is because `clone=False` by default in the `.opts` method. To illustrate `clone=True`, let's view some purple spikes *without* affecting the original `spikes` object:
```
purple_spikes = spikes.opts(color='purple', clone=True)
purple_spikes
```
Now if you were to look at `spikes` again, you would see that it still looks like the grey version above and only `purple_spikes` is purple. This means that `clone=True` is useful when you want to keep different styles for some HoloViews object (by making styled clones of it) instead of overwriting the options each time you call `.opts`.
## Extending HoloViews
In addition to the formats described above for use by users, additional option formats are supported that are less user friendly for data exploration but may be more convenient for library authors building on HoloViews.
The first of these is the *`Option` list syntax* which is typically most useful outside of notebooks, a *literal syntax* that avoids the need to import `opts`, and then finally a literal syntax that keeps *style* and *plot* options separate.
### `Option` list syntax
If you find yourself using `obj.opts(*options)` where `options` is a list of `Option` objects, use `obj.opts(options)` instead as list input is also supported:
```
options = [
opts.Curve( height=200, width=900, xaxis=None, line_width=1.50, color='grey', tools=['hover']),
opts.Spikes(height=150, width=900, yaxis=None, line_width=0.25, color='orange')]
layout.opts(options).cols(1)
```
This approach is often best in regular Python code where you are dynamically building up a list of options to apply. Using the option builders early also allows for early validation before use in the `.opts` method.
### Literal syntax
This syntax has the advantage of being a pure Python literal but it is harder to work with directly (due to nested dictionaries), is less readable, lacks tab completion support and lacks validation at the point where the keywords are defined:
```
layout.opts(
{'Curve': dict(height=200, width=900, xaxis=None, line_width=2, color='blue', tools=['hover']),
'Spikes': dict(height=150, width=900, yaxis=None, line_width=0.25, color='green')}).cols(1)
```
The utility of this format is you don't need to import `opts` and it is easier to dynamically add or remove keywords using Python or if you are storing options in a text file like YAML or JSON and only later applying them in Python code. This format should be avoided when trying to maximize readability or make the available keyword options easy to explore.
### Using `group` and `label`
The notion of an element `group` and `label` was introduced in [Annotating Data](./01-Annotating_Data.ipynb). This type of metadata is helpful for organizing large collections of elements with shared styling, such as automatically generated objects from some external software (e.g. a simulator). If you have a large set of elements with semantically meaningful `group` and `label` parameters set, you can use this information to appropriately customize large numbers of visualizations at once.
To illustrate, here are four overlaid curves where three have the `group` of 'Sinusoid' and one of these also has the label 'Squared':
```
xs = np.linspace(-np.pi,np.pi,100)
curve = hv.Curve((xs, xs/3))
group_curve1 = hv.Curve((xs, np.sin(xs)), group='Sinusoid')
group_curve2 = hv.Curve((xs, np.sin(xs+np.pi/4)), group='Sinusoid')
label_curve = hv.Curve((xs, np.sin(xs)**2), group='Sinusoid', label='Squared')
curves = curve * group_curve1 * group_curve2 * label_curve
curves
```
We can now use the `.opts` method to make all curves blue unless they are in the 'Sinusoid' group in which case they are red. Additionally, if a curve in the 'Sinusoid' group also has the label 'Squared', we can make sure that curve is green with a custom interpolation option:
```
curves.opts(
opts.Curve(color='blue'),
opts.Curve('Sinusoid', color='red'),
opts.Curve('Sinusoid.Squared', interpolation='steps-mid', color='green'))
```
By using `opts.defaults` instead of the `.opts` method, we can use this type of customization to apply options to many elements, including elements that haven't even been created yet. For instance, if we run:
```
opts.defaults(opts.Area('Error', alpha=0.5, color='grey'))
```
Then any `Area` element with a `group` of 'Error' will then be displayed as a semi-transparent grey:
```
X = np.linspace(0,2,10)
hv.Area((X, np.random.rand(10), -np.random.rand(10)), vdims=['y', 'y2'], group='Error')
```
## Split into `style`, `plot` and `norm` options
In `HoloViews`, an element such as `Curve` actually has three semantically distinct categories of options: `style`, `plot`, and `norm` options. Normally, a user doesn't need to worry about the distinction if they spend most of their time working with a single plotting extension.
When trying to build a system that consistently needs to generate visualizations across different plotting libraries, it can be useful to make this distinction explicit:
##### ``style`` options:
``style`` options are passed directly to the underlying rendering backend that actually draws the plots, allowing you to control the details of how it behaves. Each backend has its own options (e.g. the [``bokeh``](Bokeh_Backend) or plotly backends).
For whichever backend has been selected, HoloViews can tell you which options are supported, but you will need to read the corresponding documentation (e.g. [matplotlib](http://matplotlib.org/contents.html), [bokeh](http://bokeh.pydata.org)) for the details of their use. For listing available options, see the ``hv.help`` as described in the [Discovering options](#Discovering-options) section.
HoloViews has been designed to be easily extensible to additional backends in the future and each backend would have its own set of style options.
##### ``plot`` options:
Each of the various HoloViews plotting classes declares various [Parameters](http://param.pyviz.org) that control how HoloViews builds the visualization for that type of object, such as plot sizes and labels. HoloViews uses these options internally; they are not simply passed to the underlying backend. HoloViews documents these options fully in its online help and in the [Reference Manual](http://holoviews.org/Reference_Manual). These options may vary for different backends in some cases, depending on the support available both in that library and in the HoloViews interface to it, but we try to keep any options that are meaningful for a variety of backends the same for all of them. For listing available options, see the output of ``hv.help``.
##### ``norm`` options:
``norm`` options are a special type of plot option that are applied orthogonally to the above two types, to control normalization. Normalization refers to adjusting the properties of one plot relative to those of another. For instance, two images normalized together would appear with relative brightness levels, with the brightest image using the full range black to white, while the other image is scaled proportionally. Two images normalized independently would both cover the full range from black to white. Similarly, two axis ranges normalized together are effectively linked and will expand to fit the largest range of either axis, while those normalized separately would cover different ranges. For listing available options, see the output of ``hv.help``.
You can preserve the semantic distinction between these types of option in an augmented form of the [Literal syntax](#Literal-syntax) as follows:
```
full_literal_spec = {
'Curve': {'style':dict(color='orange')},
'Curve.Sinusoid': {'style':dict(color='grey')},
'Curve.Sinusoid.Squared': {'style':dict(color='black'),
'plot':dict(interpolation='steps-mid')}}
curves.opts(full_literal_spec)
```
This specification is what HoloViews uses internally, but it is awkward for people to use and is not ever recommended for normal users. That said, it does offer the maximal amount of flexibility and power for integration with other software.
For instance, a simulator that can output visualization using either Bokeh or Matplotlib via HoloViews could use this format. By keeping the 'plot' and 'style' options separate, the 'plot' options could be set regardless of the plotting library while the 'style' options would be conditional on the backend.
## Onwards
This section of the user guide has described how you can discover and set customization options in HoloViews. Using `hv.help` and the option builders, you should be able to find the options available for any given object you want to display.
What *hasn't* been explored are some of the facilities HoloViews offers to map the dimensions of your data to style options. This important topic is explored in the next user guide [Style Mapping](04-Style_Mapping.ipynb), where you will learn of the `dim` object as well as about the `Cycle` and `Palette` objects.
| github_jupyter |
## What is the purpose of programming?
If you do much work on computers, eventually you find that there’s some task you’d like to automate. The goal of this course is to help you learn the basics of how to translate algorithmic or mathematical ideas you may have into instructions that your computer can understand and implement.
Programming languages provide the interface that allows you to do so and each language does so in unique ways.
While each language is different, the basic principles of programming translate across languages, and this is what we want you to understand.
Python is one of the most popular languages in use today and is used by people from all fields.
## A Little Bit of Jupyter...
As you can see in this notebook there are these boxes that you can type in- each box can store either code that python will try to "interpret" or text which will just display. BUT you need to tell the jupyter notebook what you intend to do by changing the type of the cell using the dropdown menu: Cell --> CellType --> Code/Markdown. To evaluate a cell use Shift-Enter
```
This is not a text cell but I am typing text
```
This is a text cell and now the text is not interpreted.
```
# If you want to put text in a code cell- use the # symbol before the text
```
Note you can insert or delete cells using these menus: Insert --> Cell Above/Below or Edit --> Delete Cells
## Functions and Arguments (a very brief intro.. )

## Data Types
We will talk more about this later but when you write things in Code, every item has a type... you can find out the type of something using the type() function
```
type(3) # this is an integer
type(3.0) # this is a float
type("3.0, abc") # this is a string
```
Why did the above just print str when it should have printed float and then str? This is because jupyter notebook will evaluate the code in the most recent line and put that in the output. If you want to print something along the way there is a python command for that!
```
print(type(3.0))
print(type("3.0, abc"))
print("Hello World...")
#Printing two things on the same line
print("3", 4)
help(print)
```
### Casting
```
#What if I want to convert that float to an integer or a string?
int(3.0)
str(3.0)
print(type(str(3.0)))
```
## Python as a calculator!
```
# Arithmetic operations
5 + 2
5 - 2
5 * 2
# We can check the type of the output as before
type(5*2)
# floating point by default
print(5 / 2)
print(type(5/2))
5 // 2
5 ** 2
3 * * 4 #spaces matter if it changes the operation
3*4 # here the lack of spaces doesn't change the operation
5 ** 0.5
# order of operations
(5 + 4/2) ** 2
```
## Python is a smart calculator ...
```
"3.0" + "4.0" # the addition operation can be used to combine strings
"3.0" - "4.0"
"3" + 4 # we can't add a string and int
```
### Comparisons
```
print(3 == 3)
print(1 == 3)
print("abc" == "abc")
print("abc" == "ABC")
print(abc == "abc")
```
### Basics of Conditionals
We just saw comparisons- comparisons return True or False and this can be used to tell the computer what to do.
For example if 'str1' == 'str2': print("Yes they are equal!"). Let's try this... remember after conditions like if you need a colon and indentation of what you want to do.
```
if "abc" == "abc":
print("Yes they are equal!")
```
What if that is not true and we want to do something? We can use the "not" operator for this
```
print("Comparing first...")
if not "abc" == "abc":
print("No they are not equal!")
print("Comparing second...")
if not "ABC" == "abc":
print("No they are not equal!")
```
We can combine these into one statement using the "else" idea.. just as in english the commands are staged if .. do this, else.. do something else.
```
if "abc" == "abc":
print("Yes they are equal!")
else:
print("No they are not equal!")
```
## Keeping things around to use later... Variables!
All those expressions output something but how do we store them to use them later? Python uses the '=' to assign things to certain names. The name then becomes a container for whatever is on the right of the equals
```
a = 10
b = 20
a + b
```
What is the type of a?
```
type(a)
```
But we can't use anything as names..
```
10=20
print = 10 #bad!
print(6)
```
How do I delete a variable? (Little discussion about state )
```
del print
print(6)
quarter = 1/4
half = 2 * quarter
print(half)
print(quarter)
print(half == quarter) # we can compare variables to see if they are the same
```
If there are a lot of variables printing is tedious... let's use the command whos. If we type 'whos' it will show all variables.
```
whos
```
The golden ratio is a special number for many reasons it can be defined as (1 + sqrt(5))/2. Store the golden ratio in a variable. Next convert this to a string and print one string that says: "This is the golden ratio (golden ratio)" where the parentheses refers to the string version of the golden ratio variable.
Store another variable b that has value 1.
Calculate 1 + 1/goldenratio ... what does this look like? Verify this in python using a comparison.
```
goldenratio = (1 + 5 ** 0.5)/2
print("This is the golden ratio " + str(goldenratio))
print(1+ (1/goldenratio))
print(goldenratio == 1+1/goldenratio)
```
## Sequences
If we want to put a sequence of things into one item (a la an excel row) how do we do that? One way is through the data type
list.
```
x = [1,2,3,4,5]
print(x)
```
How do I grab an element? In python indexing is done through square brackets []. We will see more of this later, but for a list the brackets will require an integer to select an index. Python indexes starting at 0!
```
x[0]
x[1.0]
x[-1] #you can index the end through negatives
x[-2]
#The : allows you to get all elements after or before the index
print(x[1:])
print(x[:4])
#lists can store other types too
y = ['1', 2, '3', 4.0]
```
## Basic loops
How do I go through each element of y and print its type?
```
for element in y: #note the colon
print(type(element))
```
What if I wanted to loop through y and just print the 3rd element?
```
#python uses the range() function to create a list of numbers
size_y = len(y)
print(range(size_y))
for i in range(size_y):
#index 2 is the 3rd element, using a conditional!
if i == 2:
print(y[i])
#print index of elements that equal '3'
for i in range(size_y):
if y[i] == '3':
print("Index of element that equals '3':", i)
```
HARDER Example: What if we for some reason couldn't calculate the size of y and still want to print the 3rd element? We can use what is known as a while loop.
```
i = 0
while True:
print("Index: " , i)
if i == 2:
print(y[i])
break
i += 1
```
| github_jupyter |
# Reading outputs from E+
```
# some initial set up
# if you have not installed eppy, and only downloaded it
# you will need the following lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = '../'
sys.path.append(pathnameto_eppy)
```
## Using titletable() to get at the tables
So far we have been making changes to the IDF input file.
How about looking at the outputs.
Energyplus makes nice htmlout files that look like this.
```
import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.html_snippet1) #display the image below
```
If you look at the clipping of the html file above, you see tables with data in them. Eppy has functions that let you access each of these tables and get the data from any of its cells.
Let us say you want to find the "Net Site Energy".
This is in table "Site and Source Energy".
The number you want is in the third row, second column and its value is "47694.47"
Let us use eppy to extract this number
```
from eppy.results import readhtml # the eppy module with functions to read the html
fname = "../eppy/resources/outputfiles/V_7_2/5ZoneCAVtoVAVWarmestTempFlowTable_ABUPS.html" # the html file you want to read
filehandle = open(fname, 'r').read()
htables = readhtml.titletable(filehandle) # reads the tables with their titles
```
If you open the python file readhtml.py and look at the function titletable, you can see the function documentation.
It says the following
```
"""return a list of [(title, table), .....]
title = previous item with a <b> tag
table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]"""
```
The documentation says that it returns a list.
Let us take a look inside this list.
Let us look at the first item in the list.
```
firstitem = htables[0]
print(firstitem)
```
Ughh !!! that is ugly. Hard to see what it is.
Let us use a python module to print it pretty
```
import pprint
pp = pprint.PrettyPrinter()
pp.pprint(firstitem)
```
Nice. that is a little clearer
```
firstitem_title = firstitem[0]
pp.pprint(firstitem_title)
firstitem_table = firstitem[1]
pp.pprint(firstitem_table)
```
How do we get to value of "Net Site Energy".
We know it is in the third row, second column of the table.
Easy.
```
thirdrow = firstitem_table[2] # we start counting with 0. So 0, 1, 2 is third row
print(thirdrow)
thirdrow_secondcolumn = thirdrow[1]
thirdrow_secondcolumn
```
the text from the html table is in unicode.
That is why you see that weird 'u' letter.
Let us convert it to a floating point number
```
net_site_energy = float(thirdrow_secondcolumn)
net_site_energy
```
Let us have a little fun with the tables.
Get the titles of all the tables
```
alltitles = [htable[0] for htable in htables]
alltitles
```
Now let us grab the tables with the titles "Building Area" and "Site to Source Energy Conversion Factors"
```
twotables = [htable for htable in htables if htable[0] in ["Building Area", "Site to Source Energy Conversion Factors"]]
twotables
```
Let us leave readtables for now.
It gives us the basic functionality to read any of the tables in the html output file.
## Fast HTML table file read
The function `readhtml.titletable()` will be slow with extremely large files. If you are dealing with a very large file use the following functions
```
from eppy.results import fasthtml
fname = "../eppy/resources/outputfiles/V_7_2/5ZoneCAVtoVAVWarmestTempFlowTable_ABUPS.html" # the html file you want to read
filehandle = open(fname, 'r') # get a file handle to the html file
firsttable = fasthtml.tablebyindex(filehandle, 0)
pp.pprint(firstitem)
filehandle = open(fname, 'r') # get a file handle to the html file
namedtable = fasthtml.tablebyname(filehandle, "Site and Source Energy")
pp.pprint(namedtable)
```
- You can read only one table at a time
- You need to open the file each time you call the function. The function will close the file.
## Using lines_table() to get at the tables
We have been using titletable() to get at the tables. There is a constraint using function titletable(). Titletable() assumes that there is a unique title (in HTML bold) just above the table. It is assumed that this title will adequately describe the table. This is true in most cases and titletable() is perfectly good to use. Unfortunately there are some tables that do not follow this rule. The snippet below shows one of them.
```
import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.html_snippet2) # display the image below
```
Notice that the HTML snippet shows a table with three lines above it. The first two lines have information that describe the table. We need to look at both those lines to understand what the table contains. So we need a different function that will capture all those lines before the table. The function lines_table() described below will do this.
```
from eppy.results import readhtml # the eppy module with functions to read the html
fname = "../eppy/resources/outputfiles/V_8_1/ASHRAE30pct.PI.Final11_OfficeMedium_STD2010_Chicago-baseTable.html" # the html file you want to read
filehandle = open(fname, 'r').read() # get a file handle to the html file
ltables = readhtml.lines_table(filehandle) # reads the tables with their titles
```
The html snippet shown above is the last table in HTML file we just opened. We have used lines_table() to read the tables into the variable ltables. We can get to the last table by ltable[-1]. Let us print it and see what we have.
```
import pprint
pp = pprint.PrettyPrinter()
pp.pprint(ltables[-1])
```
We can see that ltables has captured all the lines before the table. Let us make our code more explicit to see this
```
last_ltable = ltables[-1]
lines_before_table = last_ltable[0]
table_itself = last_ltable[-1]
pp.pprint(lines_before_table)
```
We found this table the easy way this time, because we knew it was the last one. How do we find it if we don't know where it is in the file ? Python comes to our rescue :-) Let assume that we want to find the table that has the following two lines before it.
- Report: FANGER DURING COOLING AND ADAPTIVE COMFORT
- For: PERIMETER_MID_ZN_4
```
line1 = 'Report: FANGER DURING COOLING AND ADAPTIVE COMFORT'
line2 = 'For: PERIMETER_MID_ZN_4'
#
# check if those two lines are before the table
line1 in lines_before_table and line2 in lines_before_table
# find all the tables where those two lines are before the table
[ltable for ltable in ltables
if line1 in ltable[0] and line2 in ltable[0]]
```
That worked !
What if you want to find the words "FANGER" and "PERIMETER_MID_ZN_4" before the table. The following code will do it.
```
# sample code to illustrate what we are going to do
last_ltable = ltables[-1]
lines_before_table = last_ltable[0]
table_itself = last_ltable[-1]
# join lines_before_table into a paragraph of text
justtext = '\n'.join(lines_before_table)
print(justtext)
"FANGER" in justtext and "PERIMETER_MID_ZN_4" in justtext
# Let us combine the this trick to find the table
[ltable for ltable in ltables
if "FANGER" in '\n'.join(ltable[0]) and "PERIMETER_MID_ZN_4" in '\n'.join(ltable[0])]
```
## Extracting data from the tables
The tables in the HTML page in general have text in the top header row. The first vertical row has text. The remaining cells have numbers. We can identify the numbers we need by looking at the label in the top row and the label in the first column. Let us construct a simple example and explore this.
```
# ignore the following three lines. I am using them to construct the table below
from IPython.display import HTML
atablestring = '<TABLE cellpadding="4" style="border: 1px solid #000000; border-collapse: collapse;" border="1">\n <TR>\n <TD> </TD>\n <TD>a b</TD>\n <TD>b c</TD>\n <TD>c d</TD>\n </TR>\n <TR>\n <TD>x y</TD>\n <TD>1</TD>\n <TD>2</TD>\n <TD>3</TD>\n </TR>\n <TR>\n <TD>y z</TD>\n <TD>4</TD>\n <TD>5</TD>\n <TD>6</TD>\n </TR>\n <TR>\n <TD>z z</TD>\n <TD>7</TD>\n <TD>8</TD>\n <TD>9</TD>\n </TR>\n</TABLE>'
HTML(atablestring)
```
This table is actually in the following form:
```
atable = [["", "a b", "b c", "c d"],
["x y", 1, 2, 3 ],
["y z", 4, 5, 6 ],
["z z", 7, 8, 9 ],]
```
We can see the labels in the table. So we can look at row "x y" and column "c d". The value there is 3
right now we can get to it by saying atable[1][3]
```
print(atable[1][3])
```
readhtml has some functions that will let us address the values by the labels. We use a structure from python called named tuples to do this. The only limitation is that the labels have to be letters or digits. Named tuples do not allow spaces in the labels. We could replace the space with an underscore ' _ '. So "a b" will become "a_b". So we can look for row "x_y" and column "c_d". Let us try this out.
```
from eppy.results import readhtml
h_table = readhtml.named_grid_h(atable)
print(h_table.x_y.c_d)
```
We can still get to the value by index
```
print(h_table[0][2])
```
Note that we used atable[1][3], but here we used h_table[0][2]. That is because h_table does not count the rows and columns where the labels are.
We can also do the following:
```
print(h_table.x_y[2])
# or
print(h_table[0].c_d)
```
Wow … that is pretty cool. What if we want to just check what the labels are ?
```
print(h_table._fields)
```
That gives us the horizontal labels. How about the vertical labels ?
```
h_table.x_y._fields
```
There you go !!!
How about if I want to use the labels differently ? Say I want to refer to the row first and then to the column. That would be saying table.c_d.x_y. We can do that by using a different function
```
v_table = readhtml.named_grid_v(atable)
print(v_table.c_d.x_y)
```
And we can do the following
```
print(v_table[2][0])
print(v_table.c_d[0])
print(v_table[2].x_y)
```
Let us try to get the numbers in the first column and then get their sum
```
v_table.a_b
```
Looks like we got the right column. But not in the right format. We really need a list of numbers
```
[cell for cell in v_table.a_b]
```
That looks like what we wanted. Now let us get the sum
```
values_in_first_column = [cell for cell in v_table.a_b]
print(values_in_first_column)
print(sum(values_in_first_column)) # sum is a builtin function that will sum a list
```
To get the first row we use the variable h_table
```
values_in_first_row = [cell for cell in h_table.x_y]
print(values_in_first_row)
print(sum(values_in_first_row))
```
## Fast HTML table file read
To read the html table files you would usually use the functions described in [Reading outputs from E+](./Outputs_Tutorial.html). For instance you would use the functions as shown below.
```
from eppy.results import readhtml # the eppy module with functions to read the html
import pprint
pp = pprint.PrettyPrinter()
fname = "../eppy/resources/outputfiles/V_7_2/5ZoneCAVtoVAVWarmestTempFlowTable_ABUPS.html" # the html file you want to read
html_doc = open(fname, 'r').read()
htables = readhtml.titletable(html_doc) # reads the tables with their titles
firstitem = htables[0]
pp.pprint(firstitem)
```
`titletable` reads all the tables in the HTML file. With large E+ models, this file can be extremely large and `titletable` will load all the tables into memory. This can take several minutes. If you are trying to get one table or one value from a table, waiting several minutes for your result can be excessive.
If you know which table you are looking for, there is a faster way of doing this. We used index=0 in the above example to get the first table. If you know the index of the file you are looking for, you can use a faster function to get the table as shown below
```
from eppy.results import fasthtml
fname = "../eppy/resources/outputfiles/V_7_2/5ZoneCAVtoVAVWarmestTempFlowTable_ABUPS.html" # the html file you want to read
filehandle = open(fname, 'r') # get a file handle to the html file
firsttable = fasthtml.tablebyindex(filehandle, 0)
pp.pprint(firstitem)
```
You can also get the table if you know the title of the table. This is the **bold** text just before the table in the HTML file. The title of our table is **Site and Source Energy**. The function `tablebyname` will get us the table.
```
filehandle = open(fname, 'r') # get a file handle to the html file
namedtable = fasthtml.tablebyname(filehandle, "Site and Source Energy")
pp.pprint(namedtable)
```
Couple of things to note here:
- We have to open the file again using `filehandle = open(fname, 'r')`
- This is because both `tablebyname` and `tablebyindex` will close the file once they are done
- Some tables do not have a **bold title** just before the table. `tablebyname` will not work for those tables
| github_jupyter |
# `clean_country()`: Clean and validate countries and regions
## Introduction
The function `clean_country()` cleans a column containing country names and/or [ISO 3166](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes) country codes, and standardizes them in a desired format. The function `validate_country()` validates either a single country or a column of countries, returning True if the value is valid, and False otherwise. The countries/regions supported and the regular expressions used can be found on [github](https://github.com/sfu-db/dataprep/blob/develop/dataprep/clean/country_data.tsv).
Countries can be converted to and from the following formats via the `input_format` and `output_format` parameters:
* Short country name (name): "United States"
* Official state name (official): "United States of America"
* ISO 3166-1 alpha-2 (alpha-2): "US"
* ISO 3166-1 alpha-3 (alpha-3): "USA"
* ISO 3166-1 numeric (numeric): "840"
`input_format` can also be set to "auto" which automatically infers the input format.
The `strict` parameter allows for control over the type of matching used for the "name" and "official" input formats.
* False (default for `clean_country()`), search the input for a regex match
* True (default for `validate_country()`), look for a direct match with a country value in the same format
The `fuzzy_dist` parameter sets the maximum edit distance (number of single character insertions, deletions or substitutions required to change one word into the other) allowed between the input and a country regex.
* 0 (default), countries at most 0 edits from matching a regex are successfully cleaned
* 1, countries at most 1 edit from matching a regex are successfully cleaned
* n, countries at most n edits from matching a regex are successfully cleaned
Invalid parsing is handled with the `errors` parameter:
* "coerce" (default), invalid parsing will be set as NaN
* "ignore", then invalid parsing will return the input
* "raise", then invalid parsing will raise an exception
After cleaning, a **report** is printed that provides the following information:
* How many values were cleaned (the value must be transformed)
* How many values could not be parsed
* And the data summary: how many values in the correct format, and how many values are null
The following sections demonstrate the functionality of `clean_country()` and `validate_country()`.
### An example dirty dataset
```
import pandas as pd
import numpy as np
df = pd.DataFrame({"messy_country":
["Canada", "foo canada bar", "cnada", "northern ireland",
" ireland ", "congo, kinshasa", "congo, brazzaville",
304, "233", " tr ", "ARG", "hello", np.nan, "NULL"]
})
df
```
## 1. Default `clean_country()`
By default, the `input_format` parameter is set to "auto" (automatically determines the input format), the `output_format` parameter is set to "name". The `fuzzy_dist` parameter is set to 0 and `strict` is False. The `errors` parameter is set to "coerce" (set NaN when parsing is invalid).
```
from dataprep.clean import clean_country
clean_country(df, "messy_country")
```
Note "Canada" is considered not cleaned in the report since its cleaned value is the same as the input. Also, "northern ireland" is invalid because it is part of the United Kingdom. Kinshasa and Brazzaville are the capital cities of their respective countries.
## 2. Input formats
This section demonstrates the supported country input formats.
### name
If the input contains a match with one of the country regexes then it is successfully converted.
```
clean_country(df, "messy_country", input_format="name")
```
### official
Does the same thing as `input_format = "name"`.
```
clean_country(df, "messy_country", input_format="official")
```
### alpha-2
Looks for a direct match with a ISO 3166-1 alpha-2 country code, case insensitive and ignoring leading and trailing whitespace.
```
clean_country(df, "messy_country", input_format="alpha-2")
```
### alpha-3
Looks for a direct match with a ISO 3166-1 alpha-3 country code, case insensitive and ignoring leading and trailing whitespace.
```
clean_country(df, "messy_country", input_format="alpha-3")
```
### numeric
Looks for a direct match with a ISO 3166-1 numeric country code, case insensitive and ignoring leading and trailing whitespace. Works on integers and strings.
```
clean_country(df, "messy_country", input_format="numeric")
```
## 3. Output formats
This section demonstrates the supported output country formats.
### official
```
clean_country(df, "messy_country", output_format="official")
```
### alpha-2
```
clean_country(df, "messy_country", output_format="alpha-2")
```
### alpha-3
```
clean_country(df, "messy_country", output_format="alpha-3")
```
### numeric
```
clean_country(df, "messy_country", output_format="numeric")
```
### Any combination of input and output formats may be used.
```
clean_country(df, "messy_country", input_format="alpha-2", output_format="official")
```
## 4. `strict` parameter
This parameter allows for control over the type of matching used for "name" and "official" input formats. When False, the input is searched for a regex match. When True, matching is done by looking for a direct match with a country in the same format.
```
clean_country(df, "messy_country", strict=True)
```
"foo canada bar", "congo kinshasa" and "congo brazzaville" are now invalid because they are not a direct match with a country in the "name" or "official" formats.
## 5. Fuzzy Matching
The `fuzzy_dist` parameter sets the maximum edit distance (number of single character insertions, deletions or substitutions required to change one word into the other) allowed between the input and a country regex. If an input is successfully cleaned by `clean_country()` with `fuzzy_dist = 0` then that input with one character inserted, deleted or substituted will match with `fuzzy_dist = 1`. This parameter only applies to the "name" and "official" input formats.
### `fuzzy_dist = 1`
Countries at most one edit away from matching a regex are successfully cleaned.
```
df = pd.DataFrame({"messy_country":
["canada", "cnada", "australa", "xntarctica", "koreea", "cxnda",
"afghnitan", "country: cnada", "foo indnesia bar"]
})
clean_country(df, "messy_country", fuzzy_dist=1)
```
### `fuzzy_dist = 2`
Countries at most two edits away from matching a regex are successfully cleaned.
```
clean_country(df, "messy_country", fuzzy_dist=2)
```
## 6. `inplace` parameter
This just deletes the given column from the returned dataframe.
A new column containing cleaned coordinates is added with a title in the format `"{original title}_clean"`.
```
clean_country(df, "messy_country", fuzzy_dist=2, inplace=True)
```
## 7. `validate_country()`
`validate_country()` returns True when the input is a valid country value otherwise it returns False. Valid types are the same as `clean_country()`. By default `strict = True`, as opposed to `clean_country()` which has `strict` set to False by default. The default `input_format` is "auto".
```
from dataprep.clean import validate_country
print(validate_country("switzerland"))
print(validate_country("country = united states"))
print(validate_country("country = united states", strict=False))
print(validate_country("ca"))
print(validate_country(800))
```
### `validate_country()` on a pandas series
Since `strict = True` by default, the inputs "foo canada bar", "congo, kinshasa" and "congo, brazzaville" are invalid since they don't directly match a country in the "name" or "official" formats.
```
df = pd.DataFrame({"messy_country":
["Canada", "foo canada bar", "cnada", "northern ireland",
" ireland ", "congo, kinshasa", "congo, brazzaville",
304, "233", " tr ", "ARG", "hello", np.nan, "NULL"]
})
df["valid"] = validate_country(df["messy_country"])
df
```
### `strict = False`
For "name" and "official" input types the input is searched for a regex match.
```
df["valid"] = validate_country(df["messy_country"], strict=False)
df
```
### Specifying `input_format`
```
df["valid"] = validate_country(df["messy_country"], input_format="numeric")
df
```
## Credit
The country data and regular expressions used are based on the [country_converter](https://github.com/konstantinstadler/country_converter) project.
| github_jupyter |
### 자료형
- 리스트
- 튜플
- 딕셔너리
### 리스트
- 순서가 있는 데이터의 집합
```
# 리스트 변수 선언하는 방법
ls = [1,"python",False]
print(type(ls), ls)
# 리스트 데이터를 문자열로 합쳐주는 함수 - join
a = ["data", "science", "python"]
result = "@".join(a)
print(result, type(result))
```
### quiz 1
- 리스트 데이터를 문자열로 합쳐서 문장으로 만들어 주세요.
- 가장 앞글자는 대문자, 마지막에는 마침표를 찍어주세요.
ls =["python","is", "best", "programming", "language"]
```
ls =["python", "is", "best", "programming", "language"]
data1 = ls[0][0].upper() + ls[0][1:]
data2 = " ".join(ls[1:])
print(data1 + " " + data2 + ".")
ls =["python", "is", "best", "programming", "language"]
result = " ".join(ls)
result = result[0].upper() + result[1:] + "."
print(result)
```
#### 리스트의 오프셋
- 문자열은 하나의 문자를 오프셋 단위로 인식
- 리스트는 하나의 값을 오프셋 단위로 인식
```
a = ["python", "data", 1, True, "fast"]
print(a[1])
print(a[2:])
print(a[::-1])
print(a[-2:])
print(a[-2:][0])
```
### quiz 2
- 홀수 데이터를 거꾸로 출력하세요.
- 오프셋 사용
'''
ls = [0,1,2,3,4,5,6,7,8,9]
'''
-결과
'''
result => [9,7,5,3,1]
'''
```
ls = [0,1,2,3,4,5,6,7,8,9]
result = ls[::-2]
print(result)
ls = [0,1,2,3,4,5,6,7,8,9]
result = ls[1::2][::-1] #오프셋 사용
print(result)
# 리스트 데이터를 문자열 데이터로 바꿔주는 함수 : join
# 문자열 데이터를 리스트 데이터로 바꿔주는 함수 : split
txt = "python data science"
ls = ["python", "data", "science"]
result = txt.split(" ")
print(result)
```
### 리스트 함수
- append : 데이터 추가
- sort : 데이터 정렬
- reverse : 데이터를 역순으로 정렬
```
# append 함수
ls = ["data", "science"]
ls.append("fastcampus")
print(ls)
# sort 함수
ls = ["fast", "data", "campus", "science"]
ls.sort()
print(ls)
# reverse 함수
ls.reverse()
print(ls)
ls[1] = "slow"
print(ls)
```
### quiz 3
- "Beautiful is better than ugly." 단어의 길이가 긴 순서대로 재정렬해서 문장을 다시 만드는 코드를 작성해주세요
- <list변수>.sort(key=len)
- 결과
'''
Beautiful better ugly than is
'''
```
data = ["abc", "bc", "d", "efgh"]
data.sort(key=len)
print(data)
data = ["Beautiful", "is", "better", "than", "ugly"]
data.sort(key=len)
data.reverse()
print(data)
txt = "Beautiful is better than ugly."
result = txt.lower()
result = result.replace(".","")
result = txt.split(" ")
result.sort(key=len)
result.reverse()
print(result)
txt = "Beautiful is better than ugly."
result = txt.lower()[:-1] # 굳이 소문자 만들어야하는지?
result = result.split(" ")
result.sort(key=len)
result.reverse()
result = " ".join(result)
result = result[0].upper() + result[1:] + "."
print(result)
```
### 튜플
- 리스트와 같지만 수정이 불가능한 데이터 타입
- 리스트보다 저장공간을 적게 사용하는 특징
```
# 튜플 선언
tp = (1, 2, 3)
print(type(tp), tp)
tp[1] = 10
# 튜플에서의 오프셋 인덱스 사용
tp[0::2]
import sys
ls = [1,2,3,4,5]
tp = (1,2,3,4,5)
print(sys.getsizeof(ls), "byte")
print(sys.getsizeof(tp), "byte")
```
### 딕셔너리
- 순서가 없는 데이터 집합
- 키, 값으로 구성되어 있는 데이터 타입
```
# 선언
dic = {
1:"one",
"A":["data", "science"],
"숫자":1234,
}
print(type(dic), dic)
# 데이터에 접근
dic["A"], dic[1]
# 데이터 수정
dic[1] = "하나"
dic
```
### quiz 1
- 아래의 테이블 데이터를 딕셔너리 데이터 타입으로 선언하세요.
key value
name python
adddr seoul
age 25
```
dic = {
"name":"python",
"addr":"seoul",
"age":25
}
print(dic)
```
# 딕셔너리에서 키값으로 사용할 수 있는 데이터 타입은 문자열과 정수 데이터 타입만 사용 가능합니다.
```
# 딕셔너리에서는 오프셋 인덱스 사용불가
dic = {"data1":1, "data2":2, "data3":3}
dic
dic[1] # []안에는 key 값이 들어가야함
```
#### 딕셔너리 함수
- keys() : 키 데이터만 가져오는 함수
- values() : 값 데이터만 가져오는 함수
- items() : 키와 값을 가져오는 함수
- update() : 두개의 딕셔너리를 합쳐주는 함수
```
dic = {
"data1":1,
"data2":2,
"data3":3,
}
dic
# keys
result = dic.keys()
print(type(result), result)
# values
result = dic.values()
result
# items
result = dic.items()
result
# update
dic1 = {1:"a", 2:"b"}
dic2 = {2:"c", 3:"d"}
# dic3 = {1:"a", 2:"c", 3:"d"}
dic1.update(dic2)
dic3 = dic1
print(dic3)
```
#### quiz 2
- "국어 점수는 80점, 영어 점수는 90점, 수학 점수는 70점" 이 데이터를 리스트, 튜플, 딕셔너리 데이터 타입으로 나타내세요.
```
ls = ["kor", 80, "eng", 90, "mat", 70]
tp = ("kor", 80, "eng", 90, "mat", 70)
dic = {"kor":80, "eng":90, "mat":70}
print(ls, tp, dic)
# answer
# List 1
sub = ["kor", "eng", "mat"]
sco = [80, 90, 70]
#print(sub, sco)
# List 2
ls = [("kor", 80), ("eng", 90), ("mat", 70)]
#print(ls)
# Tuple
tp = (("kor", 80), ("eng", 90), ("mat", 70))
#print(tp)
# Dictionary
dic = {
"kor":80,
"eng":90,
"mat":70
}
print(dic)
```
| github_jupyter |
# Applying the Expected Context Framework to the Switchboard Corpus
### Using `DualContextWrapper`
This notebook demonstrates how our implementation of the Expected Context Framework can be applied to the Switchboard dataset. See [this dissertation](https://tisjune.github.io/research/dissertation) for more details about the framework, and more comments on the below analyses.
This notebook will show how to apply `DualContextWrapper`, a wrapper transformer that keeps track of two instances of `ExpectedContextModelTransformer`. For a version of this demo that initializes two separate instances of `ExpectedContextModelTransformer` instead, and that more explicitly demonstrates that functionality, see [this notebook](https://github.com/CornellNLP/Cornell-Conversational-Analysis-Toolkit/blob/ecf/convokit/expected_context_framework/demos/switchboard_exploration_demo.ipynb).
```
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import math
import os
```
## 1. Loading and preprocessing the dataset
For this demo, we'll use the Switchboard corpus---a collection of telephone conversations which have been annotated with various dialog acts. More information on the dataset, as it exists in ConvoKit format, can be found [here](https://convokit.cornell.edu/documentation/switchboard.html); the original data is described [here](https://web.stanford.edu/~jurafsky/ws97/CL-dialog.pdf).
We will actually use a preprocessed version of the Switchboard corpus, which we can access below. Since Switchboard consists of transcribed telephone conversations, there are many disfluencies and backchannels, that make utterances messier, and that make it hard to identify what counts as an actual turn. In the version of the corpus we consider, for the purpose of demonstration, we remove the disfluencies and backchannels (acknowledging that we're discarding important parts of the conversations).
```
from convokit import Corpus
from convokit import download
# OPTION 1: DOWNLOAD CORPUS
# UNCOMMENT THESE LINES TO DOWNLOAD CORPUS
# DATA_DIR = '<YOUR DIRECTORY>'
# SW_CORPUS_PATH = download('switchboard-processed-corpus', data_dir=DATA_DIR)
# OPTION 2: READ PREVIOUSLY-DOWNLOADED CORPUS FROM DISK
# UNCOMMENT THIS LINE AND REPLACE WITH THE DIRECTORY WHERE THE TENNIS-CORPUS IS LOCATED
# SW_CORPUS_PATH = '<YOUR DIRECTORY>'
sw_corpus = Corpus(SW_CORPUS_PATH)
sw_corpus.print_summary_stats()
utt_eg_id = '3496-79'
```
As input, we use a preprocessed version of the utterance that only contains alphabetical words, found in the `alpha_text` metadata field.
```
sw_corpus.get_utterance(utt_eg_id).meta['alpha_text']
```
In order to avoid capturing topic-specific information, we restrict our analyses to a vocabulary of unigrams that occurs across many topics, and across many conversations:
```
from collections import defaultdict

# For each word, collect the set of conversation topics it appears under,
# then reduce to a per-word topic count.
topic_counts = defaultdict(set)
for utt in sw_corpus.iter_utterances():
    utt_topic = sw_corpus.get_conversation(utt.conversation_id).meta['topic']
    for token in set(utt.meta['alpha_text'].lower().split()):
        topic_counts[token].add(utt_topic)
topic_counts = {token: len(topics) for token, topics in topic_counts.items()}

# Likewise, count the number of distinct conversations each word appears in.
word_convo_counts = defaultdict(set)
for utt in sw_corpus.iter_utterances():
    for token in set(utt.meta['alpha_text'].lower().split()):
        word_convo_counts[token].add(utt.conversation_id)
word_convo_counts = {token: len(convos) for token, convos in word_convo_counts.items()}

# Restrict the vocabulary to words spanning >= 33 topics and >= 200 conversations,
# to avoid capturing topic-specific information.
min_topic_words = {token for token, n in topic_counts.items() if n >= 33}
min_convo_words = {token for token, n in word_convo_counts.items() if n >= 200}
vocab = sorted(min_topic_words.intersection(min_convo_words))
len(vocab)
from convokit.expected_context_framework import ColNormedTfidfTransformer, DualContextWrapper
```
## 2. Applying the Expected Context Framework
To apply the Expected Context Framework, we start by converting the input utterance text to an input vector representation. Here, we represent utterances in a term-document matrix that's _normalized by columns_ (empirically, we found that this ensures that the representations derived by the framework aren't skewed by the relative frequency of utterances). We use `ColNormedTfidfTransformer` transformer to do this:
```
tfidf_obj = ColNormedTfidfTransformer(input_field='alpha_text', output_field='col_normed_tfidf', binary=True, vocabulary=vocab)
_ = tfidf_obj.fit(sw_corpus)
_ = tfidf_obj.transform(sw_corpus)
```
We now use the Expected Context Framework. In short, the framework derives vector representations, and other characterizations, of terms and utterances that are based on their _expected conversational context_---i.e., the replies we expect will follow a term or utterance, or the preceding utterances that we expect the term/utterance will reply to.
We are going to derive characterizations based both on the _forwards_ context, i.e., the expected replies, and the _backwards_ context, i.e., the expected predecessors. We'll apply the framework in each direction, and then compare the characterizations that result. To take care of both interlocked models, we use the `DualContextWrapper` transformer, which will keep track of two `ExpectedContextModelTransformer`s: one that relates utterances to predecessors (`reply_to`), and that outputs utterance-level attributes with the prefix `bk`; the other that relates utterances to replies (`next_id`) and outputs utterance-level attributes with the prefix `fw`. These parameters are specified via the `context_fields` and `output_prefixes` arguments.
Other arguments passed:
* `vect_field` and `context_vect_field` respectively denote the input vector representations of utterances and context utterances that `ec_fw` will work with. Here, we'll use the same tf-idf representations that we just computed above.
* `n_svd_dims` denotes the dimensionality of the vector representations that `ec_fw` will output. This is something that you can play around with---for this dataset, we found that more dimensions resulted in messier output, and a coarser, lower-dimensional representation was slightly more interpretable. (Technical note: technically, `ec_fw` produces vector representations of dimension `n_svd_dims`-1, since by default, it removes the first latent dimension, which we find tends to strongly reflect term frequency.)
* `n_clusters` denotes the number of utterance types that `ec_fw` will infer, given the representations it computes. Note that this is an interpretative step: looking at clusters of utterances helps us get a sense of what information the representations are capturing; this value does not actually impact the representations and other characterizations we derive.
* `random_state` and `cluster_random_state` are fixed for this demo, so we produce deterministic output.
```
dual_context_model = DualContextWrapper(context_fields=['reply_to','next_id'], output_prefixes=['bk','fw'],
vect_field='col_normed_tfidf', context_vect_field='col_normed_tfidf',
n_svd_dims=15, n_clusters=2,
random_state=1000, cluster_random_state=1000)
```
We'll fit the transformer on the subset of utterances and replies that have at least 5 unigrams from our vocabulary.
```
dual_context_model.fit(sw_corpus,selector=lambda x: x.meta.get('col_normed_tfidf__n_feats',0)>=5,
context_selector=lambda x: x.meta.get('col_normed_tfidf__n_feats',0)>= 5)
```
### Interpreting derived representations
Before applying the two transformers, `ec_fw` and `ec_bk` to transform the corpus, we can examine the representations and characterizations it's derived over the training data (note that in this case, the training data is also the corpus that we analyze, but this needn't be the case in general---see [this demo](https://github.com/CornellNLP/Cornell-Conversational-Analysis-Toolkit/blob/master/convokit/expected_context_framework/demos/wiki_awry_demo.ipynb) for an example).
First, to interpret the representations derived by each model, we can inspect the clusters of representations that we've inferred, for both the forwards and backwards direction. We can access the forwards and backwards models as elements of the `ec_models` attribute. The following function calls print out representative terms and utterances, as well as context terms and utterances, per cluster (next two cells; note that the output is quite long).
```
dual_context_model.ec_models[0].print_clusters(corpus=sw_corpus)
dual_context_model.ec_models[1].print_clusters(corpus=sw_corpus)
```
demo continues below
We can see that in each case, two clusters emerge that roughly correspond to utterances recounting personal experiences, and those providing commentary, generally not about personal matters. We'll label them as such, noting that there's a roughly 50-50 split with slightly more "personal" utterances than "commentary" ones:
```
dual_context_model.ec_models[0].set_cluster_names(['personal', 'commentary'])
dual_context_model.ec_models[1].set_cluster_names(['commentary', 'personal'])
```
### Interpreting derived characterizations
The transformer also computes some term-level statistics, which we can return as a Pandas dataframe:
* forwards and backwards ranges (`fw_range` and `bk_range` respectively): we roughly interpret these as modeling the strengths of our forwards expectations of the replies that a term tends to get, or the backwards expectations of the predecessors that the term tends to follow.
* shift: this statistic corresponds to the distance between the backwards and forwards representations for each term; we interpret it as the extent to which a term shifts the focus of a conversation.
* orientation (`orn`): this statistic compares the relative magnitude of forwards and backwards ranges. In a [counseling conversation setting](https://www.cs.cornell.edu/~cristian/Orientation_files/orientation-forwards-backwards.pdf) we interpreted orientation as a measure of the relative extent to which an interlocutor aims to advance the conversation forwards with a term, versus address existing content.
```
term_df = dual_context_model.get_term_df()
term_df.head()
k=10
print('low orientation')
display(term_df.sort_values('orn').head(k)[['orn']])
print('high orientation')
display(term_df.sort_values('orn').tail(k)[['orn']])
print('\nlow shift')
display(term_df.sort_values('shift').head(k)[['shift']])
print('high shift')
display(term_df.sort_values('shift').tail(k)[['shift']])
```
### Deriving utterance-level representations
We now use the transformer to derive utterance-level characterizations, by transforming the corpus with it. Again, we focus on utterances that are sufficiently long:
```
_ = dual_context_model.transform(sw_corpus, selector=lambda x: x.meta.get('col_normed_tfidf__n_feats',0)>=5)
```
The `transform` function does the following.
First, it (or rather, its constituent `ExpectedContextModelTransformer`s) derives vector representations of utterances, stored as `fw_repr` and `bk_repr`:
```
sw_corpus.vectors
```
Next, it derives ranges of utterances, stored in the metadata as `fw_range` and `bk_range`:
```
eg_ut = sw_corpus.get_utterance(utt_eg_id)
print('Forwards range:', eg_ut.meta['fw_range'])
print('Backwards range:', eg_ut.meta['bk_range'])
```
It also assigns utterances to inferred types:
```
print('Forwards cluster:', eg_ut.meta['fw_clustering.cluster'])
print('Backwards cluster:', eg_ut.meta['bk_clustering.cluster'])
```
And computes orientations and shifts:
```
print('shift:', eg_ut.meta['shift'])
print('orientation:', eg_ut.meta['orn'])
```
## 3. Analysis: correspondence to discourse act labels
We explore the relation between the characterizations we've derived, and the various annotations that the utterances are labeled with (for more information on the annotation scheme, see the [manual here](https://web.stanford.edu/~jurafsky/ws97/manual.august1.html)). See [this dissertation](https://tisjune.github.io/research/dissertation) for further explanation of the analyses and findings below. A high-level comment is that this is a tough dataset for the framework to work with, given the relative lack of structure---something future work could think more carefully about.
To facilitate the analysis, we extract relevant utterance attributes into a Pandas dataframe:
```
df = sw_corpus.get_attribute_table('utterance',
['bk_clustering.cluster', 'fw_clustering.cluster',
'orn', 'shift', 'tags'])
df = df[df['bk_clustering.cluster'].notnull()]
```
We will stick to examining the 9 most common tags in the data:
```
tag_subset = ['aa', 'b', 'ba', 'h', 'ny', 'qw', 'qy', 'sd', 'sv']
for tag in tag_subset:
df['has_' + tag] = df.tags.apply(lambda x: tag in x.split())
```
To start, we explore how the forwards and backwards vector representations correspond to these labels. To do this, we will compute log-odds ratios between the inferred utterance clusters and these labels:
```
def compute_log_odds(col, bool_col, val_subset=None):
    """Compute log-odds ratios between values of `col` and a boolean attribute.

    For each candidate value v, compares the odds that an entry with
    col == v has bool_col == True against the same odds among entries
    with col != v.

    Args:
        col: pandas Series of (categorical) values.
        bool_col: boolean pandas Series aligned with `col`.
        val_subset: optional list of values to restrict to; defaults to
            all unique values of `col`.

    Returns:
        list of {'val': v, 'log_odds': float} dicts, one per candidate value.
    """
    candidate_vals = col.unique() if val_subset is None else val_subset
    entries = []
    for v in candidate_vals:
        is_val = (col == v)
        # 2x2 contingency counts: value membership x boolean attribute
        true_with = sum(is_val & bool_col)
        false_with = sum(is_val & ~bool_col)
        true_without = sum((col != v) & bool_col)
        false_without = sum((col != v) & ~bool_col)
        odds_ratio = (true_with / false_with) / (true_without / false_without)
        entries.append({'val': v, 'log_odds': np.log(odds_ratio)})
    return entries
bk_log_odds = []
for tag in tag_subset:
entry = compute_log_odds(df['bk_clustering.cluster'],df['has_' + tag], ['commentary'])[0]
entry['tag'] = tag
bk_log_odds.append(entry)
bk_log_odds_df = pd.DataFrame(bk_log_odds).set_index('tag').sort_values('log_odds')[['log_odds']]
fw_log_odds = []
for tag in tag_subset:
entry = compute_log_odds(df['fw_clustering.cluster'],df['has_' + tag], ['commentary'])[0]
entry['tag'] = tag
fw_log_odds.append(entry)
fw_log_odds_df = pd.DataFrame(fw_log_odds).set_index('tag').sort_values('log_odds')[['log_odds']]
print('forwards types vs labels')
display(fw_log_odds_df.T)
print('--------------------------')
print('backwards types vs labels')
display(bk_log_odds_df.T)
```
Tags further towards the right of the above tables (more positive log-odds) are those that co-occur more with the `commentary` than the `personal` utterance type. We briefly note that both forwards and backwards representations seem to draw a distinction between `sv` (opinion statements) and `sd` (non-opinion statements).
Next, we explore how the orientation and shift statistics relate to these labels. To do this, we compare statistics for utterances with a particular label, to statistics for utterances without that label.
```
from scipy import stats
def cohend(d1, d2):
    """Cohen's d effect size between two samples, using the pooled standard deviation."""
    size1, size2 = len(d1), len(d2)
    # Unbiased (ddof=1) sample variances, pooled with (n-1) weights.
    var1 = np.var(d1, ddof=1)
    var2 = np.var(d2, ddof=1)
    pooled_std = np.sqrt(((size1 - 1) * var1 + (size2 - 1) * var2) / (size1 + size2 - 2))
    return (np.mean(d1) - np.mean(d2)) / pooled_std
def get_pstars(p):
    """Map a p-value to its conventional significance-star annotation.

    Returns '***' for p < .001, '**' for p < .01, '*' for p < .05,
    and the empty string otherwise.
    """
    for cutoff, stars in ((0.001, '***'), (0.01, '**'), (0.05, '*')):
        if p < cutoff:
            return stars
    return ''
stat_col = 'orn'
entries = []
for tag in tag_subset:
has = df[df['has_' + tag]][stat_col]
hasnt = df[~df['has_' + tag]][stat_col]
entry = {'tag': tag, 'pval': stats.mannwhitneyu(has, hasnt)[1],
'cd': cohend(has, hasnt)}
entry['ps'] = get_pstars(entry['pval'] * len(tag_subset))
entries.append(entry)
orn_stat_df = pd.DataFrame(entries).set_index('tag').sort_values('cd')
orn_stat_df = orn_stat_df[np.abs(orn_stat_df.cd) >= .1]
stat_col = 'shift'
entries = []
for tag in tag_subset:
has = df[df['has_' + tag]][stat_col]
hasnt = df[~df['has_' + tag]][stat_col]
entry = {'tag': tag, 'pval': stats.mannwhitneyu(has, hasnt)[1],
'cd': cohend(has, hasnt)}
entry['ps'] = get_pstars(entry['pval'] * len(tag_subset))
entries.append(entry)
shift_stat_df = pd.DataFrame(entries).set_index('tag').sort_values('cd')
shift_stat_df = shift_stat_df[np.abs(shift_stat_df.cd) >= .1]
```
(We'll only show labels for which there's a sufficiently large difference, in cohen's delta, between utterances with and without the label)
```
print('orientation vs labels')
display(orn_stat_df.T)
print('--------------------------')
print('shift vs labels')
display(shift_stat_df.T)
```
We note that utterances containing questions (`qw`, `qy`) have higher shifts than utterances which do not. If you're familiar with the DAMSL designations for forwards- and backwards-looking communicative functions, the output for orientation might look a little puzzling; this is informative, suggesting that our view of what counts as forwards/backwards differs from the view espoused by the annotation scheme. We discuss this further in [this dissertation](https://tisjune.github.io/research/dissertation).
## 4. Model persistence
Finally, we briefly demonstrate how the model can be saved and loaded for later use
```
DUAL_MODEL_PATH = os.path.join(SW_CORPUS_PATH, 'dual_model')
dual_context_model.dump(DUAL_MODEL_PATH)
```
We dump latent context representations, clustering information, and various input parameters, for each constituent `ExpectedContextModelTransformer`, in separate directories under `DUAL_MODEL_PATH`:
```
ls $DUAL_MODEL_PATH
```
To load the learned model, we start by initializing a new model:
```
dual_model_new = DualContextWrapper(context_fields=['reply_to','next_id'], output_prefixes=['bk_new','fw_new'],
vect_field='col_normed_tfidf', context_vect_field='col_normed_tfidf',
wrapper_output_prefix='new',
n_svd_dims=15, n_clusters=2,
random_state=1000, cluster_random_state=1000)
dual_model_new.load(DUAL_MODEL_PATH, model_dirs=['bk','fw'])
```
We see that using the re-loaded model to transform the corpus results in the same representations and characterizations as the original one:
```
_ = dual_model_new.transform(sw_corpus, selector=lambda x: x.meta.get('col_normed_tfidf__n_feats',0)>=5)
sw_corpus.vectors
np.allclose(sw_corpus.get_vectors('bk_new_repr'), sw_corpus.get_vectors('bk_repr'))
np.allclose(sw_corpus.get_vectors('fw_new_repr'), sw_corpus.get_vectors('fw_repr'))
for ut in sw_corpus.iter_utterances(selector=lambda x: x.meta.get('col_normed_tfidf__n_feats',0)>=5):
assert ut.meta['orn'] == ut.meta['new_orn']
assert ut.meta['shift'] == ut.meta['new_shift']
```
## 5. Pipeline usage
We also implement a pipeline that handles the following:
* processes text (via a pipeline supplied by the user)
* transforms text to input representation (via `ColNormedTfidfTransformer`)
* derives framework output (via `DualContextWrapper`)
```
from convokit.expected_context_framework import DualContextPipeline
# see `demo_text_pipelines.py` in this demo's directory for details
# in short, this pipeline will either output the `alpha_text` metadata field
# of an utterance, or write the utterance's `text` attribute into the `alpha_text`
# metadata field
from demo_text_pipelines import switchboard_text_pipeline
```
We initialize the pipeline with the following arguments:
* `text_field` specifies which utterance metadata field to use as text input
* `text_pipe` specifies the pipeline used to compute the contents of `text_field`
* `tfidf_params` specifies the parameters to be passed into the underlying `ColNormedTfidfTransformer` object
* `min_terms` specifies the minimum number of terms in the vocabulary that an utterance must contain for it to be considered in fitting and transforming the underlying `DualContextWrapper` object (see the `selector` argument passed into `dual_context_model.fit` above)
All other arguments are inherited from `DualContextWrapper`.
```
pipe_obj = DualContextPipeline(context_fields=['reply_to','next_id'],
output_prefixes=['bk','fw'],
text_field='alpha_text', text_pipe=switchboard_text_pipeline(),
tfidf_params={'binary': True, 'vocabulary': vocab},
min_terms=5,
n_svd_dims=15, n_clusters=2,
random_state=1000, cluster_random_state=1000)
# note this might output a warning that `col_normed_tfidf` already exists;
# that's okay: the pipeline is just recomputing this matrix
pipe_obj.fit(sw_corpus)
```
Note that the pipeline enables us to transform ad-hoc string input:
```
eg_ut_new = pipe_obj.transform_utterance('How old were you when you left ?')
# note these attributes have the exact same values as those of eg_ut, computed above
print('shift:', eg_ut_new.meta['shift'])
print('orientation:', eg_ut_new.meta['orn'])
```
| github_jupyter |
```
uploaded={}
from google.colab import files
## Import all CSV files via the Colab upload widget
upload1 = files.upload()
uploaded.update(upload1)
##This data was publicly scraped from https://cryptoslam.io/ for 21 nft collections
##The data was consolidated into a csv file that can be found at
##https://github.com/kfoxIsProgrammer/TwitterNfters/blob/main/data/raw/nft_price_data/NFT_Top_21.csv
import pandas as pd
import numpy as np
import io
import re
import locale
## Set locale for currency parsing/formatting (locale.atof, locale.currency)
locale.setlocale(locale.LC_ALL, '')
df = pd.read_csv(io.BytesIO(uploaded["NFT_Top_21.csv"]))
dfTweets = pd.read_csv(io.BytesIO(uploaded["BoredApeYachtClubSentiment.csv"]))
## List of collection names to analyze
collectionsToDealWith = ["Bored Ape Yacht Club"]
## Dictionaries for data config (analysis window per collection) and output data
monthsToDealWithPerCollection = {"Bored Ape Yacht Club":{'start':"21-Jun", 'end':"21-Jul"}}
dataForOutput = {"Bored Ape Yacht Club" : {"21-Jun" : {'Sales':0, 'UniqueBuyers':0, 'Transactions':0}, "21-Jul" : {'Sales':0, 'UniqueBuyers':0, 'Transactions':0} }}
# Running totals across the whole analysis window.
totalSalesOverTime = 0
totalUniqueBuyersOverTime = 0
totalTransactionsOverTime = 0
totalTweetsOverTime = 0
k = 0  # row index into the per-month tweet-sentiment frame (dfTweets)
for collName in collectionsToDealWith:
    startMonth = ""
    endMonth = ""
    for i in range(len(df.index)):
        if df['Name'][i] == collName:
            # Open the analysis window once the configured start month is seen.
            if startMonth == "":
                if df['Month'][i] == monthsToDealWithPerCollection[collName]['start']:
                    startMonth = df['Month'][i]
            # Collect data while inside the [start, end] window.
            if startMonth != "" and endMonth == "":
                # Strip the leading '$' before parsing the sales figure.
                dataForOutput[collName][df['Month'][i]]['Sales'] += locale.atof(df['Sales(USD)'][i][1:])
                dataForOutput[collName][df['Month'][i]]['UniqueBuyers'] += df['UniqueBuyers'][i]
                dataForOutput[collName][df['Month'][i]]['Transactions'] += df['Total Transactions'][i]
                totalTweets = dfTweets.iloc[k]["NegativeTweets"] + dfTweets.iloc[k]["PositiveTweets"] + dfTweets.iloc[k]["NeutralTweets"]
                totalSalesOverTime += dataForOutput[collName][df['Month'][i]]['Sales']
                # BUG FIX: the two accumulators below were swapped in the original
                # (transactions were summed into the unique-buyers total and
                # vice versa), so the final printed totals were crossed.
                totalUniqueBuyersOverTime += dataForOutput[collName][df['Month'][i]]['UniqueBuyers']
                totalTransactionsOverTime += dataForOutput[collName][df['Month'][i]]['Transactions']
                totalTweetsOverTime += totalTweets
                # Per-month report for this collection.
                print("Bored Ape Yacht Club")
                print("Time Frame - " + str(startMonth) + " - " + str(endMonth))
                print("TotalSales in USD - " + str(locale.currency(dataForOutput['Bored Ape Yacht Club'][df['Month'][i]]['Sales'], grouping=True)))
                print("Total Unique Buyers - " + str(dataForOutput['Bored Ape Yacht Club'][df['Month'][i]]['UniqueBuyers']))
                print("Total Transactions - " + str(dataForOutput['Bored Ape Yacht Club'][df['Month'][i]]['Transactions']))
                print("Dollar per tweet - " + str(locale.currency((dataForOutput['Bored Ape Yacht Club'][df['Month'][i]]['Sales'])/totalTweets)))
                print("Avg Transaction " + str(locale.currency(dataForOutput['Bored Ape Yacht Club'][df['Month'][i]]['Sales'] / dataForOutput['Bored Ape Yacht Club'][df['Month'][i]]['Transactions'])))
                print(totalTweets)
                print()
                k += 1
            # Close the window once the configured end month is reached.
            if df['Name'][i] == "Bored Ape Yacht Club" and startMonth != "" and endMonth == "" and df['Month'][i] == monthsToDealWithPerCollection[collName]['end']:
                endMonth = df['Month'][i]
print()
print("Bored Ape Yacht Club")
print("Time Frame - " + str(startMonth) + " - " + str(endMonth))
print("TotalSales in USD - " + str(locale.currency(totalSalesOverTime, grouping=True)))
print("Total Unique Buyers - " + str(totalUniqueBuyersOverTime))
print("Total Transactions - " + str(totalTransactionsOverTime))
print("Avg Transaction "+str(totalSalesOverTime/totalTransactionsOverTime))
print("Dollar per tweet - " + str(locale.currency(totalSalesOverTime/totalTweetsOverTime)))
print(totalTweetsOverTime)
print("Change in sales " +str(round(((dataForOutput['Bored Ape Yacht Club']["21-Jun"]['Sales'] / dataForOutput['Bored Ape Yacht Club']["21-Jul"]['Sales']) * 100),1)) +"%")
print("Change "+str(round(((dfTweets.iloc[0]["NegativeTweets"] + dfTweets.iloc[0]["PositiveTweets"] + dfTweets.iloc[0]["NeutralTweets"])/ (dfTweets.iloc[1]["NegativeTweets"] + dfTweets.iloc[1]["PositiveTweets"] + dfTweets.iloc[1]["NeutralTweets"]))*100,1)) +"%")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_DecisionMaking/student/W2D3_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week 3, Day 2, Tutorial 3
# Linear Dynamical Systems & The Kalman Filter
__Content creators:__ Caroline Haimerl and Byron Galbraith
__Content reviewers:__ Jesse Livezey, Matt Krause, and Michael Waskom
**Useful reference:**
- Roweis, Ghahramani (1998): A unifying review of linear Gaussian Models
- Bishop (2006): Pattern Recognition and Machine Learning
**Acknowledgement**
This tutorial is in part based on code originally created by Caroline Haimerl for Dr. Cristina Savin's *Probabilistic Time Series* class at the Center for Data Science, New York University
```
#@title Video 1: Introduction
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="6f_51L3i5aQ", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
---
# Tutorial Objectives
In the previous tutorials we looked at inferring discrete latent states that give rise to our measurements. In this tutorial, we will learn how to infer a latent model when our states are continuous. Particular attention is paid to the Kalman filter and its mathematical foundation.
In this tutorial, you will:
* Review linear dynamical systems
* Learn about and implement the Kalman filter
* Explore how the Kalman filter can be used to smooth data from an eye-tracking experiment
```
# Install PyKalman (https://pykalman.github.io/)
!pip install pykalman --quiet
# Imports
import numpy as np
import matplotlib.pyplot as plt
import pykalman
from scipy import stats
#@title Figure settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
#@title Data retrieval and loading
import io
import os
import hashlib
import requests
fname = "W2D3_mit_eyetracking_2009.npz"
url = "https://osf.io/jfk8w/download"
expected_md5 = "20c7bc4a6f61f49450997e381cf5e0dd"
if not os.path.isfile(fname):
try:
r = requests.get(url)
except requests.ConnectionError:
print("!!! Failed to download data !!!")
else:
if r.status_code != requests.codes.ok:
print("!!! Failed to download data !!!")
elif hashlib.md5(r.content).hexdigest() != expected_md5:
print("!!! Data download appears corrupted !!!")
else:
with open(fname, "wb") as fid:
fid.write(r.content)
def load_eyetracking_data(data_fname=fname):
    """Load the MIT eye-tracking dataset from the downloaded .npz archive.

    Args:
        data_fname (str): path to the .npz archive (defaults to the
            module-level `fname` downloaded above).

    Returns:
        tuple: (subjects, images) where `subjects` is the per-subject data
        stored under the 'subjects' key of the archive and `images` is a
        list of stimulus images decoded from their raw JPG byte strings.
    """
    with np.load(data_fname, allow_pickle=True) as dobj:
        data = dict(**dobj)
    # Stimuli are stored as raw JPG bytes; decode each into an image array.
    images = [plt.imread(io.BytesIO(stim), format='JPG')
              for stim in data['stimuli']]
    subjects = data['subjects']
    return subjects, images
#@title Helper functions
np.set_printoptions(precision=3)
def plot_kalman(state, observation, estimate=None, label='filter', color='r-',
                title='LDS', axes=None):
    """Plot a 2D latent trajectory, its noisy observations, and an optional estimate.

    Left panel: X/Y positions of the true latent state, the observed data,
    and (if given) the estimated trajectory. Right panel: scatter of latent
    vs. observed values per dimension, or latent vs. estimated values when
    an estimate is supplied.

    Args:
        state (ndarray): true latent states, indexed as (timestep, dim).
        observation (ndarray): observed data, same indexing.
        estimate (ndarray, optional): estimated latent states, same indexing.
        label (str): legend label for the estimate line.
        color (str): matplotlib color for the estimate.
            NOTE(review): the default 'r-' is a *format string*, not a valid
            color keyword value — callers appear to pass plain colors; confirm.
        title (str): title of the trajectory panel.
        axes (tuple, optional): (ax1, ax2) to draw on; a new figure is
            created (and truth/data drawn) when None.

    Returns:
        tuple: the two axes drawn on, for further overlaying.
    """
    if axes is None:
        # Fresh figure: also draw the ground truth and raw data once.
        fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6))
        ax1.plot(state[:, 0], state[:, 1], 'g-', label='true latent')
        ax1.plot(observation[:, 0], observation[:, 1], 'k.', label='data')
    else:
        ax1, ax2 = axes
    if estimate is not None:
        ax1.plot(estimate[:, 0], estimate[:, 1], color=color, label=label)
    ax1.set(title=title, xlabel='X position', ylabel='Y position')
    ax1.legend()
    if estimate is None:
        # No estimate yet: correlate observations with the latent state.
        ax2.plot(state[:, 0], observation[:, 0], '.k', label='dim 1')
        ax2.plot(state[:, 1], observation[:, 1], '.', color='grey', label='dim 2')
        ax2.set(title='correlation', xlabel='latent', ylabel='observed')
    else:
        # Correlate the estimated latent state with the true latent state.
        ax2.plot(state[:, 0], estimate[:, 0], '.', color=color,
                 label='latent dim 1')
        ax2.plot(state[:, 1], estimate[:, 1], 'x', color=color,
                 label='latent dim 2')
        ax2.set(title='correlation',
                xlabel='real latent',
                ylabel='estimated latent')
    ax2.legend()
    return ax1, ax2
def plot_gaze_data(data, img=None, ax=None):
    """Scatter gaze positions, optionally overlaid on the stimulus image.

    Args:
        data (ndarray): gaze samples, indexed as (sample, [x, y]).
        img (ndarray, optional): stimulus image; when given, the axis limits
            are matched to the image (y inverted for image coordinates).
        ax (Axes, optional): axis to draw on; a new figure is made when None.

    Returns:
        Axes: the axis drawn on.
    """
    # overlay gaze on stimulus
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 6))
    xlim = None
    ylim = None
    if img is not None:
        ax.imshow(img, aspect='auto')
        # Flip the y-axis so gaze coordinates match image (row, col) space.
        ylim = (img.shape[0], 0)
        xlim = (0, img.shape[1])
    ax.scatter(data[:, 0], data[:, 1], c='m', s=100, alpha=0.7)
    ax.set(xlim=xlim, ylim=ylim)
    return ax
def plot_kf_state(kf, data, ax):
    """Run Kalman smoothing on `data` and overlay the smoothed path on `ax`.

    The filter's initial state mean is seeded from the first observation
    (remaining state dimensions default to 1). The start and end of the
    smoothed trajectory are marked with orange '>' and 's' markers.

    Args:
        kf: a pykalman KalmanFilter (with n_dim_state and smooth()).
        data (ndarray): observations, indexed as (timestep, dim).
        ax (Axes): axis to draw on.
    """
    mu_0 = np.ones(kf.n_dim_state)
    mu_0[:data.shape[1]] = data[0]  # seed observed dims with the first sample
    kf.initial_state_mean = mu_0
    mu, sigma = kf.smooth(data)
    ax.plot(mu[:, 0], mu[:, 1], 'limegreen', linewidth=3, zorder=1)
    ax.scatter(mu[0, 0], mu[0, 1], c='orange', marker='>', s=200, zorder=2)
    ax.scatter(mu[-1, 0], mu[-1, 1], c='orange', marker='s', s=200, zorder=2)
```
---
# Section 1: Linear Dynamical System (LDS)
```
#@title Video 2: Linear Dynamical Systems
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="2SWh639YgEg", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
Latent state variable: $$s_t = Fs_{t-1}+\zeta_t$$
Measured/observed variable: $$y_t = Hs_{t}+\eta_t$$
The latent state variable has dimension $D$ and the measured variable dimension $N$, dimensionality reduction here means that $D<N$.
Both latent and measured variable have Gaussian noise terms:
\begin{eqnarray}
\zeta_t & \sim & N(0, Q) \\
\eta_t & \sim & N(0, R) \\
s_0 & \sim & N(\mu_0, \Sigma_0)
\end{eqnarray}
As a consequence, $s_t$, $y_t$ and their joint distributions are Gaussian so we can easily compute the marginals and conditionals.
Just as in the HMM, the structure is that of a Markov chain where the state at time point $t$ is conditionally independent of previous states given the state at time point $t-1$.
## Section 1.1: Sampling
The first thing we will investigate is how to generate timecourse samples from a linear dynamical system given its parameters. We will start by defining the following system:
```
# task dimensions
n_dim_state = 2
n_dim_obs = 2
# initialize model parameters
params = {
'F': 0.5 * np.eye(n_dim_state), # state transition matrix
'Q': np.eye(n_dim_obs), # state noise covariance
'H': np.eye(n_dim_state), # observation matrix
'R': 0.1 * np.eye(n_dim_obs), # observation noise covariance
'mu_0': np.zeros(n_dim_state), # initial state mean
'sigma_0': 0.1 * np.eye(n_dim_state), # initial state noise covariance
}
```
**Note**: We used a parameter dictionary `params` above. As the number of parameters we need to provide to our functions increases, it can be beneficial to condense them into a data structure like this to clean up the number of inputs we pass in. The trade-off is that we have to know what is in our data structure to use those values, rather than looking at the function signature directly.
### Exercise 1: Sampling from a linear dynamical system
In this exercise you will implement the dynamics functions of a linear dynamical system to sample both a latent space trajectory (given parameters set above) and noisy measurements.
```
def sample_lds(n_timesteps, params, seed=0):
    """ Generate samples from a Linear Dynamical System specified by the provided
    parameters.

    NOTE: this is a student exercise — the simulation loop is deliberately
    left incomplete (the `...` placeholders below) and the function raises
    NotImplementedError until filled in.

    Args:
        n_timesteps (int): the number of time steps to simulate
        params (dict): a dictionary of model paramters: (F, Q, H, R, mu_0, sigma_0)
        seed (int): a random seed to use for reproducibility checks

    Returns:
        ndarray, ndarray: the generated state and observation data
    """
    n_dim_state = params['F'].shape[0]
    n_dim_obs = params['H'].shape[0]

    # set seed
    np.random.seed(seed)

    # precompute random samples from the provided covariance matrices
    # mean defaults to 0
    zi = stats.multivariate_normal(cov=params['Q']).rvs(n_timesteps)
    eta = stats.multivariate_normal(cov=params['R']).rvs(n_timesteps)

    # initialize state and observation arrays
    state = np.zeros((n_timesteps, n_dim_state))
    obs = np.zeros((n_timesteps, n_dim_obs))

    ###################################################################
    ## TODO for students: compute the next state and observation values
    # Fill out function and remove
    raise NotImplementedError("Student excercise: compute the next state and observation values")
    ###################################################################

    # simulate the system
    for t in range(n_timesteps):
        # write the expressions for computing state values given the time step
        if t == 0:
            # first state: draw from the initial distribution (mu_0, sigma_0)
            state[t] = ...
        else:
            # later states: propagate the previous state through F, add noise zi
            state[t] = ...
        # write the expression for computing the observation
        # (project the state through H and add observation noise eta)
        obs[t] = ...

    return state, obs
# Uncomment below to test your function
# state, obs = sample_lds(100, params)
# print('sample at t=3 ', state[3])
# plot_kalman(state, obs, title='sample')
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_8cfee88d.py)
*Example output:*
<img alt='Solution hint' align='left' width=1133 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial3_Solution_8cfee88d_1.png>
### Interactive Demo: Adjusting System Dynamics
To test your understanding of the parameters of a linear dynamical system, think about what you would expect if you made the following changes:
1. Reduce observation noise $R$
2. Increase respective temporal dynamics $F$
Use the interactive widget below to vary the values of $R$ and $F$.
```
#@title
#@markdown Make sure you execute this cell to enable the widget!

# Interactive widget: re-sample and re-plot the LDS as the observation-noise
# scale R and the state-transition scale F are varied with sliders.
@widgets.interact(R=widgets.FloatLogSlider(0.1, min=-3, max=1),
                  F=widgets.FloatSlider(0.5, min=0.0, max=1.0))
def explore_dynamics(R=0.1, F=0.5):
    params = {
        'F': F * np.eye(n_dim_state),  # state transition matrix
        'Q': np.eye(n_dim_obs),  # state noise covariance
        'H': np.eye(n_dim_state),  # observation matrix
        'R': R * np.eye(n_dim_obs),  # observation noise covariance
        'mu_0': np.zeros(n_dim_state),  # initial state mean
        'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
    }
    state, obs = sample_lds(100, params)
    plot_kalman(state, obs, title='sample')
```
---
# Section 2: Kalman Filtering
```
#@title Video 3: Kalman Filtering
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="VboZOV9QMOI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
We want to infer the latent state variable $s_t$ given the measured (observed) variable $y_t$.
$$P(s_t|y_1, ..., y_t, y_{t+1}, ..., y_T)\sim N(\hat{\mu_t}, \hat{\Sigma_t})$$
First we obtain estimates of the latent state by running the filter forward from $n=0,\ldots,N$.
$$s_t^{pred}\sim N(\hat{\mu}_t^{pred},\hat{\Sigma}_t^{pred})$$
Where $\hat{\mu}_t^{pred}$ and $\hat{\Sigma}_t^{pred}$ are derived as follows:
\begin{eqnarray}
\hat{\mu}_1^{pred} & = & F\hat{\mu}_{0} \\
\hat{\mu}_t^{pred} & = & F\hat{\mu}_{t-1}
\end{eqnarray}
*this is the prediction for $s_t$ obtained simply by taking the expected value of $s_{t-1}$ and projecting it forward one step using the transition matrix $F$*
\begin{eqnarray}
\hat{\Sigma}_1^{pred} & = & F\hat{\Sigma}_{0}F^T+Q \\
\hat{\Sigma}_t^{pred} & = & F\hat{\Sigma}_{t-1}F^T+Q
\end{eqnarray}
*same for the covariance taking into account the noise covariance $Q$*
update from observation to obtain $\hat{\mu}_t^{filter}$ and $\hat{\Sigma}_t^{filter}$
project to observational space:
$$y_t^{pred}\sim N(H\hat{\mu}_t^{pred}, H\hat{\Sigma}_t^{pred}H^T+R)$$
update prediction by actual data:
\begin{eqnarray}
s_t^{filter} & \sim & N(\hat{\mu}_t^{filter}, \hat{\Sigma}_t^{filter}) \\
\hat{\mu}_t^{filter} & = & \hat{\mu}_t^{pred}+K_t(y_t-H\hat{\mu}_t^{pred}) \\
\hat{\Sigma}_t^{filter} & = & (I-K_tH)\hat{\Sigma}_t^{pred}
\end{eqnarray}
Kalman gain matrix:
$$K_t=\hat{\Sigma}_t^{pred}H^T(H\hat{\Sigma}_t^{pred}H^T+R)^{-1}$$
*we use the latent-only prediction to project it to the observational space and compute a correction proportional to the error $y_t-HFz_{t-1}$ between prediction and data, coefficient of this correction is the Kalman gain matrix*
*if measurement noise is small and dynamics are fast -> estimation will depend mostly on observed data*
In order to explore the impact of filtering, we will use the following noisy periodic system:
```
# task dimensions
n_dim_state = 2
n_dim_obs = 2

# initialize model parameters
params = {
    'F': np.array([[1., 1.], [-(2*np.pi/20.)**2., .9]]),  # state transition matrix
    # Q acts on the latent state, so size it by n_dim_state
    # (the original used n_dim_obs; both are 2 here, but the intent is state)
    'Q': np.eye(n_dim_state),              # state noise covariance
    'H': np.eye(n_dim_state),              # observation matrix
    'R': 1.0 * np.eye(n_dim_obs),          # observation noise covariance
    'mu_0': np.zeros(n_dim_state),         # initial state mean
    'sigma_0': 0.1 * np.eye(n_dim_state),  # initial state noise covariance
}

state, obs = sample_lds(100, params)
plot_kalman(state, obs, title='sample')
```
## Exercise 2: Implement Kalman filtering
In this exercise you will implement the Kalman filter (forward) process. Your focus will be on writing the expressions for the Kalman gain, filter mean, and filter covariance at each time step (refer to the equations above).
```
def kalman_filter(data, params):
    """Perform Kalman filtering (forward pass) on the data given the provided
    system parameters.

    Args:
        data (ndarray): a sequence of observations of shape (n_timesteps, n_dim_obs)
        params (dict): a dictionary of model parameters: (F, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: the filtered state means, shape
            (n_timesteps, n_dim_state), and the filtered state covariances,
            shape (n_timesteps, n_dim_state, n_dim_state)
    """
    # pulled out of the params dict for convenience
    F = params['F']
    Q = params['Q']
    H = params['H']
    R = params['R']
    n_dim_state = F.shape[0]
    n_dim_obs = H.shape[0]
    I = np.eye(n_dim_state)  # identity matrix

    # state tracking arrays
    mu = np.zeros((len(data), n_dim_state))
    sigma = np.zeros((len(data), n_dim_state, n_dim_state))

    # filter the data
    for t, y in enumerate(data):
        if t == 0:
            # seed the recursion with the prior over the initial state
            mu_pred = params['mu_0']
            sigma_pred = params['sigma_0']
        else:
            # one-step prediction: project the previous filtered estimate forward
            mu_pred = F @ mu[t-1]
            sigma_pred = F @ sigma[t-1] @ F.T + Q

        # Kalman gain: how much to trust the observation relative to the prediction
        K = sigma_pred @ H.T @ np.linalg.inv(H @ sigma_pred @ H.T + R)
        # filtered state mean: correct the prediction by the gain-weighted innovation
        mu[t] = mu_pred + K @ (y - H @ mu_pred)
        # filtered state noise covariance: shrink the predicted uncertainty
        sigma[t] = (I - K @ H) @ sigma_pred

    return mu, sigma

# Uncomment below to test your function
# filtered_state_means, filtered_state_covariances = kalman_filter(obs, params)
# plot_kalman(state, obs, filtered_state_means, title="my kf-filter",
#             color='r', label='my kf-filter')
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_e9df5afe.py)
*Example output:*
<img alt='Solution hint' align='left' width=1133 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial3_Solution_e9df5afe_0.png>
---
# Section 3: Fitting Eye Gaze Data
```
#@title Video 4: Fitting Eye Gaze Data
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="M7OuXmVWHGI", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
Tracking eye gaze is used in both experimental and user interface applications. Getting an accurate estimation of where someone is looking on a screen in pixel coordinates can be challenging, however, due to the various sources of noise inherent in obtaining these measurements. A main source of noise is the general accuracy of the eye tracker device itself and how well it maintains calibration over time. Changes in ambient light or subject position can further reduce accuracy of the sensor. Eye blinks introduce a different form of noise as interruptions in the data stream which also need to be addressed.
Fortunately we have a candidate solution for handling noisy eye gaze data in the Kalman filter we just learned about. Let's look at how we can apply these methods to a small subset of data taken from the [MIT Eyetracking Database](http://people.csail.mit.edu/tjudd/WherePeopleLook/index.html) [[Judd et al. 2009](http://people.csail.mit.edu/tjudd/WherePeopleLook/Docs/wherepeoplelook.pdf)]. This data was collected as part of an effort to model [visual saliency](http://www.scholarpedia.org/article/Visual_salience) -- given an image, can we predict where a person is most likely going to look.
```
# load eyetracking data
subjects, images = load_eyetracking_data()
```
## Interactive Demo: Tracking Eye Gaze
We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlayed gaze trace.
Note that the images are rescaled below for display purposes, they were in their original aspect ratio during the task itself.
```
#@title
#@markdown Make sure you execute this cell to enable the widget!
@widgets.interact(subject_id=widgets.IntSlider(-1, min=-1, max=4),
                  image_id=widgets.IntSlider(0, min=0, max=2))
def plot_subject_trace(subject_id=-1, image_id=0):
    """Overlay one subject's gaze samples on a stimulus image.

    A subject_id of -1 shows the stimulus image with no gaze trace.
    """
    # an empty (3 images x 0 samples x 2 coords) trace stands in for "no subject"
    subject = np.zeros((3, 0, 2)) if subject_id == -1 else subjects[subject_id]
    gaze = subject[image_id]
    stimulus = images[image_id]

    fig, ax = plt.subplots()
    ax.imshow(stimulus, aspect='auto')
    ax.scatter(gaze[:, 0], gaze[:, 1], c='m', s=100, alpha=0.7)
    # match the axes to the image, y inverted for pixel coordinates
    ax.set(xlim=(0, stimulus.shape[1]), ylim=(stimulus.shape[0], 0))
```
## Section 3.1: Fitting data with `pykalman`
Now that we have data, we'd like to use Kalman filtering to give us a better estimate of the true gaze. Up until this point we've known the parameters of our LDS, but here we need to estimate them from data directly. We will use the `pykalman` package to handle this estimation using the EM algorithm.
Before exploring fitting models with `pykalman` it's worth pointing out some naming conventions used by the library:
$$
\begin{align}
F &: \texttt{transition_matrices} &
Q &: \texttt{transition_covariance}\\
H &:\texttt{observation_matrices} &
R &:\texttt{observation_covariance}\\
\mu_0 &: \texttt{initial_state_mean} & \Sigma_0 &: \texttt{initial_state_covariance}
\end{align}
$$
The first thing we need to do is provide a guess at the dimensionality of the latent state. Let's start by assuming the dynamics line-up directly with the observation data (pixel x,y-coordinates), and so we have a state dimension of 2.
We also need to decide which parameters we want the EM algorithm to fit. In this case, we will let the EM algorithm discover the dynamics parameters i.e. the $F$, $Q$, $H$, and $R$ matrices.
We set up our `pykalman` `KalmanFilter` object with these settings using the code below.
```
# set up our KalmanFilter object and tell it which parameters we want to
# estimate
np.random.seed(1)  # fix the RNG so the EM fit is reproducible

n_dim_obs = 2    # observed gaze is (x, y) pixel coordinates
n_dim_state = 2  # assume the latent state lives in the same 2-D space

# em_vars lists the parameters the EM algorithm is allowed to fit:
# F (transition_matrices), Q (transition_covariance),
# H (observation_matrices), R (observation_covariance)
kf = pykalman.KalmanFilter(
    n_dim_state=n_dim_state,
    n_dim_obs=n_dim_obs,
    em_vars=['transition_matrices', 'transition_covariance',
             'observation_matrices', 'observation_covariance']
)
```
Because we know from the reported experimental design that subjects fixated in the center of the screen right before the image appears, we can set the initial starting state estimate $\mu_0$ as being the center pixel of the stimulus image (the first data point in this sample dataset) with a correspondingly low initial noise covariance $\Sigma_0$. Once we have everything set, it's time to fit some data.
```
# Choose a subject and stimulus image
subject_id = 1
image_id = 2
data = subjects[subject_id][image_id]

# Provide the initial states: subjects fixated the screen center before the
# image appeared, so the first sample is a good initial mean, with a
# correspondingly small initial covariance
kf.initial_state_mean = data[0]
kf.initial_state_covariance = 0.1*np.eye(n_dim_state)

# Estimate the parameters from data using the EM algorithm
kf.em(data)

# Report the fitted dynamics parameters
print(f'F =\n{kf.transition_matrices}')
print(f'Q =\n{kf.transition_covariance}')
print(f'H =\n{kf.observation_matrices}')
print(f'R =\n{kf.observation_covariance}')
```
We see that the EM algorithm has found fits for the various dynamics parameters. One thing you will note is that both the state and observation matrices are close to the identity matrix, which means the x- and y-coordinate dynamics are independent of each other and primarily impacted by the noise covariances.
We can now use this model to smooth the observed data from the subject. In addition to the source image, we can also see how this model will work with the gaze recorded by the same subject on the other images as well, or even with different subjects.
Below are the three stimulus images overlayed with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers.
```
#@title
#@markdown Make sure you execute this cell to enable the widget!
@widgets.interact(subject_id=widgets.IntSlider(1, min=0, max=4))
def plot_smoothed_traces(subject_id=0):
    """Widget callback: plot recorded gaze and the Kalman-smoothed state for
    one subject across all three stimulus images."""
    traces = subjects[subject_id]
    fig, axes = plt.subplots(ncols=3, figsize=(18, 4))
    # one panel per stimulus image: raw gaze first, smoothed state on top
    for trace, stimulus, panel in zip(traces, images, axes):
        panel = plot_gaze_data(trace, img=stimulus, ax=panel)
        plot_kf_state(kf, trace, panel)
```
Why do you think one trace from one subject was sufficient to provide a decent fit across all subjects? If you were to go back and change the subject_id and/or image_id for when we fit the data using EM, do you think the fits would be different?
Finally, recall that the original task was to use this data to help develop models of visual salience. While our Kalman filter is able to provide smooth estimates of observed gaze data, it's not telling us anything about *why* the gaze is going in a certain direction. In fact, if we sample data from our parameters and plot them, we get what amounts to a random walk.
```
# Sample a fresh trajectory from the fitted LDS parameters and plot it;
# with no drive beyond the previous state this amounts to a random walk
kf_state, kf_data = kf.sample(len(data))
ax = plot_gaze_data(kf_data, img=images[2])
plot_kf_state(kf, kf_data, ax)
```
This should not be surprising, as we have given the model no other observed data beyond the pixels at which gaze was detected. We expect there is some other aspect driving the latent state of where to look next other than just the previous fixation location.
In summary, while the Kalman filter is a good option for smoothing the gaze trajectory itself, especially if using a lower-quality eye tracker or in noisy environmental conditions, a linear dynamical system may not be the right way to approach the much more challenging task of modeling visual saliency.
# Bonus
## Review on Gaussian joint, marginal and conditional distributions
Assume
\begin{eqnarray}
z & = & [x^Ty^T]^T \\
z & = & \begin{bmatrix}x \\y\end{bmatrix}\sim N\left(\begin{bmatrix}a \\b\end{bmatrix}, \begin{bmatrix}A & C \\C^T & B\end{bmatrix}\right)
\end{eqnarray}
then the marginal distributions are
\begin{eqnarray}
x & \sim & N(a, A) \\
y & \sim & N(b,B)
\end{eqnarray}
and the conditional distributions are
\begin{eqnarray}
x|y & \sim & N(a+CB^{-1}(y-b), A-CB^{-1}C^T) \\
y|x & \sim & N(b+C^TA^{-1}(x-a), B-C^TA^{-1}C)
\end{eqnarray}
*important take away: given the joint Gaussian distribution we can derive the conditionals*
## Kalman Smoothing
```
#@title Video 5: Kalman Smoothing and the EM Algorithm
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="4Ar2mYz1Nms", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
Obtain estimates by propagating from $y_T$ back to $y_0$ using results of forward pass ($\hat{\mu}_t^{filter}, \hat{\Sigma}_t^{filter}, P_t=\hat{\Sigma}_{t+1}^{pred}$)
\begin{eqnarray}
s_t & \sim & N(\hat{\mu}_t^{smooth}, \hat{\Sigma}_t^{smooth}) \\
\hat{\mu}_t^{smooth} & = & \hat{\mu}_t^{filter}+J_t(\hat{\mu}_{t+1}^{smooth}-F\hat{\mu}_t^{filter}) \\
\hat{\Sigma}_t^{smooth} & = & \hat{\Sigma}_t^{filter}+J_t(\hat{\Sigma}_{t+1}^{smooth}-P_t)J_t^T \\
J_t & = & \hat{\Sigma}_t^{filter}F^T P_t^{-1}
\end{eqnarray}
This gives us the final estimate for $z_t$.
\begin{eqnarray}
\hat{\mu}_t & = & \hat{\mu}_t^{smooth} \\
\hat{\Sigma}_t & = & \hat{\Sigma}_t^{smooth}
\end{eqnarray}
### Exercise 3: Implement Kalman smoothing
In this exercise you will implement the Kalman smoothing (backward) process. Again you will focus on writing the expressions for computing the smoothed mean, smoothed covariance, and $J_t$ values.
```
def kalman_smooth(data, params):
    """Perform Kalman smoothing (backward pass) on the data given the provided
    system parameters.

    Args:
        data (ndarray): a sequence of observations of shape (n_timesteps, n_dim_obs)
        params (dict): a dictionary of model parameters: (F, Q, H, R, mu_0, sigma_0)

    Returns:
        ndarray, ndarray: the smoothed state means and state covariances
    """
    # pulled out of the params dict for convenience
    F = params['F']
    Q = params['Q']
    H = params['H']
    R = params['R']
    n_dim_state = F.shape[0]
    n_dim_obs = H.shape[0]

    # first run the forward pass to get the filtered means and covariances
    mu, sigma = kalman_filter(data, params)

    # initialize state mean and covariance estimates
    mu_hat = np.zeros_like(mu)
    sigma_hat = np.zeros_like(sigma)
    # the final filtered estimate is already conditioned on all the data
    mu_hat[-1] = mu[-1]
    sigma_hat[-1] = sigma[-1]

    # smooth the data, propagating backwards from T-1 to 0
    for t in reversed(range(len(data)-1)):
        sigma_pred = F @ sigma[t] @ F.T + Q  # sigma_pred at t+1

        # Kalman gain for the backward process
        J = sigma[t] @ F.T @ np.linalg.inv(sigma_pred)
        # smoothed mean: correct the filtered mean with the smoothed future estimate
        mu_hat[t] = mu[t] + J @ (mu_hat[t+1] - F @ mu[t])
        # smoothed covariance: propagate the future correction back through J
        sigma_hat[t] = sigma[t] + J @ (sigma_hat[t+1] - sigma_pred) @ J.T

    return mu_hat, sigma_hat

# Uncomment once the kalman_smooth function is complete
# smoothed_state_means, smoothed_state_covariances = kalman_smooth(obs, params)
# axes = plot_kalman(state, obs, filtered_state_means, color="r",
#                    label="my kf-filter")
# plot_kalman(state, obs, smoothed_state_means, color="b",
#             label="my kf-smoothed", axes=axes)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_a0f4822b.py)
*Example output:*
<img alt='Solution hint' align='left' width=1133 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/static/W3D2_Tutorial3_Solution_a0f4822b_0.png>
**Forward vs Backward**
Now that we have implementations for both, let's compare their performance by computing the MSE between the filtered (forward) and smoothed (backward) estimated states and the true latent state.
```
# Compare reconstruction error of the forward-only (filtered) estimates
# against the forward-backward (smoothed) estimates, versus the true state
print(f"Filtered MSE: {np.mean((state - filtered_state_means)**2):.3f}")
print(f"Smoothed MSE: {np.mean((state - smoothed_state_means)**2):.3f}")
```
In this example, the smoothed estimate is clearly superior to the filtered one. This makes sense as the backward pass is able to use the forward pass estimates and correct them given all the data we've collected.
So why would you ever use Kalman filtering alone, without smoothing? As Kalman filtering only depends on already observed data (i.e. the past) it can be run in a streaming, or on-line, setting. Kalman smoothing relies on future data as it were, and as such can only be applied in a batch, or off-line, setting. So use Kalman filtering if you need real-time corrections and Kalman smoothing if you are considering already-collected data.
## The Expectation-Maximization (EM) Algorithm
- want to maximize $\log p(y|\theta)$
- need to marginalize out latent state *(which is not tractable)*
$$p(y|\theta)=\int p(y,s|\theta)ds$$
- add a probability distribution $q(s)$ which will approximate the latent state distribution
$$\log p(y|\theta)=\log\int_s q(s)\frac{p(y,s|\theta)}{q(s)}ds$$
- can be rewritten as
$$\mathcal{L}(q,\theta)+KL\left(q(s)\,\|\,p(s|y,\theta)\right)$$
- $\mathcal{L}(q,\theta)$ contains the joint distribution of $y$ and $s$
- $KL(q||p)$ contains the conditional distribution of $s|y$
#### Expectation step
- parameters are kept fixed
- find a good approximation $q(s)$: maximize lower bound $\mathcal{L}(q,\theta)$ with respect to $q(s)$
- (already implemented Kalman filter+smoother)
#### Maximization step
- keep distribution $q(s)$ fixed
- change parameters to maximize the lower bound $\mathcal{L}(q,\theta)$
As mentioned, we have already effectively solved for the E-Step with our Kalman filter and smoother. The M-step requires further derivation, which is covered in the Appendix. Rather than having you implement the M-Step yourselves, let's instead turn to using a library that has already implemented EM for exploring some experimental data from cognitive neuroscience.
### The M-step for a LDS
*(see Bishop, chapter 13.3.2 Learning in LDS)*
Update parameters of the probability distribution
*For the updates in the M-step we will need the following posterior marginals obtained from the Kalman smoothing results* $\hat{\mu}_t^{smooth}, \hat{\Sigma}_t^{smooth}$
$$
\begin{eqnarray}
E(s_t) &=& \hat{\mu}_t \\
E(s_ts_{t-1}^T) &=& J_{t-1}\hat{\Sigma}_t+\hat{\mu}_t\hat{\mu}_{t-1}^T\\
E(s_ts_{t}^T) &=& \hat{\Sigma}_t+\hat{\mu}_t\hat{\mu}_{t}^T
\end{eqnarray}
$$
**Update parameters**
Initial parameters
$$
\begin{eqnarray}
\mu_0^{new}&=& E(s_0)\\
Q_0^{new} &=& E(s_0s_0^T)-E(s_0)E(s_0^T) \\
\end{eqnarray}
$$
Hidden (latent) state parameters
$$
\begin{eqnarray}
F^{new} &=& \left(\sum_{t=2}^N E(s_ts_{t-1}^T)\right)\left(\sum_{t=2}^N E(s_{t-1}s_{t-1}^T)\right)^{-1} \\
Q^{new} &=& \frac{1}{T-1} \sum_{t=2}^N E\big(s_ts_t^T\big) - F^{new}E\big(s_{t-1}s_{t}^T\big) - E\big(s_ts_{t-1}^T\big)F^{new}+F^{new}E\big(s_{t-1}s_{t-1}^T\big)\big(F^{new}\big)^{T}\\
\end{eqnarray}
$$
Observable (measured) space parameters
$$H^{new}=\left(\sum_{t=1}^N y_t E(s_t^T)\right)\left(\sum_{t=1}^N E(s_t s_t^T)\right)^{-1}$$
$$R^{new}=\frac{1}{T}\sum_{t=1}^N y_ty_t^T-H^{new}E(s_t)y_t^T-y_tE(s_t^T)(H^{new})^T+H^{new}E(s_ts_t^T)(H^{new})^T$$
## Handling Eye Blinks
In the MIT Eyetracking Database, raw tracking data includes times when the subject blinked. The way this is represented in the data stream is via negative pixel coordinate values.
We could try to mitigate these samples by simply deleting them from the stream, though this introduces other issues. For instance, if each sample corresponds to a fixed time step, and you arbitrarily remove some samples, the integrity of that consistent timestep between samples is lost. It's sometimes better to flag data as missing rather than to pretend it was never there at all, especially with time series data.
Another solution is to use masked arrays. In `numpy`, a [masked array](https://numpy.org/doc/stable/reference/maskedarray.generic.html#what-is-a-masked-array) is an `ndarray` with an additional embedded boolean masking array that indicates which elements should be masked. When computation is performed on the array, the masked elements are ignored. Both `matplotlib` and `pykalman` work with masked arrays, and, in fact, this is the approach taken with the data we explore in this notebook.
In preparing the dataset for this notebook, the original dataset was preprocessed to set all gaze data as masked arrays, with the mask enabled for any pixel with a negative x or y coordinate.
| github_jupyter |
```
#python packages pd
import numpy as np
import matplotlib.pyplot as plt
#machine learning packages
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D, Bidirectional, Dropout
from keras.layers import CuDNNLSTM
from keras.utils.np_utils import to_categorical
# from keras.callbacks import EarlyStopping
from keras.layers import Dropout
from sklearn.model_selection import train_test_split
import importlib
#custom python scripts
import generator
import utilis
# Check that you are running on GPUs
utilis.GPU_checker()
utilis.aws_setup()
%%time
# Data generators (reload so edits to generator.py are picked up without
# restarting the kernel)
importlib.reload(generator)
training_generator = generator.Keras_DataGenerator( dataset='train', w_hyp=False)
validation_generator = generator.Keras_DataGenerator(dataset='valid', w_hyp= False)
# Constants
# NOTE(review): VOCAB_SIZE / INPUT_LENGTH presumably must match the
# preprocessing in generator.py -- confirm before changing
VOCAB_SIZE = 1254
INPUT_LENGTH = 1000
EMBEDDING_DIM = 256
# # model
def build_model(vocab_size, embedding_dim, input_length, num_classes=41):
    """Build the bidirectional-LSTM sequence classifier.

    Args:
        vocab_size (int): size of the token vocabulary for the embedding layer
        embedding_dim (int): dimensionality of the learned token embeddings
        input_length (int): fixed length of the input token sequences
        num_classes (int): number of softmax output classes (default 41,
            matching the previously hard-coded output layer)

    Returns:
        Sequential: the uncompiled Keras model
    """
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_dim, input_length=input_length))
    model.add(SpatialDropout1D(0.4))
    # CuDNNLSTM requires a GPU; utilis.GPU_checker() above should have verified one
    model.add(Bidirectional(CuDNNLSTM(128)))
    model.add(Dropout(0.4))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(num_classes, activation='softmax'))
    return model
model = build_model(VOCAB_SIZE, EMBEDDING_DIM, INPUT_LENGTH)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# print(model.summary())

## WARNING: IF YOU HAVE MULTIPLE GPU CORES
# NOTE(review): multi_gpu_model was tried on goals-only data and did not show
# a clear speed-up; it may help once hypotheses are included -- unverified
# from keras.utils import multi_gpu_model
# model_GPU = multi_gpu_model(model, gpus= 4)
# model_GPU.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

## ARE YOU LOADING A MODEL? IF YES, UNCOMMENT AND RUN THE FOLLOWING LINES
# from keras.models import model_from_json
# json_file = open('model.json', 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# loaded_model = model_from_json(loaded_model_json)
# # load weights into new model
# loaded_model.load_weights("model.h5")
# print("Loaded model from disk")
# # REMEMBER TO COMPILE
# loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# overwriting model
# model = loaded_model

print(model.summary())
%%time
n_epochs = 6
# Train from the generator; validation is commented out (uncomment to enable)
history = model.fit_generator(generator=training_generator,
                              # validation_data=validation_generator,
                              verbose=1,
                              use_multiprocessing= False,
                              epochs=n_epochs)
# FOR SAVING MODEL: serialize the architecture to JSON ...
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# ... and serialize the weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")

# WARNING: DECIDE HOW TO NAME THE LOG
# convention: descriptionofmodel_personwhostartsrun
# e.g. LSTM_128encoder_etc_tanc
LOSS_FILE_NAME = "SIMPLE_LSTM_SMALL_TANK"
# WARNING 2: rerunning the cells below OVERWRITES files with these names
# save history - WARNING FILE NAME
utilis.history_saver_bad(history, LOSS_FILE_NAME)
# read numpy array
# history_toplot = np.genfromtxt("training_logs/"+ LOSS_FILE_NAME +".csv")
# plt.plot(history_toplot)
# plt.title('Loss history')
# plt.show()
%%time
n_epochs = 1
# NOTE(review): `loaded_model` only exists if the model-loading block above
# was uncommented and run; otherwise this cell raises NameError
history = loaded_model.fit_generator(generator=training_generator,
                                     validation_data=validation_generator,
                                     verbose=1,
                                     use_multiprocessing= False,
                                     epochs=n_epochs)
```
| github_jupyter |
```
%matplotlib inline
```
Group Sparse inverse covariance for multi-subject connectome
=============================================================
This example shows how to estimate a connectome on a group of subjects
using the group sparse inverse covariance estimate.
```
import numpy as np
from nilearn import plotting
n_subjects = 4 # subjects to consider for group-sparse covariance (max: 40)
def plot_matrices(cov, prec, title, labels):
    """Display a covariance matrix and its precision matrix for one estimator.

    The precision matrix is copied and its diagonal zeroed out so the large
    diagonal entries do not dominate the color scale.
    """
    # work on a copy so the caller's precision matrix is left untouched
    prec = prec.copy()
    n = prec.shape[0]
    diag = list(range(n))
    prec[diag, diag] = 0
    # symmetric color range around zero for the precision plot
    span = max(abs(prec.min()), abs(prec.max()))

    # covariance on a fixed [-1, 1] scale
    plotting.plot_matrix(cov, cmap=plotting.cm.bwr,
                         vmin=-1, vmax=1, title="%s / covariance" % title,
                         labels=labels)
    # precision on its own symmetric scale
    plotting.plot_matrix(prec, cmap=plotting.cm.bwr,
                         vmin=-span, vmax=span, title="%s / precision" % title,
                         labels=labels)
```
Fetching datasets
------------------
```
from nilearn import datasets

# MSDL probabilistic atlas (defines the regions) and ADHD resting-state data
msdl_atlas_dataset = datasets.fetch_atlas_msdl()
adhd_dataset = datasets.fetch_adhd(n_subjects=n_subjects)

# print basic information on the dataset
print('First subject functional nifti image (4D) is at: %s' %
      adhd_dataset.func[0])  # 4D data
```
Extracting region signals
--------------------------
```
from nilearn import image
from nilearn import input_data

# A "memory" to avoid recomputation
# NOTE(review): sklearn.externals.joblib was removed in newer scikit-learn;
# `import joblib` is the replacement if this import fails -- confirm version
from sklearn.externals.joblib import Memory
mem = Memory('nilearn_cache')

# Masker that extracts one time series per atlas map: detrended,
# high-pass filtered at 0.01 Hz, standardized, resampled to the atlas
masker = input_data.NiftiMapsMasker(
    msdl_atlas_dataset.maps, resampling_target="maps", detrend=True,
    low_pass=None, high_pass=0.01, t_r=2.5, standardize=True,
    memory='nilearn_cache', memory_level=1, verbose=2)
masker.fit()

subject_time_series = []
func_filenames = adhd_dataset.func
confound_filenames = adhd_dataset.confounds
for func_filename, confound_filename in zip(func_filenames,
                                            confound_filenames):
    print("Processing file %s" % func_filename)

    # Computing some confounds (cached so reruns are cheap)
    hv_confounds = mem.cache(image.high_variance_confounds)(
        func_filename)

    # Regress out both computed and provided confounds during extraction
    region_ts = masker.transform(func_filename,
                                 confounds=[hv_confounds, confound_filename])
    subject_time_series.append(region_ts)
```
Computing group-sparse precision matrices
------------------------------------------
```
from nilearn.connectome import GroupSparseCovarianceCV

# Group-sparse estimate: one precision matrix per subject with a shared
# sparsity pattern across subjects
gsc = GroupSparseCovarianceCV(verbose=2)
gsc.fit(subject_time_series)

# Single sparse estimate on all subjects' time series concatenated together
# NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in scikit-learn
# 0.20+ -- confirm the installed version
from sklearn import covariance
gl = covariance.GraphLassoCV(verbose=2)
gl.fit(np.concatenate(subject_time_series))
```
Displaying results
-------------------
```
atlas_img = msdl_atlas_dataset.maps
atlas_region_coords = plotting.find_probabilistic_atlas_cut_coords(atlas_img)
labels = msdl_atlas_dataset.labels

# Connectome from the plain covariance, keeping only the strongest 10% of edges
plotting.plot_connectome(gl.covariance_,
                         atlas_region_coords, edge_threshold='90%',
                         title="Covariance",
                         display_mode="lzr")
# Negated precision so positive partial correlations appear as positive edges
plotting.plot_connectome(-gl.precision_, atlas_region_coords,
                         edge_threshold='90%',
                         title="Sparse inverse covariance (GraphLasso)",
                         display_mode="lzr",
                         edge_vmax=.5, edge_vmin=-.5)
plot_matrices(gl.covariance_, gl.precision_, "GraphLasso", labels)

title = "GroupSparseCovariance"
# The group-sparse estimator returns one matrix per subject; show subject 0
plotting.plot_connectome(-gsc.precisions_[..., 0],
                         atlas_region_coords, edge_threshold='90%',
                         title=title,
                         display_mode="lzr",
                         edge_vmax=.5, edge_vmin=-.5)
plot_matrices(gsc.covariances_[..., 0],
              gsc.precisions_[..., 0], title, labels)

plotting.show()
```
| github_jupyter |
```
import numpy as np
import time
import torch
import scipy.stats as stats
from scipy.special import gammaln
import train_2D_rt as tr
```
# Testing the Timing of exact CME vs. NN prediction
First, generate some number of parameters to use in timing. I'll start with 15 but maybe increase this?
```
set_size = 1
num_files = 15
# total number of parameter vectors to generate for the timing runs
N = num_files*set_size
params = tr.generate_param_vectors(N)
```
-----
## Timing for exact CME
Now, define the calculate exact CME function and get_moments. Get moments now accepts a multiple of sigma over which to calculate the solution.
```
def get_moments(p, N):
    """Return means, variances, standard deviations and grid limits for the
    parameter vector p = (b, beta, gamma), extending the grid N standard
    deviations past the (ceiled) mean with a floor of 30 states per species."""
    b, beta, gamma = p
    # means: b divided by each of the two rates (beta, gamma)
    MU = b * torch.tensor([1 / beta, 1 / gamma])
    VAR = MU * torch.tensor([1 + b, 1 + b * beta / (beta + gamma)])
    STD = VAR.sqrt()
    # upper grid limit: ceil(mean) pushed out by N standard deviations,
    # rounded up again, then clipped to at least 30
    limit = torch.ceil(torch.ceil(MU) + N * STD)
    xmax = torch.clip(limit, 30, np.inf).int()
    return MU, VAR, STD, xmax
def calculate_exact_cme(p,method,N):
    """Timing helper: run the exact CME integrator for parameter vector p.

    Args:
        p (ndarray): log10 parameter vector (b, beta, gamma)
        method (str): quadrature method forwarded to tr.cme_integrator
            (e.g. 'fixed_quad' or 'quad_vec')
        N: number of standard deviations used to size the state-space grid

    Returns:
        the total state-space size (nascent limit * mature limit) the
        integrator was evaluated on; the probabilities themselves are
        computed (so the work is timed) but discarded
    """
    p1 = torch.from_numpy(p).float()
    p1 = 10**p1  # parameters are stored in log10 space
    MU, VAR, STD, xmaxc = get_moments(p1,N)
    xmaxc = np.array([int(xmaxc[0]),int(xmaxc[1])])
    # evaluate the CME solution on the full grid (result intentionally unused)
    y = tr.cme_integrator(np.array(p1),xmaxc+1,method=method)
    return(xmaxc[0]*xmaxc[1])
```
----
## Increasing the State Space of Each Grid (multiple sigmas)
### Quad_vec
```
P = 15
sigmas = [1,2,3,5,10,15,25,50]
state_spaces = []
time_sigmas_fixedquad = []

# time the fixed_quad integrator as the grid grows (more sigmas = larger grid)
for sig in sigmas:
    print(sig)
    t1 = time.time()
    state_spaces_ = np.zeros(P)
    for i in range(P):
        s_ = calculate_exact_cme(params[i], method = 'fixed_quad',N=sig)
        state_spaces_[i] = s_
    state_spaces.append(state_spaces_)
    t2 = time.time()
    time_sigmas_fixedquad.append(t2-t1)

P = 15
sigmas = [1,2,3,5,10,15,25,50]
state_spaces = []  # NOTE(review): overwrites the fixed_quad state spaces above
time_sigmas_quadvec = []

# same sweep with the quad_vec integrator
for sig in sigmas:
    print(sig)
    t1 = time.time()
    state_spaces_ = np.zeros(P)
    for i in range(P):
        s_ = calculate_exact_cme(params[i], method = 'quad_vec',N=sig)
        state_spaces_[i] = s_
    state_spaces.append(state_spaces_)
    t2 = time.time()
    time_sigmas_quadvec.append(t2-t1)
```
------
# Increasing the Number of P vectors
```
P = 15
p_vecs = [1,2,3,5,10,15,25]
time_repeatP_fixedquad = []
for p in p_vecs:
print(p)
param_list = list(params)
params_ = np.array(p*list(params))
t1 = time.time()
for i in range(P*p):
s_ = calculate_exact_cme(params_[i], method = 'fixed_quad',N=1)
t2 = time.time()
time_repeatP_fixedquad.append(t2-t1)
P = 15
p_vecs = [1,2,3,5,10,15,25]
time_repeatP_quadvec = []
for p in p_vecs:
print(p)
param_list = list(params)
params_ = np.array(p*list(params))
t1 = time.time()
for i in range(P*p):
s_ = calculate_exact_cme(params_[i], method = 'quad_vec',N=1)
t2 = time.time()
time_repeatP_quadvec.append(t2-t1)
```
### Nice.
Great, we now have the timings for 1) increasing the grid size over which we integrate the exact CME and 2) increasing the number of parameters we use (kinda the same as increasing grid sizes, just in chunks? i think?) for 1) fixed_quad and 2) quad_vec.
Let's do the same timing tests for the NN, with several different generating basis functions.
------
# Timing for NN
First, I'll define the grid and get_ypred_at_RT functions!
```
def generate_grid(npdf, VAR, MU, quantiles=None):
    """Generate the (nascent, mature) grids of basis-function locations.

    Args:
        npdf: number of basis functions per species (not used by the
            'PRESET' scheme; kept for interface compatibility)
        VAR (tensor): per-species variances
        MU (tensor): per-species means
        quantiles: grid scheme; only 'PRESET' is implemented, which places
            points at exp(logmean + logstd * z) using the preset quantiles
            NORM_nas / NORM_mat (module-level globals)

    Returns:
        tuple of tensors: (nascent grid, mature grid)

    Raises:
        ValueError: for any quantiles scheme other than 'PRESET'
    """
    if quantiles == 'PRESET':
        # moment-match a lognormal to (MU, VAR): log-space std and mean
        logstd = torch.sqrt(np.log((VAR/MU**2)+1))
        logmean = torch.log(MU**2/np.sqrt(VAR+MU**2))
        translin_0 = torch.exp(logmean[0]+logstd[0]*NORM_nas)
        translin_1 = torch.exp(logmean[1]+logstd[1]*NORM_mat)
        return translin_0, translin_1
    # The original fell through to `return(translin)`, an undefined name that
    # always raised NameError; fail loudly with a clear message instead.
    raise ValueError(f"unsupported quantiles scheme: {quantiles!r}")
def get_ypred_at_RT(p,npdf,w,N,hyp=2.4,quantiles='PRESET',
                    first_special=False,special_std='tail_prob'):
    """Evaluate the predicted joint PMF as a weighted sum of negative-binomial
    basis functions over a (nascent, mature) grid.

    Parameters
    ----------
    p : length-3 tensor of log10 rate parameters (exponentiated below).
    npdf : two ints, number of basis functions per species.
    w : flat tensor of npdf[0]*npdf[1] mixture weights.
    N : grid-size parameter forwarded to get_moments.
    hyp : multiplier on the basis-function standard deviations.
    first_special / special_std : which grid point gets a special std and
        how it is assigned ('mean', 'neighbor', or 'tail_prob').

    Returns
    -------
    Y : (xmax[0]+1, xmax[1]+1) tensor, the predicted PMF on the state grid.

    Depends on module-level `lnfactorial` (gammaln table) and the grids
    from generate_grid.
    """
    p = 10**p
    MU, VAR, STD, xmax = get_moments(p,N)
    #two separate variables. a bit ugly and leaves room for error.
    grid_nas,grid_mat = generate_grid(npdf,VAR,MU,quantiles=quantiles)
    # no zs implementation yet. not sure i want to implement it.
    # Basis std devs: spacing between adjacent grid points, with one
    # "special" endpoint handled by the chosen convention below.
    s_nas = torch.zeros(npdf[0])
    s_mat = torch.zeros(npdf[1])
    spec = 0 if first_special else -1
    if first_special:
        s_nas[1:] = torch.diff(grid_nas)
        s_mat[1:] = torch.diff(grid_mat)
    else: #last special... for now
        s_nas[:-1] = torch.diff(grid_nas)
        s_mat[:-1] = torch.diff(grid_mat)
    if special_std == 'mean':
        s_nas[spec] = grid_nas[spec]
        s_mat[spec] = grid_mat[spec]
    elif special_std == 'neighbor': #assign_neighbor_to_special
        s_nas[spec] = s_nas[1] if first_special else s_nas[-2]
        s_mat[spec] = s_mat[1] if first_special else s_mat[-2]
    elif special_std == 'tail_prob':
        if first_special:
            print('If you are using this setting, you are doing something wrong.')
        # assumes p = (burst size-like, splicing, degradation) rates --
        # TODO confirm parameter ordering against get_moments
        t_max = torch.log(p[1]/p[2])/(p[1] - p[2])
        f = (torch.exp(-p[2]*t_max) - torch.exp(-p[1]*t_max)) * p[1]/(p[1] - p[2]) * p[0]
        tailratio = 1/(1+1/f) #the mature tail ratio
        s_mat[spec] = torch.sqrt(grid_mat[spec] / (1-tailratio))
        tailratio = p[0]/(1+p[0]) #the nascent tail ratio
        s_nas[spec] = torch.sqrt(grid_nas[spec] / (1-tailratio))
    else:
        print('did not specify a standard deviation convention!')
    s_nas *= hyp
    s_mat *= hyp
    # Convert (mean, variance) to negative-binomial (r, p) per basis
    # function; p <= 0 (variance <= mean) falls back to Poisson below.
    v_nas = s_nas**2
    v_mat = s_mat**2
    r_nas = grid_nas**2/(v_nas-grid_nas)
    p_nas = 1-grid_nas/v_nas
    r_mat = grid_mat**2/(v_mat-grid_mat)
    p_mat = 1-grid_mat/v_mat
    xgrid_nas = torch.arange(xmax[0]+1)
    xgrid_mat = torch.arange(xmax[1]+1)
    # lnfactorial[n] = gammaln(n) = ln((n-1)!), so the slice starting at 1
    # gives ln(x!) for x = 0..xmax (entry 0 of the table is inf and skipped).
    gammaln_xgrid_nas = lnfactorial[1:(xmax[0]+2)]
    gammaln_xgrid_mat = lnfactorial[1:(xmax[1]+2)]
    Y = torch.zeros((xmax[0]+1,xmax[1]+1))
    for i in range(npdf[0]):
        # Poisson log-pmf; the conditional block upgrades it to NB log-pmf.
        lnas = -grid_nas[i] + xgrid_nas * torch.log(grid_nas[i]) - gammaln_xgrid_nas
        if p_nas[i] > 1e-10:
            lnas += torch.special.gammaln(xgrid_nas+r_nas[i]) - torch.special.gammaln(r_nas[i]) \
                - xgrid_nas*torch.log(r_nas[i] + grid_nas[i]) + grid_nas[i] \
                + r_nas[i]*torch.log(1-p_nas[i])
        for j in range(npdf[1]):
            lmat = - grid_mat[j] + xgrid_mat * torch.log(grid_mat[j]) - gammaln_xgrid_mat
            if p_mat[j] > 1e-10:
                lmat += torch.special.gammaln(xgrid_mat+r_mat[j]) - torch.special.gammaln(r_mat[j]) \
                    - xgrid_mat*torch.log(r_mat[j] + grid_mat[j]) + grid_mat[j] \
                    + r_mat[j]*torch.log(1-p_mat[j]) #wasteful: we're recomputing a lot of stuff.
            # Outer product of the two 1-D log-pmfs, weighted by w.
            Y += w[i*npdf[1] + j] * torch.exp(lnas[:,None] + lmat[None,:])
    #note convention change. Y = the predicted PMF is now returned in the same shape as the original histogram.
    #this is fine bc Y is flattened anyway later on down the line.
    return Y
# define NORM and YPRED_FUN
def NORM_function(npdf):
    """Return (NORM_nas, NORM_mat): standard-normal quantile grids used to
    generate the basis functions.

    Chebyshev-style quantiles are computed first, but the final linspace
    block overrides them with evenly spaced interior quantiles (the 'qlin'
    convention used by the saved models).

    Parameters
    ----------
    npdf : length-2 array-like of basis-function counts.

    Returns
    -------
    (NORM_nas, NORM_mat) : numpy arrays of standard-normal quantiles.
    """
    if npdf[0] == npdf[1]:
        n = np.arange(npdf[0])
        # BUG FIX: the original divided by 2*npdf (a length-2 array), which
        # broadcasts against n and raises for npdf[0] not in {1, 2}; the
        # scalar count npdf[0] is what was intended (cf. the else branch).
        q = np.flip((np.cos((2*(n+1)-1)/(2*npdf[0])*np.pi)+1)/2)
        NORM = stats.norm.ppf(q)
        NORM_nas = torch.tensor(NORM)
        NORM_mat = NORM_nas
    else:
        n = np.arange(npdf[0])
        q = np.flip((np.cos((2*(n+1)-1)/(2*npdf[0])*np.pi)+1)/2)
        #print(q)
        NORM_nas = torch.tensor(stats.norm.ppf(q))
        n = np.arange(npdf[1])
        q = np.flip((np.cos((2*(n+1)-1)/(2*npdf[1])*np.pi)+1)/2)
        #print(q)
        NORM_mat = torch.tensor(stats.norm.ppf(q))
    # Linear-quantile override ('qlin'): evenly spaced interior quantiles
    # replace the Chebyshev grids computed above.
    n_n = np.linspace(0,1,npdf[0]+2)[1:-1]
    n_m = np.linspace(0,1,npdf[1]+2)[1:-1]
    NORM_nas = stats.norm.ppf(n_n)
    NORM_mat = stats.norm.ppf(n_m)
    #print(NORM_nas)
    return(NORM_nas,NORM_mat)
# Log-factorial lookup table: lnfactorial[n] = gammaln(n) = ln((n-1)!).
# NOTE: 1e7 float entries (~40 MB); entry 0 is gammaln(0) = inf, which
# downstream slices deliberately skip via [1:...].
lnfactorial = torch.special.gammaln(torch.arange(10000000))
# Convenience wrapper pinning the hyperparameter and quantile convention.
YPRED_FUN = lambda p, npdf, w, N: get_ypred_at_RT(p=p,npdf=npdf,w=w,N=N,hyp=2.4,
                                                  quantiles='PRESET')
def get_predicted_PMF(p_list,npdf,N,position,model,get_ypred_at_RT):
    '''Predict the histogram for the parameter vector at `position`.

    Puts `model` into eval mode, runs the single selected parameter row
    through it to obtain mixture weights, and hands those weights to the
    supplied `get_ypred_at_RT` callback to build the PMF.
    '''
    model.eval()
    selected = p_list[position:position + 1]
    weights = model(selected)[0]
    return get_ypred_at_RT(selected[0], npdf, weights, N)
```
The next thing to do is load in the models. :)
I'll try for models with the following number of basis functions:
1. [10,11]
2. [20,21]
3. [30,31]
4. [50,51]
```
# Load the pre-trained MLPs (3 inputs -> npdf[0]*npdf[1] mixture weights)
# for each basis-function count.  'qlin' in the filenames = linear quantiles.
npdf = [10,11]
model_10 = tr.my_MLP1(3,npdf[0]*npdf[1])
model_10.load_state_dict(torch.load('./quadvec_models/10npdf_256params_qlin_MODEL'))
model_10.eval();
npdf = [20,21]
# pre-loaded model
model_20 = tr.my_MLP1(3,npdf[0]*npdf[1])
model_20.load_state_dict(torch.load('./quadvec_models/07032022_20npdf_1train_qlin_15epochs_MODEL'))
model_20.eval();
npdf = [30,31]
# pre-loaded model
model_30 = tr.my_MLP1(3,npdf[0]*npdf[1])
model_30.load_state_dict(torch.load('./quadvec_models/30npdf_256params_qlin_MODEL'))
model_30.eval();
npdf = [50,51]
# pre-loaded model
model_50 = tr.my_MLP1(3,npdf[0]*npdf[1])
model_50.load_state_dict(torch.load('./quadvec_models/50npdf_256params_qlin_MODEL'))
model_50.eval();
# NOTE(review): the 30-npdf model is re-loaded below, duplicating the block
# above -- harmless but redundant.
npdf = [30,31]
# pre-loaded model
model_30 = tr.my_MLP1(3,npdf[0]*npdf[1])
model_30.load_state_dict(torch.load('./quadvec_models/30npdf_256params_qlin_MODEL'))
model_30.eval();
```
# Increasing Sigma (grid size)
```
# need to work with tensors now!
params_tensor = torch.from_numpy(params).float()
# Time the NN pipeline over the same sigma sweep as the exact CME above,
# for 10-, 20- and 30-basis models.  `sig` is passed as the grid-size N.
# def get_predicted_PMF(p_list,npdf,position,model,get_ypred_at_RT)
P = 15
sigmas = [1,2,3,5,10,15,25,50]
npdf = [10,11]
time_sigmas_NN_10 = []
NORM_nas,NORM_mat = NORM_function(np.array(npdf))
for sig in sigmas:
    print(sig)
    t1 = time.time()
    for i in range(P):
        s_ = get_predicted_PMF(params_tensor[i:i+1],npdf,sig,0,model_10,
                               YPRED_FUN)
    t2 = time.time()
    time_sigmas_NN_10.append(t2-t1)
P = 15
sigmas = [1,2,3,5,10,15,25,50]
npdf = [20,21]
time_sigmas_NN_20 = []
NORM_nas,NORM_mat = NORM_function(np.array(npdf))
for sig in sigmas:
    print(sig)
    t1 = time.time()
    for i in range(P):
        s_ = get_predicted_PMF(params_tensor[i:i+1],npdf,sig,0,model_20,
                               YPRED_FUN)
    t2 = time.time()
    time_sigmas_NN_20.append(t2-t1)
P = 15
sigmas = [1,2,3,5,10,15,25,50]
npdf = [30,31]
time_sigmas_NN_30 = []
NORM_nas,NORM_mat = NORM_function(np.array(npdf))
for sig in sigmas:
    print(sig)
    t1 = time.time()
    for i in range(P):
        s_ = get_predicted_PMF(params_tensor[i:i+1],npdf,sig,0,model_30,
                               YPRED_FUN)
    t2 = time.time()
    time_sigmas_NN_30.append(t2-t1)
```
-----
# Calculating with increasing P vectors
```
# Time the NN pipeline as the number of parameter vectors grows (mirrors
# the exact-CME repeat-P benchmark).  NOTE: the grid size is passed as
# `sig`, i.e. whatever value sig held after the previous sweep (50).
time_repeatP_NN_10 = []
npdf = [10,11]
NORM_nas,NORM_mat = NORM_function(np.array(npdf))
for p in p_vecs:
    print(p)
    param_list = list(params)  # unused
    params_ = np.array(p*list(params))
    params_ = torch.from_numpy(params_).float()
    t1 = time.time()
    for i in range(P*p):
        ss_ = get_predicted_PMF(params_[i:i+1],npdf,sig,0,model_10,
                                YPRED_FUN)
    t2 = time.time()
    time_repeatP_NN_10.append(t2-t1)
time_repeatP_NN_20 = []
npdf = [20,21]
NORM_nas,NORM_mat = NORM_function(np.array(npdf))
for p in p_vecs:
    print(p)
    param_list = list(params)  # unused
    # BUG FIX: the original passed a plain Python list to torch.from_numpy,
    # which raises TypeError; wrap in np.array as in the 10-basis loop.
    params_ = np.array(p*list(params))
    params_ = torch.from_numpy(params_).float()
    t1 = time.time()
    for i in range(P*p):
        ss_ = get_predicted_PMF(params_[i:i+1],npdf,sig,0,model_20,
                                YPRED_FUN)
    t2 = time.time()
    time_repeatP_NN_20.append(t2-t1)
time_repeatP_NN_30 = []
npdf = [30,31]
NORM_nas,NORM_mat = NORM_function(np.array(npdf))
for p in p_vecs:
    print(p)
    param_list = list(params)  # unused
    params_ = np.array(p*list(params))  # BUG FIX: as above
    params_ = torch.from_numpy(params_).float()
    t1 = time.time()
    for i in range(P*p):
        ss_ = get_predicted_PMF(params_[i:i+1],npdf,sig,0,model_30,
                                YPRED_FUN)
    t2 = time.time()
    time_repeatP_NN_30.append(t2-t1)
```
Amazing! We now have the timings for the various state spaces and generating methods.
Let's see how the timing looks.
This should be fairly interesting.
----
# Plotting
## Increasing Sigma
```
# Plot generation time vs. total state-space size for each method.
sigma_state_space = [np.sum(a) for a in state_spaces]
plt.plot(sigma_state_space,time_sigmas_quadvec,c='red',label='Quad Vec')
plt.plot(sigma_state_space,time_sigmas_fixedquad,c='green',label='Fixed Quad')
plt.plot(sigma_state_space,time_sigmas_NN_10,c='turquoise',label='NN, 10 basis')
# BUG FIX: the 20- and 30-basis curves were both labeled 'NN, 10 basis'.
plt.plot(sigma_state_space,time_sigmas_NN_20,c='teal',label='NN, 20 basis')
plt.plot(sigma_state_space,time_sigmas_NN_30,c='blue',label='NN, 30 basis')
plt.xlabel('State Space')
plt.ylabel('Generating Time')
plt.legend()
```
| github_jupyter |
#### Author: OMKAR PATHAK
# Arrays
## What is an Array?
* Array is a data structure used to store homogeneous elements at contiguous locations.
* One memory block is allocated for the entire array to hold the elements of the array. The array elements can be accessed in constant time by using the index of the particular element as the subscript.
## Properties of Arrays:
* Arrays stores similar data types. That is, array can hold data of same data type values. This is one of the limitations of arrays compared to other data structures.
* Each value stored, in an array, is known as an element and all elements are indexed. The first element added, by default, gets 0 index. That is, the 5th element added gets an index number of 4.
* Elements can be retrieved by their index number. (__random access__)
* Array elements are stored in contiguous (continuous) memory locations.
* One array name can represent multiple values. Array is the easiest way to store a large quantity of data of same data types. For example, to store the salary of 100 employees, it is required to declare 100 variables. But with arrays, with one array name all the 100 employees salaries can be stored.
* At the time of creation itself, array size should be declared (array initialization does not require size).
## Arrays in Python:
Python does not have a native support for arrays, but has a more generic data structure called LIST. List provides all the options as array with more functionality.
But with few tweaks we can implement Array data structure in Python.
We will be seeing how to do this.
### Creating an array:
```
class Array(object):
    ''' Fixed-size array backed by a Python list.

    sizeOfArray: total number of slots in the array
    arrayType: data type of the elements (all elements share the same type)
    arrayItems: the stored values, initialized to arrayType(0)
    '''
    def __init__(self, sizeOfArray, arrayType = int):
        # Original computed the size via len(list(map(...))); the count is
        # simply sizeOfArray.
        self.sizeOfArray = sizeOfArray
        self.arrayType = arrayType  # kept so delete() can re-zero a slot
        self.arrayItems = [arrayType(0)] * sizeOfArray  # initialize array with zeroes

    def __str__(self):
        return ' '.join([str(i) for i in self.arrayItems])

    # function for search
    def search(self, keyToSearch):
        '''Linear scan; return the first index holding the key, or -1.'''
        for i in range(self.sizeOfArray):
            if (self.arrayItems[i] == keyToSearch):  # brute-forcing
                return i  # index at which element/ key was found
        return -1  # if key not found, return -1

    # function for inserting an element
    def insert(self, keyToInsert, position):
        '''Insert key at position, shifting later elements right; the last
        element falls off the end (fixed-size array).'''
        if (self.sizeOfArray > position):
            for i in range(self.sizeOfArray - 2, position - 1, -1):
                self.arrayItems[i + 1] = self.arrayItems[i]
            self.arrayItems[position] = keyToInsert
        else:
            print('Array size is:', self.sizeOfArray)

    # function to delete an element
    def delete(self, keyToDelete, position):
        '''Delete the element at `position`, shifting later elements left.

        BUG FIX: the original left a stale duplicate of the final element
        in the last slot after shifting; it is now reset to arrayType(0).
        (`keyToDelete` is unused, as in the original; kept for interface
        compatibility.)'''
        if (self.sizeOfArray > position):
            for i in range(position, self.sizeOfArray - 1):
                self.arrayItems[i] = self.arrayItems[i + 1]
            self.arrayItems[self.sizeOfArray - 1] = self.arrayType(0)
        else:
            print('Array size is:', self.sizeOfArray)
# Instantiate a 10-slot integer array and show its (all-zero) contents.
a = Array(10, int)
print(a)
```
### Common array operations:
* Search
* Insert
* Delete
__Time complexity__:
* Search: O(n)
* Insert: O(n)
* Delete: O(n)
* Indexing: O(1)
### Search Operation on Array:
```
a = Array(10, int)
# Search for value 0: every slot holds 0, so the first match is index 0.
index = a.search(0)
print('Element found at:', index)
```
### Insert Operation:
```
a = Array(10, int)
# insert(key, position): note the argument order -- value first, then index.
a.insert(1, 2)
a.insert(2,3)
a.insert(3,4)
print(a)
```
### Delete Operation:
```
a = Array(10, int)
a.insert(1, 2)
a.insert(2,3)
a.insert(3,4)
# delete(key, position): removes the element at index 4 (the value 3).
a.delete(3, 4)
print(a)
index = a.search(1)
print('Element found at:',index)
```
#### These were the basics of how to implement Array using Python. Now we will see how to use Python built-in module 'array'.
Syntax: array(dataType, valueList)
```
# importing 'array' module
import array
# initializing array
arr = array.array('i', [1, 2, 3, 4, 5]) # initialize array with integers ('i')
# printing original array
print ("The new created array is : ",end="")
for i in range (0, 5):
    print (arr[i], end=" ")
# using append() to insert new value at end
arr.append(6);
# printing appended array
print ("\nThe appended array is : ", end="")
for i in range (0, 6):
    print (arr[i], end=" ")
# using insert() to insert value at specific position
# inserts 5 at 2nd position
arr.insert(2, 5)
# printing array after insertion
print ("\nThe array after insertion is : ", end="")
for i in range (0, 7):
    print (arr[i], end=" ")
# remove() deletes the first occurrence of the VALUE 1 (not index 1)
arr.remove(1)
# deleting a value from array
print ("\nThe array after deletion is : ", end="")
for i in range (0, 6):
    print (arr[i], end=" ")
```
### Disadvantages of Array
* __Fixed size__: The size of the array is static (specify the array size before using it, this can be overcome using Dynamic Arrays).
* __One block allocation__: To allocate the array itself at the beginning, sometimes it may not be possible to get the memory for the complete array (if the array size is big).
* __Complex position-based insertion__: To insert an element at a given position, we may need to shift the existing elements. This will create a position for us to insert the new element at the desired position. If the position at which we want to add an element is at the beginning, then the shifting operation is more expensive .
| github_jupyter |
# Underfitting and Overfitting demo using KNN
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Load the pre-cleaned Titanic dataset; 'Survived' is the binary target.
data = pd.read_csv('data_knn_classification_cleaned_titanic.csv')
data.head()
x = data.drop(['Survived'], axis=1)
y = data['Survived']
# Scaling the data (KNN is distance-based, so features must share a scale).
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
x = ss.fit_transform(x)
# Split the data; stratify=y keeps the class ratio in both splits.
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, random_state=96, stratify=y)
```
# implementing KNN
```
#imporing KNN classifier and f1 score
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import f1_score
# Creating an instance of KNN and comparing train vs. test F1 at k=12.
clf = KNN(n_neighbors = 12)
clf.fit(train_x, train_y)
train_predict = clf.predict(train_x)
k1 = f1_score(train_predict, train_y)
print("training: ",k1)
test_predict = clf.predict(test_x)
k = f1_score(test_predict, test_y)
print("testing: ",k)
def f1score(k):
    """Fit one KNN classifier per neighbor count in `k` and collect the
    train/test F1 scores (uses the module-level train/test splits)."""
    train_f1 = []
    test_f1 = []
    for n_neighbors in k:
        model = KNN(n_neighbors=n_neighbors)
        model.fit(train_x, train_y)
        train_f1.append(f1_score(model.predict(train_x), train_y))
        test_f1.append(f1_score(model.predict(test_x), test_y))
    return train_f1, test_f1
# Sweep k = 1..49 and tabulate/plot train vs. test F1.
k = range(1,50)
train_f1, test_f1 = f1score(k)
train_f1, test_f1
score = pd.DataFrame({'train score': train_f1, 'test_score':test_f1}, index = k)
score
# visualising
plt.plot(k, test_f1, color ='red', label ='test')
plt.plot(k, train_f1, color ='green', label ='train')
plt.xlabel('K Neighbors')
plt.ylabel('F1 score')
plt.title('f1 curve')
# NOTE(review): ylim(0,4,1) forwards the third positional argument to
# Axes.set_ylim's `emit` parameter -- probably not what was intended
# (likely meant plt.ylim(0, 1) for an F1 axis); confirm.
plt.ylim(0,4,1)
plt.legend()
# Repeat with a different split seed to show the curve's sensitivity.
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, random_state=42, stratify=y)
k = range(1,50)
train_f1, test_f1 = f1score(k)
# visualising
plt.plot(k, test_f1, color ='red', label ='test')
plt.plot(k, train_f1, color ='green', label ='train')
plt.xlabel('K Neighbors')
plt.ylabel('F1 score')
plt.title('f1 curve')
#plt.ylim(0,4,1)
plt.legend()
'''
here the value of k is decided by using both train and test data
, instead of (testset) that we can use validation set
types:
1. Hold-out validation
as we directly divide the data into proportions, there might be a
case where the validation set is biased to only one class
(which mean validation set might have data of only one class,
these results in set have no idea about the other class)
in this we have different distributions
2. Stratified hold out
in this we have equal distributions
in the hold out scenario we need good amount of data to maintain,
so we need to train with lot data. if the dataset is small?
and we want to build the complex relations out of them?
'''
```
# Bias Variance Tradeoff
```
'''
if variance is high then bias is low
if bias is high then variance is low
error high bias high variance optimally in btw
fit underfit overfit bestfit
k range 21<k k<11 12<k<21
complexity low high optimum
Generalization error : defines the optimum model between high bias and high variance
High variance refers to overfitting whereas high bias
refers to underfitting and we do not want both of these scenarios.
So, the best model is said to have low bias and low variance.
'''
```
| github_jupyter |
# ML.Net - StopWords
## Davi Ramos -> Cientista de Dados 👋
(davi.info@gmail.com)
[](https://www.linkedin.com/in/davi-ramos/)
[](https://twitter.com/Daviinfo/)
<a href="https://github.com/DaviRamos"><img src="https://img.shields.io/github/followers/DaviRamos.svg?label=GitHub&style=social" alt="GitHub"></a>
```
// ML.NET Nuget packages installation
//#r "nuget:Microsoft.ML,1.3.1"
#r "nuget:Microsoft.ML"
```
## Using C# Class
```
using Microsoft.ML;
using Microsoft.ML.Data;
using System;
using System.Collections.Generic;
using System.Text;
```
## Declare data-classes for input data and predictions
```
/// <summary>Input record for the pipeline: the raw text to tokenize.</summary>
public class TextData
{
    public string Text { get; set; }
}

/// <summary>Prediction record: the token array produced by the pipeline.</summary>
public class TextTokens
{
    public string[] Tokens { get; set; }
}
```
## Função Auxiliar para Imprimir os Tokens
```
// Writes each token of a prediction on its own line, preceded by a blank line.
private static void PrintTokens(TextTokens tokens)
{
    Console.WriteLine(Environment.NewLine);
    var sb = new StringBuilder();
    foreach (var token in tokens.Tokens)
    {
        sb.AppendLine(token);
    }
    Console.WriteLine(sb.ToString());
}

var context = new MLContext();
// An empty enumerable is enough: the tokenizer/stop-word transforms are
// stateless, but Fit still requires an IDataView to derive the schema.
var emptyData = new List<TextData>();
var data = context.Data.LoadFromEnumerable(emptyData);

// Pipeline 1: split on space/period/comma, then drop English default stop words.
var tokenization = context.Transforms.Text.TokenizeIntoWords("Tokens", "Text", separators: new[] { ' ', '.', ',' })
    .Append(context.Transforms.Text.RemoveDefaultStopWords("Tokens", "Tokens",
        Microsoft.ML.Transforms.Text.StopWordsRemovingEstimator.Language.English));
var stopWordsModel = tokenization.Fit(data);
var engine = context.Model.CreatePredictionEngine<TextData, TextTokens>(stopWordsModel);
var newText = engine.Predict(new TextData { Text = "This is a test sentence, and it is a long one." });
PrintTokens(newText);

// Pipeline 2: same tokenization, but with a custom stop-word list ("and", "a").
var customTokenization = context.Transforms.Text.TokenizeIntoWords("Tokens", "Text", separators: new[] { ' ', '.', ',' })
    .Append(context.Transforms.Text.RemoveStopWords("Tokens", "Tokens", new[] { "and", "a" }));
var customStopWordsModel = customTokenization.Fit(data);
var customEngine = context.Model.CreatePredictionEngine<TextData, TextTokens>(customStopWordsModel);
var newCustomText = customEngine.Predict(new TextData { Text = "This is a test sentence, and it is a long one." });
PrintTokens(newCustomText);
Console.ReadLine();
```
| github_jupyter |
# [ATM 623: Climate Modeling](../index.ipynb)
[Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany
# Lecture 17: Ice albedo feedback in the EBM
### About these notes:
This document uses the interactive [`IPython notebook`](http://ipython.org/notebook.html) format (now also called [`Jupyter`](https://jupyter.org)). The notes can be accessed in several different ways:
- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware
- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)
- A complete snapshot of the notes as of May 2015 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).
Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab
## Contents
1. [Interactive snow and ice line in the EBM](#section1)
2. [Polar-amplified warming in the EBM](#section2)
3. [Effects of diffusivity in the annual mean EBM with albedo feedback](#section3)
4. [Diffusive response to a point source of energy](#section4)
____________
<a id='section1'></a>
## 1. Interactive snow and ice line in the EBM
____________
### The annual mean EBM
the equation is
$$ C(\phi) \frac{\partial T_s}{\partial t} = (1-\alpha) ~ Q - \left( A + B~T_s \right) + \frac{D}{\cos\phi } \frac{\partial }{\partial \phi} \left( \cos\phi ~ \frac{\partial T_s}{\partial \phi} \right) $$
### Temperature-dependent ice line
Let the surface albedo be larger wherever the temperature is below some threshold $T_f$:
$$ \alpha\left(\phi, T(\phi) \right) = \left\{\begin{array}{ccc}
\alpha_0 + \alpha_2 P_2(\sin\phi) & ~ & T(\phi) > T_f \\
a_i & ~ & T(\phi) \le T_f \\
\end{array} \right. $$
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import climlab
# for convenience, set up a dictionary with our reference parameters:
# OLR = A + B*T (W/m^2); a0/a2 = warm-surface albedo Legendre coefficients;
# ai = icy albedo; Tf = freezing threshold (deg C) for the ice line.
param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.}
model1 = climlab.EBM_annual( num_lat=180, D=0.55, **param )
# NOTE: Python 2 print statement -- this notebook targets a py2-era climlab.
print model1
```
Because we provided a parameter `ai` for the icy albedo, our model now contains several sub-processes contained within the process called `albedo`. Together these implement the step-function formula above.
The process called `iceline` simply looks for grid cells with temperature below $T_f$.
```
print model1.param
def ebm_plot( model, figsize=(8,12), show=True ):
    '''This function makes a plot of the current state of the model,
    including temperature, energy budget, and heat transport.

    Three stacked panels vs. latitude: (1) surface temperature, (2) the
    energy-budget terms (ASR, -OLR, net radiation, dynamical convergence,
    and their total), (3) poleward heat transport.  Returns the Figure.
    NOTE: `show` is accepted but unused.'''
    templimits = -30,35
    radlimits = -340, 340
    htlimits = -7,7
    latlimits = -90,90
    lat_ticks = np.arange(-90,90,30)
    fig = plt.figure(figsize=figsize)
    # Panel 1: temperature profile.
    ax1 = fig.add_subplot(3,1,1)
    ax1.plot(model.lat, model.Ts)
    ax1.set_xlim(latlimits)
    ax1.set_ylim(templimits)
    ax1.set_ylabel('Temperature (deg C)')
    ax1.set_xticks( lat_ticks )
    ax1.grid()
    # Panel 2: energy budget terms (W/m^2); 'total' should be ~0 at equilibrium.
    ax2 = fig.add_subplot(3,1,2)
    ax2.plot(model.lat, model.diagnostics['ASR'], 'k--', label='SW' )
    ax2.plot(model.lat, -model.diagnostics['OLR'], 'r--', label='LW' )
    ax2.plot(model.lat, model.diagnostics['net_radiation'], 'c-', label='net rad' )
    ax2.plot(model.lat, model.heat_transport_convergence(), 'g--', label='dyn' )
    ax2.plot(model.lat, model.diagnostics['net_radiation'].squeeze()
                        + model.heat_transport_convergence(), 'b-', label='total' )
    ax2.set_xlim(latlimits)
    ax2.set_ylim(radlimits)
    ax2.set_ylabel('Energy budget (W m$^{-2}$)')
    ax2.set_xticks( lat_ticks )
    ax2.grid()
    ax2.legend()
    # Panel 3: poleward heat transport (PW), defined on latitude bounds.
    ax3 = fig.add_subplot(3,1,3)
    ax3.plot(model.lat_bounds, model.heat_transport() )
    ax3.set_xlim(latlimits)
    ax3.set_ylim(htlimits)
    ax3.set_ylabel('Heat transport (PW)')
    ax3.set_xlabel('Latitude')
    ax3.set_xticks( lat_ticks )
    ax3.grid()
    return fig
# Run 5 years to (near) equilibrium, plot the state, and report the ice edge.
model1.integrate_years(5)
f = ebm_plot(model1)
model1.diagnostics['icelat']
```
____________
<a id='section2'></a>
## 2. Polar-amplified warming in the EBM
____________
### Add a small radiative forcing
The equivalent of doubling CO2 in this model is something like
$$ A \rightarrow A - \delta A $$
where $\delta A = 4$ W m$^{-2}$.
```
# Apply a ~2xCO2-like radiative forcing by lowering the OLR offset A by 4 W/m^2,
# re-equilibrate a copy of the model, and compare temperature profiles.
deltaA = 4.
model2 = climlab.process_like(model1)
model2.subprocess['LW'].A = param['A'] - deltaA
model2.integrate_years(5, verbose=False)
plt.plot(model1.lat, model1.Ts)
plt.plot(model2.lat, model2.Ts)
```
The warming is polar-amplified: more warming at the poles than elsewhere.
Why?
Also, the current ice line is now:
```
model2.diagnostics['icelat']
```
There is no ice left!
Let's do some more greenhouse warming:
```
# Double the forcing (A reduced by 8 W/m^2) and overlay all three states.
model3 = climlab.process_like(model1)
model3.subprocess['LW'].A = param['A'] - 2*deltaA
model3.integrate_years(5, verbose=False)
plt.plot(model1.lat, model1.Ts)
plt.plot(model2.lat, model2.Ts)
plt.plot(model3.lat, model3.Ts)
plt.xlim(-90, 90)
plt.grid()
```
In the ice-free regime, there is no polar-amplified warming. A uniform radiative forcing produces a uniform warming.
____________
<a id='section3'></a>
## 3. Effects of diffusivity in the annual mean EBM with albedo feedback
____________
### In-class investigation:
We will repeat the exercise from Lecture 14, but this time with albedo feedback included in our model.
- Solve the annual-mean EBM (integrate out to equilibrium) over a range of different diffusivity parameters.
- Make three plots:
- Global-mean temperature as a function of $D$
- Equator-to-pole temperature difference $\Delta T$ as a function of $D$
- Poleward heat transport across 35 degrees $\mathcal{H}_{max}$ as a function of $D$
- Choose a value of $D$ that gives a reasonable approximation to observations:
- $\Delta T \approx 45$ ºC
Use these parameter values:
```
# Reference parameter set for the diffusivity sweep below.
param = {'A':210, 'B':2, 'a0':0.3, 'a2':0.078, 'ai':0.62, 'Tf':-10.}
print param
```
### One possible way to do this:
```
# Sweep the diffusivity D, equilibrating one EBM per value, and record the
# global-mean T, equator-to-pole contrast, and heat transport at 35.5N.
Darray = np.arange(0., 2.05, 0.05)
model_list = []
Tmean_list = []
deltaT_list = []
Hmax_list = []
for D in Darray:
    ebm = climlab.EBM_annual(num_lat=360, D=D, **param )
    #ebm.subprocess['insolation'].s2 = -0.473
    ebm.integrate_years(5., verbose=False)
    Tmean = ebm.global_mean_temperature()
    deltaT = np.max(ebm.Ts) - np.min(ebm.Ts)
    HT = ebm.heat_transport()
    #Hmax = np.max(np.abs(HT))
    # Sample the transport at a fixed latitude bound rather than its maximum.
    ind = np.where(ebm.lat_bounds==35.5)[0]
    Hmax = HT[ind]
    model_list.append(ebm)
    Tmean_list.append(Tmean)
    deltaT_list.append(deltaT)
    Hmax_list.append(Hmax)
# Two-axis summary plot: temperatures (left, blue) and transport (right, red).
color1 = 'b'
color2 = 'r'
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
ax1.plot(Darray, deltaT_list, color=color1, label='$\Delta T$')
ax1.plot(Darray, Tmean_list, '--', color=color1, label='$\overline{T}$')
ax1.set_xlabel('D (W m$^{-2}$ K$^{-1}$)', fontsize=14)
ax1.set_xticks(np.arange(Darray[0], Darray[-1], 0.2))
ax1.set_ylabel('Temperature ($^\circ$C)', fontsize=14, color=color1)
for tl in ax1.get_yticklabels():
    tl.set_color(color1)
ax1.legend(loc='center right')
ax2 = ax1.twinx()
ax2.plot(Darray, Hmax_list, color=color2)
ax2.set_ylabel('Poleward heat transport across 35.5$^\circ$ (PW)', fontsize=14, color=color2)
for tl in ax2.get_yticklabels():
    tl.set_color(color2)
ax1.set_title('Effect of diffusivity on EBM with albedo feedback', fontsize=16)
ax1.grid()
```
____________
<a id='section4'></a>
## 4. Diffusive response to a point source of energy
____________
Let's add a point heat source to the EBM and see what sets the spatial structure of the response.
We will add a heat source at about 45º latitude.
First, we will calculate the response in a model **without albedo feedback**.
```
# Diffusive response to a 100 W/m^2 point heat source at 45.5N, with the
# ice-albedo feedback disabled (no `ai` parameter, so albedo is fixed).
param_noalb = {'A': 210, 'B': 2, 'D': 0.55, 'Tf': -10.0, 'a0': 0.3, 'a2': 0.078}
m1 = climlab.EBM_annual(num_lat=180, **param_noalb)
print m1
m1.integrate_years(5.)
m2 = climlab.process_like(m1)
point_source = climlab.process.energy_budget.ExternalEnergySource(state=m2.state)
ind = np.where(m2.lat == 45.5)
point_source.heating_rate['Ts'][ind] = 100.
m2.add_subprocess('point source', point_source)
print m2
m2.integrate_years(5.)
plt.plot(m2.lat, m2.Ts - m1.Ts)
plt.xlim(-90,90)
plt.grid()
```
The warming effects of our point source are felt **at all latitudes** but the effects decay away from the heat source.
Some analysis will show that the length scale of the warming is proportional to
$$ \sqrt{\frac{D}{B}} $$
so increases with the diffusivity.
Now repeat this calculation **with ice albedo feedback**
```
# Same point-source experiment, now with ice-albedo feedback enabled
# (`param` includes the icy albedo ai); reuses `ind` from the cell above.
m3 = climlab.EBM_annual(num_lat=180, **param)
m3.integrate_years(5.)
m4 = climlab.process_like(m3)
point_source = climlab.process.energy_budget.ExternalEnergySource(state=m4.state)
point_source.heating_rate['Ts'][ind] = 100.
m4.add_subprocess('point source', point_source)
m4.integrate_years(5.)
plt.plot(m4.lat, m4.Ts - m3.Ts)
plt.xlim(-90,90)
plt.grid()
```
Now the maximum warming **does not coincide with the heat source at 45º**!
Our heat source has led to melting of snow and ice, which induces an additional heat source in the high northern latitudes.
**Heat transport communicates the external warming to the ice cap, and also communicates the increased shortwave absorption due to ice melt globally!**
<div class="alert alert-success">
[Back to ATM 623 notebook home](../index.ipynb)
</div>
____________
## Credits
The author of this notebook is [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php), offered in Spring 2015.
____________
____________
## Version information
____________
```
%install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
%load_ext version_information
%version_information numpy, climlab
```
| github_jupyter |
```
import numpy as np
from scipy.stats import norm
from scipy.integrate import quad
from scipy.optimize import root
"""
%% Summary of CJK_Func.m %%
The function generates the value of CJK representation.
Note that, this function is not used directly, it is used to solve for Bt
--------------------------------------------------------------------------
Input:
Bt - the value of boundary at t
Bs - the value of boundary at s
k - strike price
r - risk-free interest rate
q - continuously compounded dividend rate
vol - annualized volatility
T - maturity
t - current time
--------------------------------------------------------------------------
Output:
y - value of CJK reprentation
--------------------------------------------------------------------------
Author:
Nattapong Kongmuang
nat.kmg@gmail.com
MSc Financial Engineering, ICMA Centre, Henley Business School,
University of Reading, UK
24 July 2015
--------------------------------------------------------------------------
"""
def d1(x,y,z,b,vol):
    """Black-Scholes d1 term: spot x, strike y, time z, cost-of-carry b."""
    drifted = (b + 0.5 * vol * vol) * z
    return (np.log(x / y) + drifted) / (vol * np.sqrt(z))
def d2(x,y,z,b,vol):
    """Black-Scholes d2 term: d1 shifted down by one volatility unit."""
    return d1(x, y, z, b, vol) - vol * np.sqrt(z)
def CJK_Func( Bt,Bs,k,r,q,vol,T,t ):
    """Residual of the CJK early-exercise-boundary equation for an American
    put; a root in Bt defines the boundary at time t (see header comment).

    Bt, Bs: trial boundary at t and known boundary at s; k: strike;
    r: risk-free rate; q: dividend yield; vol: volatility; T: maturity.
    Returns the scalar residual y (zero at the true boundary).
    """
    T1 = T-t
    b=r-q
    # Five terms of the representation: intrinsic value vs. European value
    # plus the early-exercise premium integral.
    term = np.zeros(5)
    term[0] = Bt
    term[1] = -k
    term[2] = k*np.exp(-r*T1)*norm.cdf(-d2(Bt,k,T1,b,vol))
    term[3] = -Bt*np.exp(-q*T1)*norm.cdf(-d1(Bt,k,T1,b,vol))
    # NOTE(review): the premium integrand discounts by exp(-r*s) / uses time
    # argument s directly, where the usual representation has exp(-r*(s-t))
    # and s-t -- confirm against the source derivation.
    integralFun = lambda s: r*k*np.exp(-r*(s))*norm.cdf(-d2(Bt,Bs,(s),b,vol)) - q*Bt*np.exp(-q*(s))*norm.cdf(-d1(Bt,Bs,(s),b,vol))
    term[4] = quad(integralFun,t,T)[0]
    y = np.sum(term)
    return y
"""
%% Summary of Boundary.m %%
The function generates the early exercise boundary and spot of time by
CJK representation
--------------------------------------------------------------------------
Input:
k - strike price
r - risk-free interest rate
q - continuously compounded dividend rate
vol - annualized volatility
T - time to maturity
steps - a number of time steps in the calculation
--------------------------------------------------------------------------
Output:
B - the values of early exercise boundary
time - the point of time that each B-value is calculated
--------------------------------------------------------------------------
Author:
Nattapong Kongmuang
nat.kmg@gmail.com
MSc Financial Engineering, ICMA Centre, Henley Business School,
University of Reading, UK
24 July 2015
--------------------------------------------------------------------------
"""
def Boundary( k,r,q,vol,T,steps ):
    """Compute the American-put early-exercise boundary via the CJK
    representation, stepping backwards from maturity.

    Parameters
    ----------
    k : strike; r : risk-free rate; q : continuous dividend yield;
    vol : annualized volatility; T : maturity (years); steps : time steps.

    Returns
    -------
    B : ndarray of length steps+1, boundary values on an even time grid
        (B[i] corresponds to t = i*T/steps).
        NOTE(review): as in the original, the recursion stops at index 2,
        so B[0] and B[1] remain 0.
    """
    dt=T/steps
    t=T
    B = np.zeros(steps+1)
    # (The original also built an unused `time` grid via
    # np.linspace(0, T, np.floor(dt).astype(np.int)) -- np.floor(dt) is 0
    # for dt < 1 and np.int no longer exists in NumPy, so it is dropped.)
    for i in range(steps,1,-1):
        if i==steps:
            # Terminal condition: B(T) = min(k, k*r/q) for q > 0, else k.
            # BUG FIX: the original tested q == 0 before dividing by q
            # (guaranteed ZeroDivisionError on that branch) and called
            # np.min(k, k*r/q), whose second argument is `axis`.
            if q > 0:
                B[i] = min(k, k*r/q)
            else:
                B[i] = k
        else:
            # Step back in time and solve the CJK equation for B(t), using
            # the boundary value from the previous (later) step as B(s).
            t=t-dt
            res=root(lambda Bt: CJK_Func(Bt,B[i+1],k,r,q,vol,T,t) ,k)
            B[i] = res.x
    return B
# Demo: boundary of an American put (K=100, r=7%, q=3%, vol=25%, T=1y)
# on a 10000-step grid, then plot boundary vs. time.
s0 = 100
k = 100
r = 0.07
q = 0.03
vol = 0.25
T=1
#paths = 100000
steps = 10000
dt=T/steps
B = Boundary( k,r,q,vol,T,steps)
from matplotlib import pyplot as plt
plt.plot(np.linspace(0,1,10001),B)
from scipy.stats.distributions import norm, lognorm, rv_frozen
class GeometricBrownianMotion:
    '''Geometric Brownian motion with optional drift.'''

    def __init__(self, mu: float=0.0, sigma: float=1.0):
        self.mu = mu
        self.sigma = sigma

    def simulate(self, t: np.array, n: int, rnd: np.random.RandomState) \
            -> np.array:
        '''Simulate n paths of GBM at the (increasing) times in t.
        Returns an array of shape (t.size, n); paths start from S(0)=1.'''
        assert t.ndim == 1, 'One dimensional time vector required'
        assert t.size > 0, 'At least one time point is required'
        dt = np.concatenate((t[0:1], np.diff(t)))
        assert (dt >= 0).all(), 'Increasing time vector required'
        # Draw all increments at once; the transpose trick broadcasts
        # sqrt(dt) of each step across the n paths.
        increments = (rnd.normal(size=(t.size, n)).T * np.sqrt(dt)).T
        brownian = np.cumsum(increments, axis=0)
        drift_term = (self.mu - self.sigma**2 / 2) * t
        return np.exp(self.sigma * brownian.T + drift_term).T

    def distribution(self, t: float) -> rv_frozen:
        '''Marginal distribution of S(t): lognormal with moment-matched
        log-mean and log-std.'''
        log_mean = (self.mu - self.sigma**2 / 2) * t
        log_std = self.sigma * np.sqrt(t)
        return lognorm(scale=np.exp(log_mean), s=log_std)
from scipy.optimize import newton
class LS:
    """Longstaff-Schwartz American put pricer (least-squares Monte Carlo).

    NOTE(review): `Polynomial` is not imported in this cell -- presumably
    numpy.polynomial.Polynomial imported elsewhere; confirm.  Also, X is
    indexed both as X[-1, :] (time along axis 0) and X[:, i] with the loop
    over X.shape[1] (time along axis 1) -- the axis conventions look
    inconsistent; verify against the path-simulation shape.
    """
    def __init__(self, X, t, r, strike):
        self.X = X          # simulated price paths
        self.t = t          # time grid
        self.r = r          # risk-free rate
        self.strike = strike
    def _ls_american_option_quadratic_iter(self, X, t, r, strike):
        # given no prior exercise we just receive the payoff of a European option
        cashflow = np.maximum(strike - X[-1, :], 0.0)
        # iterating backwards in time
        for i in reversed(range(1, X.shape[1] - 1)):
            # discount factor between t[i] and t[i+1]
            df = np.exp(-r * (t[i+1]-t[i]))
            # discount cashflows from next period
            cashflow = cashflow * df
            x = X[:, i]
            # exercise value for time t[i]
            exercise = np.maximum(strike - x, 0.0)
            # boolean index of all in-the-money paths
            itm = exercise > 0
            # fit polynomial of degree 2
            fitted = Polynomial.fit(x[itm], cashflow[itm], 2)
            # approximate continuation value
            continuation = fitted(x)
            # boolean index where exercise is beneficial
            ex_idx = itm & (exercise > continuation)
            # update cashflows with early exercises
            cashflow[ex_idx] = exercise[ex_idx]
            # NOTE(review): scipy.optimize.newton requires a callable as its
            # first argument, but `func` here is an ndarray -- this call
            # raises TypeError at runtime; the boundary-solving step looks
            # unfinished.  Confirm intended behavior before relying on it.
            func = cashflow - strike
            res = newton(func,strike)
            yield res,cashflow, x, fitted, continuation, exercise, ex_idx
    def simulate(self):
        # Run the backward induction to completion; the final iteration's
        # discounted mean cashflow is the price estimate.
        for res,cashflow, *_ in self._ls_american_option_quadratic_iter(self.X, self.t, self.r, self.strike):
            pass
        return res,cashflow.mean(axis=0) * np.exp(-self.r * (self.t[1] - self.t[0]))
```
| github_jupyter |
# Optimal probabilistic clustering - Part II
> ...
- toc: true
- branch: master
- badges: true
- comments: true
- categories: [Clustering, Entropy, Membership Entropy]
- image: images/post_image_optimal_clustering.png
- hide: false
- search_exclude: false
- author: Joao Rodrigues
```
import numpy as np
from scipy.linalg import norm
from scipy.spatial.distance import cdist
class OPC:
    """Optimal Probabilistic Clustering -- a fuzzy-c-means-style
    clusterer that alternates between updating cluster centres and the
    soft membership matrix until the memberships stop changing.

    Attributes
    ----------
    u : (N, n_clusters) soft membership matrix, set by fit().
    centers : (n_clusters, n_features) cluster centres, set by fit().
    """
    def __init__(self, n_clusters=10, max_iter=150, m=2, error=1e-5, random_state=42):
        self.u, self.centers = None, None
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.m = m                        # fuzzifier exponent (m > 1)
        self.error = error                # convergence tolerance on ||u - u_prev||
        self.random_state = random_state
    def fit(self, X, initial_centers=None):
        """Fit the model to data X of shape (N, n_features).

        If initial_centers is given it is used as-is on the first
        iteration instead of being derived from the random memberships.
        Returns self.
        """
        N = X.shape[0]
        C = self.n_clusters
        centers = initial_centers
        # Random membership initialization, rows normalized to sum to 1.
        # (Cleanup: replaced the np.tile construction with broadcasting
        # and removed leftover debug prints / a no-op assignment.)
        r = np.random.RandomState(self.random_state)
        u = r.rand(N, C)
        u = u / u.sum(axis=1, keepdims=True)
        iteration = 0
        while iteration < self.max_iter:
            u2 = u.copy()
            # On the first pass honor caller-supplied centres; afterwards
            # recompute them from the current memberships.
            if iteration > 0 or centers is None:
                centers = self.next_centers(X, u)
            u = self.next_u(X, centers)
            iteration += 1
            # Stopping rule: memberships have (approximately) converged.
            if norm(u - u2) < self.error:
                break
        self.u = u
        self.centers = centers
        return self
    def next_centers(self, X, u):
        """Weighted-mean update of the cluster centres (weights u**m)."""
        um = u ** self.m
        return (X.T @ um / np.sum(um, axis=0)).T
    def next_u(self, X, centers):
        """Membership update for fixed centres."""
        return self._predict(X, centers)
    def _predict(self, X, centers):
        """Soft memberships of X w.r.t. centers; rows sum to 1."""
        power = float(2 / (self.m - 1))
        temp = cdist(X, centers) ** power
        denominator_ = temp.reshape((X.shape[0], 1, -1)).repeat(temp.shape[-1], axis=1)
        denominator_ = temp[:, :, np.newaxis] / denominator_
        return 1 / denominator_.sum(2)
    def predict(self, X):
        """Hard cluster labels: argmax of the soft memberships."""
        if len(X.shape) == 1:
            X = np.expand_dims(X, axis=0)
        u = self._predict(X, self.centers)
        return np.argmax(u, axis=-1)
######################################## Part I
#from fcmeans import FCM
def run_cluster(n_clusters, features, initial_centers=None, random_state=42):
    """Cluster `features` into `n_clusters` groups with OPC.

    Returns (centers, membership matrix, hard labels, per-point
    normalized membership entropy, total membership entropy).
    """
    clusterer = OPC(n_clusters=n_clusters, random_state=random_state, max_iter=1000, error=1e-9)
    clusterer = clusterer.fit(features, initial_centers=initial_centers)
    memberships = clusterer.u
    centres = clusterer.centers
    # hard assignment: the most probable cluster for each observation
    hard_labels = np.argmax(memberships, 1)
    # per-observation membership entropy, normalized into [0, 1]
    point_entropy = -np.sum(memberships * np.log(memberships), 1) / np.log(n_clusters)
    # total membership entropy across the entire feature space
    total_entropy = np.sum(point_entropy)
    return centres, memberships, hard_labels, point_entropy, total_entropy
```
Check if I'm introducing a regularization in inferring the optimal number of clusters
```
regularization = 1.0
```
## Experimental results
```
import numpy as np
```
(n,k,m) n observations, k clusters, at least m observations per cluster
```
def construct_random_partition(n, k, m, seed=None):
    """Split n observations into k part sizes, each at least m.

    Draws k-1 distinct cut points and converts the gaps between them
    into part sizes; the returned sizes always sum to n.
    """
    rng = np.random.RandomState(seed=seed)
    upper = n - k * (m - 1)
    # k-1 distinct, sorted cut points strictly inside (0, upper)
    cuts = np.sort(rng.choice(range(1, upper), k - 1, replace=False))
    boundaries = np.concatenate((cuts, [upper]))
    # gap sizes between consecutive boundaries, shifted so each part >= m
    sizes = np.concatenate((boundaries[:1], np.diff(boundaries)))
    return sizes - 1 + m
# Example: split 200 observations into 5 parts of at least 2 each.
partition = construct_random_partition(n=200, k=5, m=2, seed=40)
print(partition)
```
**Generation of random datasets**
```
def generate_random_dataset(partition, n_features, std, seed):
    """Draw a synthetic clustered dataset.

    For each size in `partition`, a cluster centre is sampled uniformly
    in [-1, 1]^n_features and that many observations are drawn as
    Gaussian perturbations (scale `std`) around it.  Rows are shuffled
    before being returned as an array of shape (sum(partition),
    n_features).
    """
    rng = np.random.RandomState(seed=seed)
    observations = []
    for size in partition:
        # cluster centre coordinates
        centre = rng.uniform(-1, 1, n_features)
        # perturbed observations around this centre
        for _ in range(size):
            observations.append(centre + std * rng.standard_normal(n_features))
    data = np.array(observations)
    # shuffle the rows so cluster members are interleaved
    return data[rng.permutation(data.shape[0]), :]
dataset = generate_random_dataset(partition=partition, n_features=2, std=0.05, seed=42)
```
We will, at each iteration, collect the mean-intracluster entropy
```
# Per-iteration record of intra-cluster mean membership entropies.
Si = list()
iteration = 0
centers = None
# Candidate cluster counts to evaluate (2 through 9).
n_clusters_trials = np.arange(2, 10, 1)
```
Some helpful functions
```
### Minimization of membership entropy
def minimize_membership_entropy(n_clusters_trials, dataset, regularization=0, random_state=42):
    """Evaluate each candidate cluster count and pick the one that
    minimizes the (optionally regularized) total membership entropy.

    Returns (optimal cluster count, list of regularized total
    entropies, one per candidate)."""
    total_entropies = []
    for candidate in n_clusters_trials:
        *_, total_entropy = run_cluster(n_clusters=candidate,
                                        features=dataset,
                                        random_state=random_state)
        # a linear penalty discourages needlessly large cluster counts
        total_entropies.append(total_entropy + regularization * candidate)
    optimal_nclusters = n_clusters_trials[np.argmin(total_entropies)]
    return optimal_nclusters, total_entropies
### Cluster quality
def calculate_cluster_quality(p, representative_cluster, PRINT=True):
    """Mean normalized membership entropy per representative cluster.

    Returns a dict mapping each cluster label to the mean entropy of the
    observations assigned to it (lower = better resolved cluster).
    """
    Si = dict()
    n_clusters = p.shape[1]
    for label in set(representative_cluster):
        # membership rows of the observations assigned to this cluster
        member_probs = p[np.argmax(p, 1) == label, :]
        # per-observation entropy, normalized by log(#clusters)
        norm_entropy = -np.sum(member_probs * np.log(member_probs), 1) / np.log(n_clusters)
        Si[label] = np.mean(norm_entropy)
    if PRINT:
        for label, value in Si.items():
            print("Mean membership entropy across cluster {0} = {1}".format(label, np.round(value, 3)))
    return Si
```
### Iteration 1
**1.1) Minimization of membership entropy**
```
# Pick the cluster count that minimizes the regularized total entropy.
optimal_nclusters, total_entropies = minimize_membership_entropy(n_clusters_trials, dataset, regularization)
print("Optimal number of clusters =", optimal_nclusters)
```
**1.2) Clustering**
```
centers, p, representative_cluster, Sx, S = run_cluster(optimal_nclusters, dataset)
```
**1.3) Cluster quality**
```
Si.append(calculate_cluster_quality(p, representative_cluster))
```
**1.4) Plot**
```
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
def make_rgb_transparent(rgb, alpha):
    """Simulate transparency by blending `rgb` toward a white
    background with opacity `alpha` (1 = opaque, 0 = fully white)."""
    background = [1, 1, 1]
    return [alpha * fg + (1 - alpha) * bg for fg, bg in zip(rgb, background)]
colormap = cm.get_cmap('Accent')
# Per-cluster colors: opaque edge color and a lighter face color.
edgecolors = list()
facecolors = list()
for i in range(0, optimal_nclusters):
    edgecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=1))
    facecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=0.65))
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
# Left panel: total entropy vs. cluster count, with a vertical marker
# at the selected optimum.
axes[0].plot([optimal_nclusters, optimal_nclusters], [0, np.max(total_entropies)], color=(0.8,0.6,0.6), linewidth=2)
axes[0].plot(n_clusters_trials, total_entropies, color=(0.46,0.46,0.46), linewidth=2)
axes[0].set_xlabel('Number of clusters')
axes[0].set_ylabel('Total membership entropy')
# Right panel: observations colored by representative cluster, faded
# toward white in proportion to their membership entropy.
color_seq = list()
for j in range(0, dataset.shape[0]):
    color_seq.append(make_rgb_transparent(edgecolors[representative_cluster[j]], 1-Sx[j]))
# Empty scatters create one legend entry per cluster.
for i in range(0, optimal_nclusters):
    axes[1].scatter([], [], label=str(i), color=edgecolors[i])
axes[1].scatter(dataset[:,0], dataset[:,1], marker='.', s=60, edgecolors=(0.6,0.6,0.6,0.5), c=color_seq)
# Cluster centres drawn as red triangles.
axes[1].scatter(centers[:,0], centers[:,1], color=(0.8,0.2,0.2, 0.8), marker="v")
axes[1].set_xlabel('X')
axes[1].set_ylabel('Y')
axes[1].set_xlim(-1.2,1.2)
axes[1].set_ylim(-1.2,1.2)
axes[1].legend(loc="best")
plt.tight_layout()
plt.show()
```
**1.5) Finds clusters with an above-average mean membership entropy**
```
print("Intra-cluster mean membership entropy")
Si[iteration]
# Clusters whose mean membership entropy exceeds the average are
# considered poorly resolved ("bad") and will be re-clustered.
bad_clusters = np.array(list(Si[iteration].keys()))[list(Si[iteration].values()) > np.mean(list(Si[iteration].values()))]
print("Clusters with above-average membership entropy")
bad_clusters
good_clusters = np.array(list(set(Si[iteration].keys()).difference(set(bad_clusters))))
good_clusters
# Keep the centres of the well-resolved clusters for re-initialization.
centers_good_clusters = centers[good_clusters,:]
```
**1.6) Collects observations in the above selected clusters**
```
# Indices of all observations assigned to a poorly resolved cluster.
inds = []
for cluster in bad_clusters:
    inds += list(np.where(representative_cluster==cluster)[0])
inds = np.squeeze(np.array(inds))
dataset_bad_clusters = dataset[inds,:]
# Re-estimate the optimal cluster count on the bad-cluster subset only.
optimal_nclusters, total_entropies = minimize_membership_entropy(n_clusters_trials, dataset_bad_clusters, regularization)
print("Optimal number of clusters =", optimal_nclusters)
# NOTE(review): this call clusters the FULL dataset, not
# dataset_bad_clusters -- confirm whether the subset was intended.
new_centers, p, representative_cluster, Sx, S = run_cluster(optimal_nclusters, dataset)
# Combine the retained good centres with the newly found ones and use
# them to re-initialize a final clustering pass.
trial_centers = np.vstack((centers_good_clusters, new_centers))
# NOTE(review): the cluster count passed here is centers.shape[0] (the
# previous count), which may not match trial_centers' row count -- verify.
centers, p, representative_cluster, Sx, S = run_cluster(centers.shape[0], dataset, initial_centers=trial_centers)
optimal_nclusters = centers.shape[0]
# Re-derive per-cluster colors for the (possibly changed) cluster count.
edgecolors = list()
facecolors = list()
for i in range(0, optimal_nclusters):
    edgecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=1))
    facecolors.append(make_rgb_transparent(rgb=colormap(1.0*i/(optimal_nclusters-1)), alpha=0.65))
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
# Fade each observation toward white according to its membership entropy.
color_seq = list()
for j in range(0, dataset.shape[0]):
    color_seq.append(make_rgb_transparent(edgecolors[representative_cluster[j]], 1-Sx[j]))
# Empty scatters create one legend entry per cluster.
for i in range(0, optimal_nclusters):
    axes[1].scatter([], [], label=str(i), color=edgecolors[i])
axes[1].scatter(dataset[:,0], dataset[:,1], marker='.', s=60, edgecolors=(0.6,0.6,0.6,0.5), c=color_seq)
# Bug fix: the y-coordinates previously came from trial_centers while
# the x-coordinates came from centers (mismatched arrays); both must
# come from the fitted centers, as in the earlier plotting cell.
axes[1].scatter(centers[:,0], centers[:,1], color=(0.8,0.2,0.2, 0.8), marker="v")
axes[1].set_xlabel('X')
axes[1].set_ylabel('Y')
axes[1].set_xlim(-1.2,1.2)
axes[1].set_ylim(-1.2,1.2)
axes[1].legend(loc="best")
plt.tight_layout()
plt.show()
len(edgecolors)
```
Initializing fcmeans with different seeds and then computing statistics over the runs would probably help.
**References:**
{% bibliography --cited %}
| github_jupyter |
# Neural networks with PyTorch
Next I'll show you how to build a neural network with PyTorch.
```
# Import things like usual
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
```
First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.
```
# Define a transform to normalize the data.
# Bug fix: MNIST images have a single channel, so Normalize takes one
# mean and one std; the original 3-tuples are for RGB data and make
# torchvision raise a channel-mismatch error.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),
                              ])
# Download and load the training data
trainset = datasets.MNIST('MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.MNIST('MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
# Grab one batch for inspection.  Bug fix: use the builtin next();
# `dataiter.next()` is Python-2-era and unsupported by modern loaders.
dataiter = iter(trainloader)
images, labels = next(dataiter)
```
We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. We'd use this to loop through the dataset for training, but here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size (64, 1, 28, 28). So, 64 images per batch, 1 color channel, and 28x28 images.
```
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
```
## Building networks with PyTorch
Here I'll use PyTorch to build a simple feedforward network to classify the MNIST images. That is, the network will receive a digit image as input and predict the digit in the image.
<img src="assets/mlp_mnist.png" width=600px>
To build a neural network with PyTorch, you use the `torch.nn` module. The network itself is a class inheriting from `torch.nn.Module`. You define each of the operations separately, like `nn.Linear(784, 128)` for a fully connected linear layer with 784 inputs and 128 units.
The class needs to include a `forward` method that implements the forward pass through the network. In this method, you pass some input tensor `x` through each of the operations you defined earlier. The `torch.nn` module also has functional equivalents for things like ReLUs in `torch.nn.functional`. This module is usually imported as `F`. Then to use a ReLU activation on some layer (which is just a tensor), you'd do `F.relu(x)`. Below are a few different commonly used activation functions.
<img src="assets/activation.png" width=700px>
So, for this network, I'll build it with three fully connected layers, then a softmax output for predicting classes. The softmax function is similar to the sigmoid in that it squashes inputs between 0 and 1, but it's also normalized so that all the values sum to one like a proper probability distribution.
```
from torch import nn
from torch import optim
import torch.nn.functional as F
class Network(nn.Module):
    """Three-layer fully connected classifier for flattened 28x28
    MNIST digits: 784 -> 128 -> 64 -> 10 with ReLU activations and a
    softmax over the 10 digit classes.
    """
    def __init__(self):
        super().__init__()
        # Two hidden layers (128 and 64 units) and the 10-way output layer.
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        """Forward pass; returns per-class probabilities (softmax output)."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return F.softmax(self.fc3(hidden), dim=1)
# Instantiate the network; evaluating `model` displays its architecture.
model = Network()
model
```
### Initializing weights and biases
The weights and such are automatically initialized for you, but it's possible to customize how they are initialized. The weights and biases are tensors attached to the layer you defined, you can get them with `model.fc1.weight` for instance.
```
# Inspect the automatically initialized parameters of the first layer.
print(model.fc1.weight)
print(model.fc1.bias)
```
For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
```
# Set biases to all zeros (in-place on the underlying tensor)
model.fc1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)
```
### Forward pass
Now that we have a network, let's see what happens when we pass in an image. This is called the forward pass. We're going to convert the image data into a tensor, then pass it through the operations defined by the network architecture.
```
# Grab some data
dataiter = iter(trainloader)
# Bug fix: use the builtin next(); the .next() method is Python 2 only
# and was removed from modern DataLoader iterators.
images, labels = next(dataiter)
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to not automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
```
As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!
PyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)). Using this to build the equivalent network:
```
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network equivalent to the Network class above.
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
                      nn.ReLU(),
                      nn.Linear(hidden_sizes[0], hidden_sizes[1]),
                      nn.ReLU(),
                      nn.Linear(hidden_sizes[1], output_size),
                      nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
# Flatten each 28x28 image into a 784-vector (in place).
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
```
You can also pass in an `OrderedDict` to name the individual layers and operations. Note that a dictionary keys must be unique, so _each operation must have a different name_.
```
# Name each layer explicitly via an OrderedDict (keys must be unique).
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
                      ('fc1', nn.Linear(input_size, hidden_sizes[0])),
                      ('relu1', nn.ReLU()),
                      ('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
                      ('relu2', nn.ReLU()),
                      ('output', nn.Linear(hidden_sizes[1], output_size)),
                      ('softmax', nn.Softmax(dim=1))]))
model
```
Now it's your turn to build a simple network, use any method I've covered so far. In the next notebook, you'll learn how to train a network so it can make good predictions.
>**Exercise:** Build a network to classify the MNIST images with _three_ hidden layers. Use 400 units in the first hidden layer, 200 units in the second layer, and 100 units in the third layer. Each hidden layer should have a ReLU activation function, and use softmax on the output layer.
```
## TODO: Your network here
# Solution to the exercise: three hidden layers (400 -> 200 -> 100)
# with ReLU activations and a softmax over the 10 output classes.
model = nn.Sequential(OrderedDict([
                      ('fc1', nn.Linear(784, 400)),
                      ('relu1', nn.ReLU()),
                      ('fc2', nn.Linear(400, 200)),
                      ('relu2', nn.ReLU()),
                      ('fc3', nn.Linear(200, 100)),
                      ('relu3', nn.ReLU()),
                      ('output', nn.Linear(100, 10)),
                      ('softmax', nn.Softmax(dim=1))]))

## Run this cell with your model to make sure it works ##
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
```
| github_jupyter |
# Example: CanvasXpress boxplot Chart No. 11
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/boxplot-11.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="boxplot11",
data={
"y": {
"smps": [
"Var1",
"Var2",
"Var3",
"Var4",
"Var5",
"Var6",
"Var7",
"Var8",
"Var9",
"Var10",
"Var11",
"Var12",
"Var13",
"Var14",
"Var15",
"Var16",
"Var17",
"Var18",
"Var19",
"Var20",
"Var21",
"Var22",
"Var23",
"Var24",
"Var25",
"Var26",
"Var27",
"Var28",
"Var29",
"Var30",
"Var31",
"Var32",
"Var33",
"Var34",
"Var35",
"Var36",
"Var37",
"Var38",
"Var39",
"Var40",
"Var41",
"Var42",
"Var43",
"Var44",
"Var45",
"Var46",
"Var47",
"Var48",
"Var49",
"Var50",
"Var51",
"Var52",
"Var53",
"Var54",
"Var55",
"Var56",
"Var57",
"Var58",
"Var59",
"Var60"
],
"data": [
[
4.2,
11.5,
7.3,
5.8,
6.4,
10,
11.2,
11.2,
5.2,
7,
16.5,
16.5,
15.2,
17.3,
22.5,
17.3,
13.6,
14.5,
18.8,
15.5,
23.6,
18.5,
33.9,
25.5,
26.4,
32.5,
26.7,
21.5,
23.3,
29.5,
15.2,
21.5,
17.6,
9.7,
14.5,
10,
8.2,
9.4,
16.5,
9.7,
19.7,
23.3,
23.6,
26.4,
20,
25.2,
25.8,
21.2,
14.5,
27.3,
25.5,
26.4,
22.4,
24.5,
24.8,
30.9,
26.4,
27.3,
29.4,
23
]
],
"vars": [
"len"
]
},
"x": {
"supp": [
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ"
],
"order": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
],
"dose": [
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2
]
}
},
config={
"axisAlgorithm": "rPretty",
"axisTickScaleFontFactor": 1.8,
"axisTitleFontStyle": "bold",
"axisTitleScaleFontFactor": 1.8,
"colorBy": "dose",
"graphOrientation": "vertical",
"graphType": "Boxplot",
"groupingFactors": [
"dose"
],
"legendScaleFontFactor": 1.8,
"showLegend": True,
"smpLabelRotate": 90,
"smpLabelScaleFontFactor": 1.8,
"smpTitle": "dose",
"smpTitleFontStyle": "bold",
"smpTitleScaleFontFactor": 1.8,
"stringSampleFactors": [
"dose"
],
"theme": "CanvasXpress",
"title": "The Effect of Vitamin C on Tooth Growth in Guinea Pigs",
"xAxis2Show": False,
"xAxisMinorTicks": False,
"xAxisTitle": "len"
},
width=613,
height=613,
events=CXEvents(),
after_render=[
[
"switchNumericToString",
[
"dose",
True
]
]
],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
# Render the configured chart inline and also write it to an HTML file.
display = CXNoteBook(cx)
display.render(output_file="boxplot_11.html")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/KinsleyDavis/Novo/blob/main/Colab_ArteMaisComp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
def escolher_arquivo():
    """Prompt the user (in Google Colab) to upload an image, install
    the style-transfer dependencies, and display the uploaded image.

    NOTE(review): the chosen filename CONTENT_IMAGE_FN is never
    returned, so callers cannot access it -- confirm whether a return
    value was intended.  The `!` and `%` lines are IPython/Colab
    magics and only work inside a notebook.
    """
    import ipywidgets as widgets
    from IPython.display import clear_output
    import os
    import matplotlib.pyplot as plt
    !pip install chainer &> /dev/null
    !pip install cupy-cuda101==7.7.0
    !git clone https://github.com/artemaiscomp/chainer-fast-neuralstyle &> /dev/null
    from google.colab import files
    # Ask the user to upload the content image.
    content_img = files.upload()
    # First (and only) uploaded file name.
    CONTENT_IMAGE_FN = list(content_img)[0]
    # Replace spaces so the name is safe for the shell command later.
    CONTENT_IMAGE_FN_temp = CONTENT_IMAGE_FN.strip().replace(" ", "_")
    if CONTENT_IMAGE_FN != CONTENT_IMAGE_FN_temp:
        os.rename(CONTENT_IMAGE_FN, CONTENT_IMAGE_FN_temp)
        CONTENT_IMAGE_FN = CONTENT_IMAGE_FN_temp
    #print("Nome do arquivo da imagem :", CONTENT_IMAGE_FN)
    %matplotlib inline
    # Show the uploaded image without axes.
    fig = plt.figure(figsize=(10, 10))
    img = plt.imread(CONTENT_IMAGE_FN)
    plt.axis('off')
    #plt.title('Content image')
    plt.imshow(img)
#@title Clique no Play e após em "Escolher Arquivo" para escolher sua imagem a ser estilizada
import ipywidgets as widgets
from IPython.display import clear_output
import os
import matplotlib.pyplot as plt
# Install the style-transfer dependencies and fetch the model repo
# (output silenced); these are IPython shell magics, Colab-only.
!pip install chainer &> /dev/null
!pip install cupy-cuda101 &> /dev/null
!git clone https://github.com/artemaiscomp/chainer-fast-neuralstyle &> /dev/null
from google.colab import files
# Ask the user to upload the content image.
content_img = files.upload()
CONTENT_IMAGE_FN = list(content_img)[0]
# Replace spaces so the filename is safe for the shell command later.
CONTENT_IMAGE_FN_temp = CONTENT_IMAGE_FN.strip().replace(" ", "_")
if CONTENT_IMAGE_FN != CONTENT_IMAGE_FN_temp:
    os.rename(CONTENT_IMAGE_FN, CONTENT_IMAGE_FN_temp)
    CONTENT_IMAGE_FN = CONTENT_IMAGE_FN_temp
#print("Nome do arquivo da imagem :", CONTENT_IMAGE_FN)
%matplotlib inline
# Display the uploaded image without axes.
fig = plt.figure(figsize=(10, 10))
img = plt.imread(CONTENT_IMAGE_FN)
plt.axis('off')
#plt.title('Content image')
plt.imshow(img)
#@title Selecione a arte a ser aplicada.
import os, ipywidgets as widgets
from IPython.display import clear_output
# Offer every pre-trained style model shipped with the cloned repo.
model_files = [f for f in os.listdir('/content/chainer-fast-neuralstyle/models') if f.endswith('.model')]
model=widgets.Dropdown(
    options=model_files,
    value='hokusai.model',
    description='Modelo:',
    disabled=False,
)
model
#@title Clique no botão Play e abaixo no botao OK para converter a imagem com o estilo escolhido.
clear_output()
#@title Clique no botão Play e abaixo no botao OK para converter a imagem com o estilo escolhido.
from IPython.display import clear_output
button = widgets.Button(description='OK')
!pip install chainer &> /dev/null
!pip install cupy-cuda101==7.7.0 &> /dev/null
clear_output()
out = widgets.Output()
def on_button_clicked(_):
    # route the callback's display output into the Output widget
    with out:
        # Run the style-transfer script on the uploaded image with the
        # chosen model, writing the result to output.jpg.
        # NOTE(review): confirm that $model.value expands to the
        # dropdown's current selection here -- IPython $-expansion may
        # stop at `$model`.
        !python chainer-fast-neuralstyle/generate.py $CONTENT_IMAGE_FN unique -m chainer-fast-neuralstyle/models/$model.value -o output.jpg --gpu 0 &> /dev/null
        fig = plt.figure(figsize=(10, 10))
        img = plt.imread('output.jpg')
        plt.axis('off')
        plt.title('imagem estilizada')
        plt.imshow(img)
# wire the button to the handler via its on_click method
button.on_click(on_button_clicked)
# show the button and its output area together
widgets.VBox([button,out])
```
| github_jupyter |
# Arbitrage Pricing Theory
By Evgenia "Jenny" Nitishinskaya, Delaney Granizo-Mackenzie, and Maxwell Margenot.
Part of the Quantopian Lecture Series:
* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
Notebook released under the Creative Commons Attribution 4.0 License.
---
Arbitrage pricing theory is a major asset pricing theory that relies on expressing the returns using a linear factor model:
$$R_i = a_i + b_{i1} F_1 + b_{i2} F_2 + \ldots + b_{iK} F_K + \epsilon_i$$
This theory states that if we have modelled our rate of return as above, then the expected returns obey
$$ E(R_i) = R_F + b_{i1} \lambda_1 + b_{i2} \lambda_2 + \ldots + b_{iK} \lambda_K $$
where $R_F$ is the risk-free rate, and $\lambda_j$ is the risk premium - the return in excess of the risk-free rate - for factor $j$. This premium arises because investors require higher returns to compensate them for incurring risk. This generalizes the capital asset pricing model (CAPM), which uses the return on the market as its only factor.
We can compute $\lambda_j$ by constructing a portfolio that has a sensitivity of 1 to factor $j$ and 0 to all others (called a <i>pure factor portfolio</i> for factor $j$), and measure its return in excess of the risk-free rate. Alternatively, we could compute the factor sensitivities for $K$ well-diversified (no asset-specific risk, i.e. $\epsilon_p = 0$) portfolios, and then solve the resulting system of linear equations.
## Arbitrage
There are generally many, many securities in our universe. If we use different ones to compute the $\lambda$s, will our results be consistent? If our results are inconsistent, there is an <i>arbitrage opportunity</i> (in expectation). Arbitrage is an operation that earns a profit without incurring risk and with no net investment of money, and an arbitrage opportunity is an opportunity to conduct such an operation. In this case, we mean that there is a risk-free operation with <i>expected</i> positive return that requires no net investment. It occurs when expectations of returns are inconsistent, i.e. risk is not priced consistently across securities.
For instance, there is an arbitrage opportunity in the following case: say there is an asset with expected rate of return 0.2 for the next year and a $\beta$ of 1.2 with the market, while the market is expected to have a rate of return of 0.1, and the risk-free rate on 1-year bonds is 0.05. Then the APT model tells us that the expected rate of return on the asset should be
$$ R_F + \beta \lambda = 0.05 + 1.2 (0.1 - 0.05) = 0.11$$
This does not agree with the prediction that the asset will have a rate of return of 0.2. So, if we buy \$100 of our asset, short \$120 of the market, and buy \$20 of bonds, we will have invested no net money and are not exposed to any systematic risk (we are market-neutral), but we expect to earn $0.2 \cdot 100 - 0.1 \cdot 120 + 20 \cdot 0.05 = 9$ dollars at the end of the year.
The APT assumes that these opportunities will be taken advantage of until prices shift and the arbitrage opportunities disappear. That is, it assumes that there are arbitrageurs who have sufficient amounts of patience and capital. This provides a justification for the use of empirical factor models in pricing securities: if the model were inconsistent, there would be an arbitrage opportunity, and so the prices would adjust.
## Goes Both Ways
Often knowing $E(R_i)$ is incredibly difficult, but notice that this model tells us what the expected returns should be if the market is fully arbitraged. This lays the groundwork for long-short equity strategies based on factor model ranking systems. If you know what the expected return of an asset is given that the market is arbitraged, and you hypothesize that the market will be mostly arbitraged over the timeframe on which you are trading, then you can construct a ranking.
##Long-Short Equity
To do this, estimate the expected return for each asset on the market, then rank them. Long the top percentile and short the bottom percentile, and you will make money on the difference in returns. Said another way, if the assets at the top of the ranking on average tend to make $5\%$ more per year than the market, and assets at the bottom tend to make $5\%$ less, then you will make $(M + 0.05) - (M - 0.05) = 0.10$ or $10\%$ percent per year, where $M$ is the market return that gets canceled out.
Long-short equity accepts that any individual asset is very difficult to model, relies on broad trends holding true. We can't accurately predict expected returns for an asset, but we can predict the expected returns for a group of 1000 assets as the errors average out.
We will have a full lecture on long-short models later.
## How many factors do you want?
As discussed in other lectures, notably Overfitting, having more factors will explain more and more of your returns, but at the cost of being more and more fit to noise in your data. To discover true signals and make good predictions going forward, you want to select as few parameters as possible that still explain a large amount of the variance in returns.
##Example: Computing Expected Returns for Two Assets
```
import numpy as np
import pandas as pd
from statsmodels import regression
import matplotlib.pyplot as plt
```
Let's get some data.
```
# Analysis window for the explanatory factors (market and risk-free returns).
start_date = '2014-06-30'
end_date = '2015-06-30'
# We will look at the returns of an asset one-month into the future to model future returns.
offset_start_date = '2014-07-31'
offset_end_date = '2015-07-31'
# Get returns data for our assets
asset1 = get_pricing('HSC', fields='price', start_date=offset_start_date, end_date=offset_end_date).pct_change()[1:]
asset2 = get_pricing('MSFT', fields='price', start_date=offset_start_date, end_date=offset_end_date).pct_change()[1:]
# Get returns for the market
bench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
# Use an ETF that tracks 3-month T-bills as our risk-free rate of return
treasury_ret = get_pricing('BIL', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
# Define a constant column to estimate the regression intercept.
# BUG FIX: pd.TimeSeries was removed from pandas (deprecated in 0.17);
# pd.Series is the drop-in replacement and behaves identically with a
# DatetimeIndex.
constant = pd.Series(np.ones(len(asset1.index)), index=asset1.index)
df = pd.DataFrame({'R1': asset1,
                   'R2': asset2,
                   'SPY': bench,
                   'RF': treasury_ret,
                   'Constant': constant})
# Align all series on common dates; rows with any missing value are dropped.
df = df.dropna()
```
We'll start by computing static regressions over the whole time period.
```
# Regress asset 1's future returns on the market, risk-free, and constant factors.
OLS_model = regression.linear_model.OLS(df['R1'], df[['SPY', 'RF', 'Constant']])
fitted_model = OLS_model.fit()
# BUG FIX: converted Python-2 print statements (syntax errors on Python 3)
# to print() calls.
print('p-value', fitted_model.f_pvalue)
print(fitted_model.params)
R1_params = fitted_model.params
# Repeat the regression for asset 2.
OLS_model = regression.linear_model.OLS(df['R2'], df[['SPY', 'RF', 'Constant']])
fitted_model = OLS_model.fit()
print('p-value', fitted_model.f_pvalue)
print(fitted_model.params)
R2_params = fitted_model.params
```
As we've said before in other lectures, these numbers don't tell us too much by themselves. We need to look at the distribution of estimated coefficients and whether it's stable. Let's look at the rolling 100-day regression to see how it looks.
```
# NOTE(review): pd.stats.ols.MovingOLS was deprecated in pandas 0.20 and
# removed in 0.23; on a modern stack use
# statsmodels.regression.rolling.RollingOLS instead — confirm pandas version.
# Rolling 100-day regression of asset 1 on the market and risk-free factors.
model = pd.stats.ols.MovingOLS(y = df['R1'], x=df[['SPY', 'RF']],
window_type='rolling',
window=100)
# One row of estimated betas per window end date.
rolling_parameter_estimates = model.beta
rolling_parameter_estimates.plot();
# Dashed horizontal lines mark the static (whole-period) estimates for comparison.
plt.hlines(R1_params['SPY'], df.index[0], df.index[-1], linestyles='dashed', colors='blue')
plt.hlines(R1_params['RF'], df.index[0], df.index[-1], linestyles='dashed', colors='green')
plt.hlines(R1_params['Constant'], df.index[0], df.index[-1], linestyles='dashed', colors='red')
plt.title('Asset1 Computed Betas');
plt.legend(['Market Beta', 'Risk Free Beta', 'Intercept', 'Market Beta Static', 'Risk Free Beta Static', 'Intercept Static']);
# Same rolling regression for asset 2.
model = pd.stats.ols.MovingOLS(y = df['R2'], x=df[['SPY', 'RF']],
window_type='rolling',
window=100)
rolling_parameter_estimates = model.beta
rolling_parameter_estimates.plot();
plt.hlines(R2_params['SPY'], df.index[0], df.index[-1], linestyles='dashed', colors='blue')
plt.hlines(R2_params['RF'], df.index[0], df.index[-1], linestyles='dashed', colors='green')
plt.hlines(R2_params['Constant'], df.index[0], df.index[-1], linestyles='dashed', colors='red')
plt.title('Asset2 Computed Betas');
plt.legend(['Market Beta', 'Risk Free Beta', 'Intercept', 'Market Beta Static', 'Risk Free Beta Static', 'Intercept Static']);
```
It might seem like the market betas are stable here, but let's zoom in to check.
```
# NOTE(review): pd.stats.ols.MovingOLS was removed from pandas (0.23+);
# statsmodels.regression.rolling.RollingOLS is the modern replacement.
# Zoomed view: plot only the rolling market beta of asset 2.
model = pd.stats.ols.MovingOLS(y = df['R2'], x=df[['SPY', 'RF']],
window_type='rolling',
window=100)
rolling_parameter_estimates = model.beta
rolling_parameter_estimates['SPY'].plot();
# Dashed line marks the static whole-period estimate.
plt.hlines(R2_params['SPY'], df.index[0], df.index[-1], linestyles='dashed', colors='blue')
plt.title('Asset2 Computed Betas');
plt.legend(['Market Beta', 'Market Beta Static']);
```
As you can see, the plot scale massively affects how we perceive estimate quality.
##Predicting the Future
Let's use this model to predict future prices for these assets.
```
# Historical window used to fit the factor model for forecasting.
start_date = '2014-07-25'
end_date = '2015-07-25'
# We will look at the returns of an asset one-month into the future to model future returns.
offset_start_date = '2014-08-25'
offset_end_date = '2015-08-25'
# Get returns data for our assets
asset1 = get_pricing('HSC', fields='price', start_date=offset_start_date, end_date=offset_end_date).pct_change()[1:]
# Get returns for the market
bench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
# Use an ETF that tracks 3-month T-bills as our risk-free rate of return
treasury_ret = get_pricing('BIL', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
# Define a constant column to estimate the regression intercept.
# BUG FIX: pd.TimeSeries was removed from pandas; pd.Series is the drop-in
# replacement.
constant = pd.Series(np.ones(len(asset1.index)), index=asset1.index)
df = pd.DataFrame({'R1': asset1,
                   'SPY': bench,
                   'RF': treasury_ret,
                   'Constant': constant})
# Keep only dates where every series has data.
df = df.dropna()
```
We'll perform a historical regression to get our model parameter estimates.
```
# Historical regression of asset returns on the factors to estimate betas.
OLS_model = regression.linear_model.OLS(df['R1'], df[['SPY', 'RF', 'Constant']])
fitted_model = OLS_model.fit()
# BUG FIX: converted Python-2 print statements to print() calls.
print('p-value', fitted_model.f_pvalue)
print(fitted_model.params)
# Extract the fitted coefficients: factor betas and the intercept (alpha).
b_SPY = fitted_model.params['SPY']
b_RF = fitted_model.params['RF']
a = fitted_model.params['Constant']
```
Get the factor data for the last month so we can predict the next month.
```
# Factor realizations over the most recent month, used to forecast the
# following month's asset returns.
start_date = '2015-07-25'
end_date = '2015-08-25'
# Get returns for the market
last_month_bench = get_pricing('SPY', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
# Use an ETF that tracks 3-month T-bills as our risk-free rate of return
last_month_treasury_ret = get_pricing('BIL', fields='price', start_date=start_date, end_date=end_date).pct_change()[1:]
```
Make our predictions.
```
# Linear factor-model forecast: beta_SPY * F_SPY + beta_RF * F_RF + alpha.
predictions = b_SPY * last_month_bench + b_RF * last_month_treasury_ret + a
# Shift the index one month forward: each prediction targets one month ahead.
predictions.index = predictions.index + pd.DateOffset(months=1)
# Overlay the last 30 actual returns (solid) with the forecasts (dashed).
plt.plot(asset1.index[-30:], asset1.values[-30:], 'b-')
plt.plot(predictions.index, predictions, 'b--')
plt.ylabel('Returns')
plt.legend(['Actual', 'Predicted']);
```
Of course, this analysis hasn't yet told us anything about the quality of our predictions. To check the quality of our predictions we need to use techniques such as out of sample testing or cross-validation. For the purposes of long-short equity ranking systems, the Spearman Correlation lecture details a way to check the quality of a ranking system.
##Important Note!
Again, any of these individual predictions will probably be inaccurate. Industry-quality modeling makes predictions for thousands of assets and relies on broad trends holding. If I told you that I have a predictive model with a 51% success rate, you would not make one prediction and bet all your money on it. You would make thousands of predictions and divide your money between them.
| github_jupyter |
<h1 align="center"> Circuit Analysis Using Sympy</h1>
<h2 align="center"> Assignment 7</h2>
<h3 align="center"> M V A Suhas kumar,EE17B109</h3>
<h4 align="center">March 16,2019 </h4>
# Introduction
In this assignment, we use Sympy to analytically solve a matrix equation governing an analog circuit. We look at two circuits, an active low pass filter and an active high pass filter. We create matrices using node equations for the circuits in sympy, and then solve the equations analytically. We then convert the resulting sympy solution into a numpy function which can be called. We then use the signals toolbox we studied in the last assignment to understand the responses of the two circuits to various inputs.
Importing required packages
```
from sympy import *
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sp
from pylab import *
from IPython.display import *
```
# Low pass Filter

where G =1.586 and R1 = R2 = 10kΩ and C1=C2=10pF. This gives a 3dB Butter-worth filter with cutoff frequency of 1/2πMHz.
Circuit Equations are as follows:
$$V_{m}=\frac{V_{o}}{G}$$
$$ V_{p} =V_{1} \frac{1}{1+s R_{2}C_{2}}$$
$$ V_{o} = G(V_{p} - V_{m})$$
$$\frac{V_{i}-V_{1}}{R_{1}} + \frac{V_{p}-V_{1}}{R_{2}} + s C_{1}(V_{0}-V_{1}) = 0$$
Solving the above equations with approximation gives
$$ V_{o} \approx \frac{V_{i}}{s R_{1} C_{1}}$$
We would like to solve this in Python and also get (and plot) the exact result. For this we need the sympy module.
To solve the equations exactly, we use the matrix method:
```
# Enable pretty (LaTeX-style) rendering of sympy output in the notebook.
init_printing()
# Component values and node voltages as symbolic variables.
R1,R2,C1,C2,G = symbols("R1 R2 C1 C2 G")
V1,Vp,Vm,Vo,Vi = symbols("V1 Vp Vm Vo Vi")
s = symbols("s")  # Laplace variable; later cells rely on this notebook-level name
# Nodal-analysis system A*M = b for the low-pass filter.
A = Matrix([[0,0,1,-1/G],
[-1/(1+s*R2*C2),1,0,0],
[0,-G,G,1],
[-1/R1-1/R2-s*C1,1/R2,0,s*C1]])
M = Matrix([V1,Vp,Vm,Vo])
b = Matrix([0,0,0,Vi/R1])
# Display the (unevaluated) matrix equation A*M = b.
display(Eq(MatMul(A,M),b))
```
Solving the above matrix yield exact result
Function defining low pass filter:
```
def lowpass(R1=10**4, R2=10**4, C1=10**-11, C2=10**-11, G=1.586, Vi=1):
    """Solve the active low-pass filter's nodal equations symbolically.

    Returns the tuple (A, b, V), where A*x = b is the nodal-analysis
    system and V = [V1, Vp, Vm, Vo] is its exact symbolic solution.
    """
    s = symbols("s")
    system = Matrix([[0, 0, 1, -1 / G],
                     [-1 / (1 + s * R2 * C2), 1, 0, 0],
                     [0, -G, G, 1],
                     [-1 / R1 - 1 / R2 - s * C1, 1 / R2, 0, s * C1]])
    rhs = Matrix([0, 0, 0, Vi / R1])
    solution = system.inv() * rhs
    return (system, rhs, solution)
```
Function which can take input in laplace domain or time domain and give the output of low pass filter:
```
def low_pass_output(laplace_fn=None, time_fn=None, t=None, C=10**-11):
    """Simulate the low-pass filter's response to an input signal.

    Exactly one of ``laplace_fn`` (a sympy expression in ``s``) or
    ``time_fn`` (input samples aligned with ``t``) should be given.

    Parameters
    ----------
    laplace_fn : sympy expression, optional
        Laplace-domain input V_i(s); converted to time-domain samples via
        scipy.signal.impulse.
    time_fn : array_like, optional
        Time-domain input samples evaluated on ``t``.
    t : ndarray, optional
        Time grid; defaults to 1e5 points on [0, 1e-5] s.
    C : float
        Capacitance used for both C1 and C2 of the filter.

    Returns
    -------
    (t, V_out) : the time grid and the simulated output samples.
    """
    # BUG FIX: np.linspace's `num` argument must be an int; the original
    # default used 1e5 (a float), which raises TypeError on NumPy >= 1.18.
    # Building the default inside the function also avoids evaluating it
    # once at definition time.
    if t is None:
        t = np.linspace(0, 1e-5, 100000)
    # Bind the Laplace symbol locally instead of relying on a notebook-level
    # global; sympy caches symbols by name, so this equals the global `s`.
    s = symbols("s")
    A, b, V = lowpass(C1=C, C2=C)
    v_low_pass = V[-1]  # output node voltage Vo(s)
    temp = expand(simplify(v_low_pass))
    n, d = fraction(temp)
    n, d = Poly(n, s), Poly(d, s)
    num, den = n.all_coeffs(), d.all_coeffs()
    # NOTE(review): the numerator coefficients are negated here, preserved
    # from the original code's sign convention for the solved transfer
    # function — confirm against the symbolic solution if reusing.
    H_v_low_pass = sp.lti([-float(f) for f in num], [float(f) for f in den])
    if laplace_fn is not None:
        # Turn the symbolic Laplace-domain input into an LTI system and take
        # its impulse response to get the time-domain input samples.
        temp = expand(simplify(laplace_fn))
        n, d = fraction(temp)
        n, d = Poly(n, s), Poly(d, s)
        num, den = n.all_coeffs(), d.all_coeffs()
        lap = sp.lti([float(f) for f in num], [float(f) for f in den])
        t, u = sp.impulse(lap, None, t)
    else:
        u = time_fn
    # Simulate the filter's response to input u over the time grid t.
    t, V_out, svec = sp.lsim(H_v_low_pass, u, t)
    return (t, V_out)
```
# High pass filter

values you can use are R1=R3=10kΩ, C1=C2=1nF, and G=1.586
Circuit Equations are as follows:
$$V_{n}=\frac{V_{o}}{G}$$
$$ V_{p} =V_{1} \frac{s R_{3}C_{2}}{1+s R_{3}C_{2}}$$
$$ V_{o} = G(V_{p} - V_{n})$$
$$(V_{1}-V_{i})sC_{1} + \frac{(V_{1}-V_{o})}{R_{1}} + (V_{i}-V_{p})sC_{2} = 0 $$
```
# Symbolic variables for the high-pass filter (subscripted display names).
R1, R3, C1, C2, G, Vi = symbols('R_1 R_3 C_1 C_2 G V_i')
V1, Vn, Vp, Vo = symbols('V_1 V_n V_p V_o')
x = Matrix([V1, Vn, Vp, Vo])
# Nodal-analysis system A*x = b for the high-pass filter.
A = Matrix([[0, -1, 0, 1/G],
            [s*C2*R3/(s*C2*R3+1), 0, -1, 0],
            [0, G, -G, 1],
            [-s*C2 - 1/R1 - s*C1, 0, s*C2, 1/R1]])
b = Matrix([0, 0, 0, -Vi*s*C1])
# BUG FIX: `init_printing` without parentheses was a no-op expression that
# merely referenced the function; it must be called to take effect.
init_printing()
display(Eq(MatMul(A, x), b))
```
Function defining high pass filter:
```
def highpass(R1=10**4, R3=10**4, C1=10**-9, C2=10**-9, G=1.586, Vi=1):
    """Solve the active high-pass filter's nodal equations symbolically.

    Returns the tuple (A, b, V), where A*x = b is the nodal-analysis
    system and V = [V1, Vn, Vp, Vo] is its exact symbolic solution.
    """
    s = symbols("s")
    system = Matrix([[0, -1, 0, 1 / G],
                     [s * C2 * R3 / (s * C2 * R3 + 1), 0, -1, 0],
                     [0, G, -G, 1],
                     [-s * C2 - 1 / R1 - s * C1, 0, s * C2, 1 / R1]])
    rhs = Matrix([0, 0, 0, -Vi * s * C1])
    solution = system.inv() * rhs
    return (system, rhs, solution)
```
Function which can take input in laplace domain or time domain and give the output of high pass filter:
```
def high_pass_output(laplace_fn=None, time_fn=None, t=None, C=10**-11):
    """Simulate the high-pass filter's response to an input signal.

    Exactly one of ``laplace_fn`` (a sympy expression in ``s``) or
    ``time_fn`` (input samples aligned with ``t``) should be given.

    Parameters
    ----------
    laplace_fn : sympy expression, optional
        Laplace-domain input V_i(s); converted via scipy.signal.impulse.
    time_fn : array_like, optional
        Time-domain input samples evaluated on ``t``.
    t : ndarray, optional
        Time grid; defaults to 1e5 points on [0, 1e-4] s.
    C : float
        Capacitance for both C1 and C2.  NOTE(review): this default (10 pF)
        differs from highpass()'s own 1 nF defaults; callers in this
        notebook pass C=10**-9 explicitly — kept for compatibility.

    Returns
    -------
    (t, V_out) : the time grid and the simulated output samples.
    """
    # BUG FIX: np.linspace's `num` must be an int; the original default used
    # 1e5 (a float), which raises TypeError on NumPy >= 1.18.
    if t is None:
        t = np.linspace(0, 1e-4, 100000)
    # Bind the Laplace symbol locally instead of relying on a notebook-level
    # global; sympy caches symbols by name, so this equals the global `s`.
    s = symbols("s")
    A, b, V = highpass(C1=C, C2=C)
    v_high_pass = V[-1]  # output node voltage Vo(s)
    temp = expand(simplify(v_high_pass))
    n, d = fraction(temp)
    n, d = Poly(n, s), Poly(d, s)
    num, den = n.all_coeffs(), d.all_coeffs()
    H_v_high_pass = sp.lti([float(f) for f in num], [float(f) for f in den])
    if laplace_fn is not None:
        # Convert the symbolic Laplace-domain input to time-domain samples.
        temp = expand(simplify(laplace_fn))
        n, d = fraction(temp)
        n, d = Poly(n, s), Poly(d, s)
        num, den = n.all_coeffs(), d.all_coeffs()
        lap = sp.lti([float(f) for f in num], [float(f) for f in den])
        t, u = sp.impulse(lap, None, t)
    else:
        u = time_fn
    # Simulate the filter's response to input u over the time grid t.
    t, V_out, svec = sp.lsim(H_v_high_pass, u, t)
    return (t, V_out)
```
# Question1
Step Response for low pass filter
```
# Step response of the low-pass filter: V_i(s) = 1/s (unit step), default
# capacitance of 10 pF.  `s` is the notebook-level sympy symbol.
t,V_low_step = low_pass_output(laplace_fn=1/s)
plt.plot(t,V_low_step)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$Step\ Response\ V_{o}(t)$",size=14)
plt.title("Step Response When Capacitance = 10pF in low pass filter")
plt.show()
```
The step response starts from zero and reaches 0.793 at steady state, because the DC gain of the transfer function is 0.793. The initial value is 0 because the gain of the low-pass filter at high frequencies is zero (the step's initial edge can be viewed as a high-frequency component, and a low-pass filter doesn't pass high frequencies).
# Question2
Finding Output when input signal is $$(sin(2000πt)+cos(2×106πt))u_{o}(t)$$
```
# BUG FIX: np.linspace's `num` must be an int (1e5 is a float and raises
# TypeError on NumPy >= 1.18).
t = np.linspace(0, 1e-3, 100000)
# Mixed-frequency input: 1 kHz sinusoid plus a 1 MHz sinusoid.
plt.plot(t, np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t))
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{i}(t)$",size=14)
plt.title("Mixed frequency input")
plt.show()
```
The band (the closely spaced oscillations) is the high-frequency wave, and the envelope is the low-frequency wave.
```
# BUG FIX: linspace's `num` must be an int (1e5 is a float).
t = linspace(0, 1e-5, 100000)
# Feed the mixed-frequency signal through the low-pass filter (C = 1 nF).
t,vout = low_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)
plt.plot(t,vout)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{o}(t)$",size=14)
plt.title("Output for mixed frequency Sinusoid in lowpass filter in transient time")
plt.show()
```
From the above we can clearly see that the output is a superposition of a high-amplitude low-frequency wave and a low-amplitude high-frequency wave (since the low-pass filter attenuates the high frequencies).
```
# BUG FIX: linspace's `num` must be an int (1e5 is a float).
t = linspace(0, 1e-5, 100000)
# Feed the mixed-frequency signal through the high-pass filter (C = 1 nF).
t,vout = high_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)
plt.plot(t,vout)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{o}(t)$",size=14)
plt.title("Output for mixed frequency Sinusoid in High pass filter in transient time")
plt.show()
```
The plot which appears to be a band (closely placed lines) is a superposition of a high-amplitude high-frequency wave and a low-amplitude low-frequency wave (since the high-pass filter attenuates the low frequencies), which in turn appears to be an undistorted sine wave.
```
# BUG FIX: linspace's `num` must be an int (1e5 is a float).
t = linspace(0, 1e-3, 100000)
# Longer time window to observe the steady-state low-pass output.
t,vout = low_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)
plt.plot(t,vout)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{o}(t)$",size=14)
plt.title("Output for mixed frequency Sinusoid in lowpass filter in steady time")
plt.show()
```
From the graph we can see the frequency is close to 1000 Hz (which is the low-frequency input).
```
# BUG FIX: linspace's `num` must be an int (1e5 is a float).
t = linspace(0, 1e-4, 100000)
# Steady-state high-pass output for the mixed-frequency input.
t,vout = high_pass_output(time_fn=np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t),t=t,C=10**-9)
plt.plot(t,vout)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{o}(t)$",size=14)
# Typo fix in the title text: "steay" -> "steady".
plt.title("Output for mixed frequency Sinusoid in High pass filter in steady time")
plt.show()
```
From the graph we can see the frequency is close to 1000 kHz (which is the high-frequency input).
# Question 3,4
Damped sinusoid: $e^{-3000t}\sin(10^{6}t)$
```
# BUG FIX: linspace's `num` must be an int (1e6 is a float).
t = linspace(0, 1e-3, 1000000)
# Damped sinusoid: omega = 1e6 rad/s carrier with a 3000 s^-1 decay rate.
f = np.exp(-3000*t) * np.sin(10**6 *t)
plt.title("High frequency damped sinusoid")
plt.xlabel("$t$")
plt.ylabel("$v_i(t)$",size=20)
plt.plot(t,f)
plt.grid()
plt.show()
t = linspace(0, 1e-3, 1000000)
# Pass the damped sinusoid through the high-pass filter (C = 1 nF).
t,vout = high_pass_output(time_fn=f,t=t,C=10**-9)
plt.plot(t,vout)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{o}(t)$",size=14)
plt.title("Output for High frequency damped input in High pass filter")
plt.show()
```
From the above graph we can clearly see that the high-pass filter passed the high-frequency sinusoid without attenuating it much (as expected of a high-pass filter).
```
# BUG FIX: linspace's `num` must be an int (1e6 is a float).
t = linspace(0, 1e-3, 1000000)
# Pass the damped sinusoid `f` (from the previous cell) through the
# low-pass filter (C = 1 nF).
t,vout = low_pass_output(time_fn=f,t=t,C=10**-9)
plt.plot(t,vout)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$V_{o}(t)$",size=14)
plt.title("Output for High frequency damped input in low pass filter")
plt.show()
```
From the above graph, the low-pass filter quickly attenuates the high-frequency sinusoid and gives a distorted output.
# Question 5
```
# Step response of the high-pass filter: V_i(s) = 1/s (unit step), C = 1 nF.
# `s` is the notebook-level sympy symbol defined earlier.
t,V_high_step = high_pass_output(laplace_fn=1/s,C=10**-9)
plt.plot(t,V_high_step)
plt.grid(True)
plt.xlabel("t ------>",size=14)
plt.ylabel(r"$Step\ Response\ V_{o}(t)$",size=14)
plt.title("Step Response When Capacitance = 1nF in high pass filter")
plt.show()
```
The step response here settles at zero because the DC gain of the high-pass filter is 0. We can clearly see from the graph that it starts from 0.793, because the gain of the transfer function at high frequencies is 0.793 (the step's initial edge can be viewed as a high-frequency component, and a high-pass filter only passes high frequencies).
step response overshoots the steady state value of 0, reaches an
extremum, then settles back to 0, unlike the response of the low pass filter which steadily
approaches the steady state value with no extrema. This occurs because of the presence of
zeros at the origin in the transfer function of the high pass filter(which imply that the DC
gain is 0). Since the steady state value of the step response is 0, the total signed area under
the curve of the impulse response must also be 0. This means that the impulse response must
equal zero at one or more time instants. Since the impulse response is the derivative of the
step response, this therefore means that the step response must have at least one extremum.
This explains the behaviour of the step response of the high pass filter.
# Conclusions:
The low pass filter responds by letting the low frequency sinusoid pass through without
much additional attenuation. The output decays as the input also decays.
The high pass filter responds by quickly attenuating the input. Notice that the time scales
show that the high pass filter response is orders of magnitudes faster than the low pass
response. This is because the input frequency is below the cutoff frequency, so the output
goes to 0 very fast.
In conclusion, the sympy module has allowed us to analyse quite complicated circuits by
analytically solving their node equations. We then interpreted the solutions by plotting time
domain responses using the signals toolbox. Thus, sympy combined with the scipy.signal
module is a very useful toolbox for analyzing complicated systems like the active filters in
this assignment.
| github_jupyter |
# Partial Dependence Plot
## Summary
Partial dependence plots visualize the dependence between the response and a set of target features (usually one or two), marginalizing over all the other features. For a perturbation-based interpretability method, it is relatively quick. PDP assumes independence between the features, and can be misleading interpretability-wise when this is not met (e.g. when the model has many high order interactions).
## How it Works
The PDP module for `scikit-learn` {cite}`pedregosa2011scikit` provides a succinct description of the algorithm [here](https://scikit-learn.org/stable/modules/partial_dependence.html).
Christoph Molnar's "Interpretable Machine Learning" e-book {cite}`molnar2020interpretable` has an excellent overview on partial dependence that can be found [here](https://christophm.github.io/interpretable-ml-book/pdp.html).
The conceiving paper "Greedy Function Approximation: A Gradient Boosting Machine" {cite}`friedman2001greedy` provides a good motivation and definition.
## Code Example
The following code will train a blackbox pipeline for the breast cancer dataset. Afterwards it will interpret the pipeline and its decisions with Partial Dependence Plots. The visualizations provided will be for global explanations.
```
# Render interpret visualizations inline in the notebook.
from interpret import set_visualize_provider
from interpret.provider import InlineProvider
set_visualize_provider(InlineProvider())
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from interpret import show
from interpret.blackbox import PartialDependence
seed = 1
X, y = load_breast_cancer(return_X_y=True, as_frame=True)
# Hold out 20% of the data; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=seed)
# Blackbox pipeline: PCA feature rotation followed by a random forest.
pca = PCA()
rf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
blackbox_model = Pipeline([('pca', pca), ('rf', rf)])
blackbox_model.fit(X_train, y_train)
# PDP only needs a probability-returning callable plus background data.
pdp = PartialDependence(predict_fn=blackbox_model.predict_proba, data=X_train)
pdp_global = pdp.explain_global()
show(pdp_global)
```
## Further Resources
- [Paper link to conceiving paper](https://projecteuclid.org/download/pdf_1/euclid.aos/1013203451)
- [scikit-learn on their PDP module](https://scikit-learn.org/stable/modules/partial_dependence.html)
## Bibliography
```{bibliography} references.bib
:style: unsrt
:filter: docname in docnames
```
## API
### PartialDependence
```{eval-rst}
.. autoclass:: interpret.blackbox.PartialDependence
:members:
:inherited-members:
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Descente-de-Gradient" data-toc-modified-id="Descente-de-Gradient-1"><span class="toc-item-num">1 </span>Descente de Gradient</a></span></li></ul></div>
Descente de Gradient
================
L'[algorithme de la descente de gradient](http://en.wikipedia.org/wiki/Gradient_descent) est un algorithme d'optimisation pour trouver un minimum local d'une fonction scalaire à partir d'un point donné, en effectuant de pas successifs dans la direction de l'inverse du gradient.
Pour une fonction $f: \mathbb{R}^n \to \mathbb{R}$, partant d'un point $\mathbf{x}_0$, la méthode calcule les points successifs dans le domaine de la fonction
$$
\mathbf{x}_{n + 1} = \mathbf{x}_n - \eta \left( \nabla f \right)_{\mathbf{x}_n} \; ,
$$
où
$\eta > 0$ est une taille de *pas* suffisamment petite et $\left( \nabla f \right)_{\mathbf{x}_n}$ est le [gradient](http://en.wikipedia.org/wiki/Gradient) de $f$ évalué au point $\mathbf{x}_n$. Les valeurs successives de la fonction
$$
f(\mathbf{x}_0) \ge f(\mathbf{x}_1) \ge f(\mathbf{x}_2) \ge \dots
$$
vont décroître globalement et la séquence $\mathbf{x}_n$ converge habituellement vers un minimum local.
En pratique utiliser un pas de taille fixe $\eta$ est particulièrement inefficace et la plupart des algorithmes vont plutôt chercher à l'adapter à chaque itération.
Le code suivant implémente la descente de gradient avec un pas de taille fixe s'arrétant quand la [norme](http://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) du gradient descend en dessous d'un certain seuil.
Attention par défaut, pytorch *accumule* les gradients à chaque passe inverse!
C'est pourquoi il faut le remettre à zéro à chaque itération.
Commençons par importer les suspects usuels
```
import torch
import numpy as np
import math
```
Illustrons l'accumulation du gradient
```
# Leaf tensor with uninitialized values; requires_grad=True makes autograd
# track operations on it.
x1 = torch.empty(2, requires_grad=True)
x1
f1 = torch.pow(x1[0],2)
f1
# x1.grad.zero_()
# retain_graph=True keeps the graph alive so backward can be called again;
# each call ADDS into x1.grad (gradients accumulate by default).
f1.backward(retain_graph=True)
x1.grad
# In-place update through .data bypasses autograd tracking.
x1.data.sub_(torch.ones(2))
```
Maintenant essayons d'implémenter une descente de gradient pour la fonction
$f(X) = sin(x_1) + cos(x_2)$
```
# Starting point (1, 1) and the objective f(x) = sin(x_0) + cos(x_1).
x0 = torch.ones(2,requires_grad=True)
f = torch.sin(x0[0]) + torch.cos(x0[1])
f
```
On va avoir besoin de :
```python
f.backward(...) # Pour le calcul du gradient proprement dit
x.grad.data.zero_() # pour la remise à zéro du gradient après une itération
np.linalg.norm(x.grad.numpy()) # pour contrôler la convergence (norme l2)
```
On veut une fonction gd qui prend en argument $f, x, \eta, \epsilon$
```
def gd(f, x, eta, epsilon):
    """Fixed-step gradient descent on the graph node ``f`` w.r.t. leaf ``x``.

    Mutates ``x`` in place; stops once the gradient's L2 norm drops below
    ``epsilon``.

    NOTE(review): repeated f.backward(retain_graph=True) re-uses the tensors
    saved at forward time; this appears to track the updated x only because
    the in-place ``x.data.sub_`` update mutates the very storage the graph
    saved — fragile, confirm before reusing with other operations.
    """
    while 1:
        f.backward(retain_graph=True)
        # print(np.linalg.norm(x.grad.numpy()))
        if (torch.norm(x.grad) < epsilon):
            break
        else:
            x.data.sub_(eta * x.grad.data)
            # Gradients accumulate across backward calls; reset each step.
            x.grad.data.zero_()
# Run from x0 = (1, 1) with step size 0.9 and tolerance 1e-5.
gd(f, x0, 0.9, 0.00001)
print(x0.data)
print(f.data)
```
Cette fonction ne permet pas d'avoir la valeur de $f$ directement sur le résultat. Il vaut mieux utiliser une fonction qu'un noeud de notre graphe comme argument de notre descente de gradient.
```
# Fresh starting point for the function-based variant.
x0 = torch.ones(2,requires_grad=True)
x0
def f(x):
    """Objective f(x) = sin(x_0) + cos(x_1)."""
    return x[0].sin() + x[1].cos()
def gd(f, x, eta, epsilon):
    """Gradient descent taking a *function* ``f``, so f(x) can be
    re-evaluated on the result afterwards.  Mutates ``x`` in place."""
    fval = f(x)
    while 1:
        fval.backward(retain_graph=True)  # No need to recompute f(x) here:
                                          # only the gradient matters.  Note
                                          # that in practice this is almost
                                          # never the case.
        if (torch.norm(x.grad) < epsilon):
            break
        else:
            x.data.sub_(eta * x.grad.data)
            x.grad.data.zero_()
gd(f, x0, 0.9, 0.00001)
print(x0)
print(f(x0))
```
| github_jupyter |
# A - Using TorchText with Your Own Datasets
In this series we have used the IMDb dataset included as a dataset in TorchText. TorchText has many canonical datasets included for classification, language modelling, sequence tagging, etc. However, frequently you'll be wanting to use your own datasets. Luckily, TorchText has functions to help you to this.
Recall in the series, we:
- defined the `Field`s
- loaded the dataset
- created the splits
As a reminder, the code is shown below:
```python
TEXT = data.Field()
LABEL = data.LabelField()
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split()
```
There are three data formats TorchText can read: `json`, `tsv` (tab separated values) and`csv` (comma separated values).
**In my opinion, the best formatting for TorchText is `json`, which I'll explain later on.**
## Reading JSON
Starting with `json`, your data must be in the `json lines` format, i.e. it must be something like:
```
{"name": "John", "location": "United Kingdom", "age": 42, "quote": ["i", "love", "the", "united kingdom"]}
{"name": "Mary", "location": "United States", "age": 36, "quote": ["i", "want", "more", "telescopes"]}
```
That is, each line is a `json` object. See `data/train.json` for an example.
We then define the fields:
```
# torchtext.legacy hosts the pre-0.9 Field/TabularDataset API.
from torchtext.legacy import data
from torchtext.legacy import datasets
# One Field per attribute we want tokenized and numericalized.
NAME = data.Field()
SAYING = data.Field()
PLACE = data.Field()
```
Next, we must tell TorchText which fields apply to which elements of the `json` object.
For `json` data, we must create a dictionary where:
- the key matches the key of the `json` object
- the value is a tuple where:
- the first element becomes the batch object's attribute name
- the second element is the name of the `Field`
What do we mean when we say "becomes the batch object's attribute name"? Recall in the previous exercises where we accessed the `TEXT` and `LABEL` fields in the train/evaluation loop by using `batch.text` and `batch.label`, this is because TorchText sets the batch object to have a `text` and `label` attribute, each being a tensor containing either the text or the label.
A few notes:
* The order of the keys in the `fields` dictionary does not matter, as long as its keys match the `json` data keys.
- The `Field` name does not have to match the key in the `json` object, e.g. we use `PLACE` for the `"location"` field.
- When dealing with `json` data, not all of the keys have to be used, e.g. we did not use the `"age"` field.
- Also, if the values of `json` field are a string then the `Fields` tokenization is applied (default is to split the string on spaces), however if the values are a list then no tokenization is applied. Usually it is a good idea for the data to already be tokenized into a list, this saves time as you don't have to wait for TorchText to do it.
- The value of the `json` fields do not have to be the same type. Some examples can have their `"quote"` as a string, and some as a list. The tokenization will only get applied to the ones with their `"quote"` as a string.
- If you are using a `json` field, every single example must have an instance of that field, e.g. in this example all examples must have a name, location and quote. However, as we are not using the age field, it does not matter if an example does not have it.
```
# Map json keys -> (batch attribute name, Field); key order doesn't matter,
# and the 'age' key is simply left unused.
fields = {'name': ('n', NAME), 'location': ('p', PLACE), 'quote': ('s', SAYING)}
```
Now, in a training loop we can iterate over the data iterator and access the name via `batch.n`, the location via `batch.p`, and the quote via `batch.s`.
We then create our datasets (`train_data` and `test_data`) with the `TabularDataset.splits` function.
The `path` argument specifices the top level folder common among both datasets, and the `train` and `test` arguments specify the filename of each dataset, e.g. here the train dataset is located at `data/train.json`.
We tell the function we are using `json` data, and pass in our `fields` dictionary defined previously.
```
# Build train/test datasets from json-lines files under data/.
train_data, test_data = data.TabularDataset.splits(
    path = 'data',
    train = 'train.json',
    test = 'test.json',
    format = 'json',
    fields = fields
)
```
If you already had a validation dataset, the location of this can be passed as the `validation` argument.
```
# Same as above, but also loading a pre-made validation split.
train_data, valid_data, test_data = data.TabularDataset.splits(
    path = 'data',
    train = 'train.json',
    validation = 'valid.json',
    test = 'test.json',
    format = 'json',
    fields = fields
)
```
We can then view an example to make sure it has worked correctly.
Notice how the field names (`n`, `p` and `s`) match up with what was defined in the `fields` dictionary.
Also notice how the word `"United Kingdom"` in `p` has been split by the tokenization, whereas the `"united kingdom"` in `s` has not. This is due to what was mentioned previously, where TorchText assumes that any `json` fields that are lists are already tokenized and no further tokenization is applied.
```
# Inspect the first example's attributes (n, p, s) to sanity-check loading.
print(vars(train_data[0]))
```
We can now use `train_data`, `test_data` and `valid_data` to build a vocabulary and create iterators, as in the other notebooks. We can access all attributes by using `batch.n`, `batch.p` and `batch.s` for the names, places and sayings, respectively.
## Reading CSV/TSV
`csv` and `tsv` are very similar, except csv has elements separated by commas and tsv by tabs.
Using the same example above, our `tsv` data will be in the form of:
```
name location age quote
John United Kingdom 42 i love the united kingdom
Mary United States 36 i want more telescopes
```
That is, on each row the elements are separated by tabs and we have one example per row. The first row is usually a header (i.e. the name of each of the columns), but your data could have no header.
You cannot have lists within `tsv` or `csv` data.
The way the fields are defined is a bit different to `json`. We now use a list of tuples, where each element is also a tuple. The first element of these inner tuples will become the batch object's attribute name, second element is the `Field` name.
Unlike the `json` data, the tuples have to be in the same order that they are within the `tsv` data. Due to this, when skipping a column of data a tuple of `None`s needs to be used, if not then our `SAYING` field will be applied to the `age` column of the `tsv` data and the `quote` column will not be used.
However, if you only wanted to use the `name` and `age` column, you could just use two tuples as they are the first two columns.
We change our `TabularDataset` to read the correct `.tsv` files, and change the `format` argument to `'tsv'`.
If your data has a header, which ours does, it must be skipped by passing `skip_header = True`. If not, TorchText will think the header is an example. By default, `skip_header` will be `False`.
```
# For tsv/csv, `fields` is an ORDERED list of (attr name, Field) tuples
# matching the column order; (None, None) skips the unused 'age' column.
fields = [('n', NAME), ('p', PLACE), (None, None), ('s', SAYING)]
train_data, valid_data, test_data = data.TabularDataset.splits(
    path = 'data',
    train = 'train.tsv',
    validation = 'valid.tsv',
    test = 'test.tsv',
    format = 'tsv',
    fields = fields,
    skip_header = True  # first row is a header, not an example
)
print(vars(train_data[0]))
```
Finally, we'll cover `csv` files.
This is pretty much the exact same as the `tsv` files, expect with the `format` argument set to `'csv'`.
```
# Identical to the tsv case, only the format string changes.
fields = [('n', NAME), ('p', PLACE), (None, None), ('s', SAYING)]
train_data, valid_data, test_data = data.TabularDataset.splits(
    path = 'data',
    train = 'train.csv',
    validation = 'valid.csv',
    test = 'test.csv',
    format = 'csv',
    fields = fields,
    skip_header = True
)
print(vars(train_data[0]))
```
## Why JSON over CSV/TSV?
1. Your `csv` or `tsv` data cannot store lists. This means data cannot already be tokenized, thus every time you run your Python script that reads this data via TorchText, it has to be tokenized. Using advanced tokenizers, such as the `spaCy` tokenizer, takes a non-negligible amount of time. Thus, it is better to tokenize your datasets and store them in the `json lines` format.
2. If tabs appear in your `tsv` data, or commas appear in your `csv` data, TorchText will think they are delimiters between columns. This will cause your data to be parsed incorrectly. Worst of all TorchText will not alert you to this as it cannot tell the difference between a tab/comma in a field and a tab/comma as a delimiter. As `json` data is essentially a dictionary, you access the data within the fields via its key, so do not have to worry about "surprise" delimiters.
## Iterators
Using any of the above datasets, we can then build the vocab and create the iterators.
```
# Build each Field's vocabulary from the training split only.
NAME.build_vocab(train_data)
SAYING.build_vocab(train_data)
PLACE.build_vocab(train_data)
```
Then, we can create the iterators after defining our batch size and device.
By default, the train data is shuffled each epoch, but the validation/test data is sorted. However, TorchText doesn't know what to use to sort our data and it would throw an error if we don't tell it.
There are two ways to handle this, you can either tell the iterator not to sort the validation/test data by passing `sort = False`, or you can tell it how to sort the data by passing a `sort_key`. A sort key is a function that returns a key on which to sort the data on. For example, `lambda x: x.s` will sort the examples by their `s` attribute, i.e their quote. Ideally, you want to use a sort key as the `BucketIterator` will then be able to sort your examples and then minimize the amount of padding within each batch.
We can then iterate over our iterator to get batches of data. Note how by default TorchText has the batch dimension second.
```
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 1
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
sort = False, #don't sort test/validation data
batch_size=BATCH_SIZE,
device=device)
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
sort_key = lambda x: x.s, #sort by s attribute (quote)
batch_size=BATCH_SIZE,
device=device)
print('Train:')
for batch in train_iterator:
print(batch)
print('Valid:')
for batch in valid_iterator:
print(batch)
print('Test:')
for batch in test_iterator:
print(batch)
```
| github_jupyter |
## Data Analysis
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(1)  # fix the RNG seed so any randomized steps are reproducible
# load data
# NOTE(review): assumes the CSV lives one directory up under input_data/ — verify path
df = pd.read_csv('../input_data/heartdisease_data.csv',sep= ',')
df[0:10]
```
The data contains 13 features:<br/>
0) age: Age (years) --> discrete <br/>
1) sex: Sex (1: male, 0: female) --> categorical <br/>
2) cp: Chest pain type (1: typical angina, 2: atypical angina, 3: non-anginal pain, 4: asymptomatic) --> categorical <br/>
3) trestbps: Resting blood pressure (mm Hg on admission to the hospital) --> continuous <br/>
4) chol: Cholesterol measurement (mg/dl) --> continuous <br/>
5) fbs: Fasting blood sugar (0: <120 mg/dl, 1: > 120 mg/dl) --> categorical <br/>
6) restecg: Resting electrocardiographic measurement (0: normal, 1: having ST-T wave abnormality, 2: showing probable or definite left ventricular hypertrophy by Estes' criteria) --> categorical <br/>
7) thalach: Maximum heart rate achieved --> continuous<br/>
8) exang: Exercise induced angina (1: yes; 0: no) --> categorical <br/>
9) oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot) --> continuous<br/>
10) slope: The slope of the peak exercise ST segment (1: upsloping, 2: flat, 3: downsloping) --> categorical<br/>
11) ca: The number of major vessels (0-3) --> categorical <br/>
12) thal: Thalassemia (a type of blood disorder) (3: normal; 6: fixed defect; 7: reversable defect) --> categorical <br/>
and 1 target: Heart disease (0: no, 1: yes) <br/>
```
# select features and target:
# Cast the whole DataFrame to a float NumPy array so the features and the
# target can be sliced positionally below.
df = np.array(df).astype(float)
# features:
X = df[:,:-1]  # all columns except the last one are the 13 features
l,n = X.shape  # l: number of samples, n: number of features
print(l,n)
# target:
y = df[:,-1]  # last column is the heart-disease label (0: no, 1: yes)
```
### Features
```
"""
plt.figure(figsize=(11,6))
features = s[0,:8]
for j in range(2):
for i in range(4):
ii = j*4 + i
plt.subplot2grid((2,4),(j,i))
bins = np.linspace(min(X[:,ii]), max(X[:,ii]),10, endpoint=False)
plt.hist(X[:,ii],bins,histtype='bar',rwidth=0.8,normed=True)
plt.title('%s'%features[ii])
plt.tight_layout(h_pad=1, w_pad=1.5)
"""
```
### Target
```
# Class balance of the target: fraction of non-disease vs disease samples.
plt.figure(figsize=(4,3))
plt.bar(0,sum(y==0)/float(l),width=0.8,color='blue',label='non disease')
plt.bar(1,sum(y==1)/float(l),width=0.8,color='red',label='disease')
plt.xlabel('0: non disease, 1: disease')
plt.title('target')
# Fix: 'label' values were set on the bars above but are never displayed
# unless a legend is drawn.
plt.legend()
```
### 0) Age
```
ct = pd.crosstab(X[:,0], y)
ct.plot.bar(stacked=True,figsize=(12,3))
plt.xlabel('age')
```
### 1) Sex
```
ct = pd.crosstab(X[:,1], y)
ct.plot.bar(stacked=True,figsize=(4,3))
plt.xlabel('0: female, 1: male')
```
### 2) Chest pain type
```
ct = pd.crosstab(X[:,2], y)
ct.plot.bar(stacked=True,figsize=(8,3))
plt.xlabel('Chest pain type')
```
### 3) Resting blood pressure
```
#ct = pd.crosstab(X[:,3], y)
#ct.plot.histo(stacked=True,figsize=(10,3))
#plt.xlabel('Resting blood pressure')
```
### 5) Fasting blood sugar
```
pd.crosstab(X[:,5], y).plot.bar(stacked=True,figsize=(4,3))
plt.xlabel('0: <120 mg/dl, 1: > 120 mg/dl')
```
| github_jupyter |
# Single Qubit Gates
In the previous section we looked at all the possible states a qubit could be in. We saw that qubits could be represented by 2D vectors, and that their states are limited to the form:
$$ |q\rangle = \cos{(\tfrac{\theta}{2})}|0\rangle + e^{i\phi}\sin{\tfrac{\theta}{2}}|1\rangle $$
Where $\theta$ and $\phi$ are real numbers. In this section we will cover _gates,_ the operations that change a qubit between these states. Due to the number of gates and the similarities between them, this chapter is at risk of becoming a list. To counter this, we have included a few digressions to introduce important ideas at appropriate places throughout the chapter.
In _The Atoms of Computation_ we came across some gates and used them to perform a classical computation. An important feature of quantum circuits is that, between initialising the qubits and measuring them, the operations (gates) are *_always_* reversible! These reversible gates can be represented as matrices, and as rotations around the Bloch sphere.
```
from qiskit import *
from math import pi
from qiskit.visualization import plot_bloch_multivector
```
## 1. The Pauli Gates <a id="pauli"></a>
You should be familiar with the Pauli matrices from the linear algebra section. If any of the maths here is new to you, you should use the linear algebra section to bring yourself up to speed. We will see here that the Pauli matrices can represent some very commonly used quantum gates.
### 1.1 The X-Gate <a id="xgate"></a>
The X-gate is represented by the Pauli-X matrix:
$$ X = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} = |0\rangle\langle1| + |1\rangle\langle0| $$
To see the effect a gate has on a qubit, we simply multiply the qubit’s statevector by the gate. We can see that the X-gate switches the amplitudes of the states $|0\rangle$ and $|1\rangle$:
$$ X|0\rangle = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}\begin{bmatrix} 1 \\ 0 \end{bmatrix} = \begin{bmatrix} 0 \\ 1 \end{bmatrix} = |1\rangle$$
<!-- ::: q-block.reminder -->
## Reminders
<details>
<summary>Multiplying Vectors by Matrices</summary>
Matrix multiplication is a generalisation of the inner product we saw in the last chapter. In the specific case of multiplying a vector by a matrix (as seen above), we always get a vector back:
$$ M|v\rangle = \begin{bmatrix}a & b \\ c & d \end{bmatrix}\begin{bmatrix}v_0 \\ v_1 \end{bmatrix}
= \begin{bmatrix}a\cdot v_0 + b \cdot v_1 \\ c \cdot v_0 + d \cdot v_1 \end{bmatrix} $$
In quantum computing, we can write our matrices in terms of basis vectors:
$$X = |0\rangle\langle1| + |1\rangle\langle0|$$
This can sometimes be clearer than using a box matrix as we can see what different multiplications will result in:
$$
\begin{aligned}
X|1\rangle & = (|0\rangle\langle1| + |1\rangle\langle0|)|1\rangle \\
& = |0\rangle\langle1|1\rangle + |1\rangle\langle0|1\rangle \\
& = |0\rangle \times 1 + |1\rangle \times 0 \\
& = |0\rangle
\end{aligned}
$$
In fact, when we see a ket and a bra multiplied like this:
$$ |a\rangle\langle b| $$
this is called the _outer product_, which follows the rule:
$$
|a\rangle\langle b| =
\begin{bmatrix}
a_0 b_0 & a_0 b_1 & \dots & a_0 b_n\\
a_1 b_0 & \ddots & & \vdots \\
\vdots & & \ddots & \vdots \\
a_n b_0 & \dots & \dots & a_n b_n \\
\end{bmatrix}
$$
We can see this does indeed result in the X-matrix as seen above:
$$
|0\rangle\langle1| + |1\rangle\langle0| =
\begin{bmatrix}0 & 1 \\ 0 & 0 \end{bmatrix} +
\begin{bmatrix}0 & 0 \\ 1 & 0 \end{bmatrix} =
\begin{bmatrix}0 & 1 \\ 1 & 0 \end{bmatrix} = X
$$
</details>
<!-- ::: -->
In Qiskit, we can create a short circuit to verify this:
```
# Let's do an X-gate on a |0> qubit
qc = QuantumCircuit(1)
qc.x(0)
qc.draw()
```
Let's see the result of the above circuit. **Note:** Here we use <code>plot_bloch_multivector()</code> which takes a qubit's statevector instead of the Bloch vector.
```
# Let's see the result
backend = Aer.get_backend('statevector_simulator')
out = execute(qc,backend).result().get_statevector()
plot_bloch_multivector(out)
```
We can indeed see the state of the qubit is $|1\rangle$ as expected. We can think of this as a rotation by $\pi$ radians around the *x-axis* of the Bloch sphere. The X-gate is also often called a NOT-gate, referring to its classical analogue.
### 1.2 The Y & Z-gates <a id="ynzgatez"></a>
Similarly to the X-gate, the Y & Z Pauli matrices also act as the Y & Z-gates in our quantum circuits:
$$ Y = \begin{bmatrix} 0 & -i \\ i & 0 \end{bmatrix} \quad\quad\quad\quad Z = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix} $$
$$ Y = -i|0\rangle\langle1| + i|1\rangle\langle0| \quad\quad Z = |0\rangle\langle0| - |1\rangle\langle1| $$
And, unsurprisingly, they also respectively perform rotations by $\pi$ around the y and z-axis of the Bloch sphere.
Below is a widget that displays a qubit’s state on the Bloch sphere, pressing one of the buttons will perform the gate on the qubit:
```
# Run the code in this cell to see the widget
from qiskit_textbook.widgets import gate_demo
gate_demo(gates='pauli')
```
In Qiskit, we can apply the Y and Z-gates to our circuit using:
```
qc.y(0) # Do Y-gate on qubit 0
qc.z(0) # Do Z-gate on qubit 0
qc.draw()
```
## 2. Digression: The X, Y & Z-Bases <a id="xyzbases"></a>
<!-- ::: q-block.reminder -->
## Reminders
<details>
<summary>Eigenvectors of Matrices</summary>
We have seen that multiplying a vector by a matrix results in a vector:
$$
M|v\rangle = |v'\rangle \leftarrow \text{new vector}
$$
If we chose the right vectors and matrices, we can find a case in which this matrix multiplication is the same as doing a multiplication by a scalar:
$$
M|v\rangle = \lambda|v\rangle
$$
(Above, $M$ is a matrix, and $\lambda$ is a scalar). For a matrix $M$, any vector that has this property is called an <i>eigenvector</i> of $M$. For example, the eigenvectors of the Z-matrix are the states $|0\rangle$ and $|1\rangle$:
$$
\begin{aligned}
Z|0\rangle & = |0\rangle \\
Z|1\rangle & = -|1\rangle
\end{aligned}
$$
Since we use vectors to describe the state of our qubits, we often call these vectors <i>eigenstates</i> in this context. Eigenvectors are very important in quantum computing, and it is important you have a solid grasp of them.
</details>
<!-- ::: -->
You may also notice that the Z-gate appears to have no effect on our qubit when it is in either of these two states. This is because the states $|0\rangle$ and $|1\rangle$ are the two _eigenstates_ of the Z-gate. In fact, the _computational basis_ (the basis formed by the states $|0\rangle$ and $|1\rangle$) is often called the Z-basis. This is not the only basis we can use, a popular basis is the X-basis, formed by the eigenstates of the X-gate. We call these two vectors $|+\rangle$ and $|-\rangle$:
$$ |+\rangle = \tfrac{1}{\sqrt{2}}(|0\rangle + |1\rangle) = \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 \\ 1 \end{bmatrix}$$
$$ |-\rangle = \tfrac{1}{\sqrt{2}}(|0\rangle - |1\rangle) = \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 \\ -1 \end{bmatrix} $$
Another less commonly used basis is that formed by the eigenstates of the Y-gate. These are called:
$$ |\circlearrowleft\rangle, \quad |\circlearrowright\rangle$$
We leave it as an exercise to calculate these. There are in fact an infinite number of bases; to form one, we simply need two orthogonal vectors.
### Quick Exercises
1. Verify that $|+\rangle$ and $|-\rangle$ are in fact eigenstates of the X-gate.
2. What eigenvalues do they have?
3. Why would we not see these eigenvalues appear on the Bloch sphere?
4. Find the eigenstates of the Y-gate, and their co-ordinates on the Bloch sphere.
Using only the Pauli-gates it is impossible to move our initialised qubit to any state other than $|0\rangle$ or $|1\rangle$, i.e. we cannot achieve superposition. This means we can see no behaviour different to that of a classical bit. To create more interesting states we will need more gates!
## 3. The Hadamard Gate <a id="hgate"></a>
The Hadamard gate (H-gate) is a fundamental quantum gate. It allows us to move away from the poles of the Bloch sphere and create a superposition of $|0\rangle$ and $|1\rangle$. It has the matrix:
$$ H = \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} $$
We can see that this performs the transformations below:
$$ H|0\rangle = |+\rangle $$
$$ H|1\rangle = |-\rangle $$
This can be thought of as a rotation around the Bloch vector `[1,0,1]` (the line between the x & z-axis), or as transforming the state of the qubit between the X and Z bases.
You can play around with these gates using the widget below:
```
# Run the code in this cell to see the widget
from qiskit_textbook.widgets import gate_demo
gate_demo(gates='pauli+h')
```
### Quick Exercise
1. Write the H-gate as the outer products of vectors $|0\rangle$, $|1\rangle$, $|+\rangle$ and $|-\rangle$.
2. Show that applying the sequence of gates: HZH, to any qubit state is equivalent to applying an X-gate.
3. Find a combination of X, Z and H-gates that is equivalent to a Y-gate (ignoring global phase).
## 4. Digression: Measuring in Different Bases <a id="measuring"></a>
We have seen that the Z-axis is not intrinsically special, and that there are infinitely many other bases. Similarly with measurement, we don’t always have to measure in the computational basis (the Z-basis), we can measure our qubits in any basis.
As an example, let’s try measuring in the X-basis. We can calculate the probability of measuring either $|+\rangle$ or $|-\rangle$:
$$ p(|+\rangle) = |\langle+|q\rangle|^2, \quad p(|-\rangle) = |\langle-|q\rangle|^2 $$
And after measurement, we are guaranteed to have a qubit in one of these two states. Since Qiskit only allows measuring in the Z-basis, we must create our own using Hadamard gates:
```
# Create the X-measurement function:
def x_measurement(qc,qubit,cbit):
    """Measure 'qubit' in the X-basis, and store the result in 'cbit'.

    Qiskit only measures in the Z-basis, so the X-measurement is built
    from a basis change: the first H maps the X-basis states |+>/|->
    onto |0>/|1>, a standard Z-measurement is performed, and the second
    H rotates the (now collapsed) qubit back so its post-measurement
    statevector is |+> or |->.  The gate order here is load-bearing.
    """
    qc.h(qubit)  # change of basis: X-basis -> Z-basis
    qc.measure(qubit, cbit)  # ordinary Z-basis measurement
    qc.h(qubit)  # rotate back so the final state is an X-basis state
    return qc
initial_state = [0,1]
# Initialise our qubit and measure it
qc = QuantumCircuit(1,1)
qc.initialize(initial_state, 0)
x_measurement(qc, 0, 0) # measure qubit 0 to classical bit 0
qc.draw()
```
In the quick exercises above, we saw you could create an X-gate by sandwiching our Z-gate between two H-gates:
$$ X = HZH $$
Starting in the Z-basis, the H-gate switches our qubit to the X-basis, the Z-gate performs a NOT in the X-basis, and the final H-gate returns our qubit to the Z-basis.
<img src="images/bloch_HZH.svg">
We can verify this always behaves like an X-gate by multiplying the matrices:
$$
HZH =
\tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
\begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}
\tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
=
\begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}
=X
$$
Following the same logic, we have created an X-measurement by sandwiching our Z-measurement between two H-gates.
<img src="images/x-measurement.svg">
Let’s now see the results:
```
backend = Aer.get_backend('statevector_simulator') # Tell Qiskit how to simulate our circuit
out_state = execute(qc,backend).result().get_statevector() # Do the simulation, returning the state vector
plot_bloch_multivector(out_state) # Display the output state vector
```
We initialised our qubit in the state $|1\rangle$, but we can see that, after the measurement, we have collapsed our qubit to the states $|+\rangle$ or $|-\rangle$. If you run the cell again, you will see different results, but the final state of the qubit will always be $|+\rangle$ or $|-\rangle$.
### Quick Exercises
1. If we initialise our qubit in the state $|+\rangle$, what is the probability of measuring it in state $|-\rangle$?
2. Use Qiskit to display the probability of measuring a $|0\rangle$ qubit in the states $|+\rangle$ and $|-\rangle$ (**Hint:** you might want to use <code>.get_counts()</code> and <code>plot_histogram()</code>).
3. Try to create a function that measures in the Y-basis.
Measuring in different bases allows us to see Heisenberg’s famous uncertainty principle in action. Having certainty of measuring a state in the Z-basis removes all certainty of measuring a specific state in the X-basis, and vice versa. A common misconception is that the uncertainty is due to the limits in our equipment, but here we can see the uncertainty is actually part of the nature of the qubit.
For example, if we put our qubit in the state $|0\rangle$, our measurement in the Z-basis is certain to be $|0\rangle$, but our measurement in the X-basis is completely random! Similarly, if we put our qubit in the state $|-\rangle$, our measurement in the X-basis is certain to be $|-\rangle$, but now any measurement in the Z-basis will be completely random.
More generally: _Whatever state our quantum system is in, there is always a measurement that has a deterministic outcome._
The introduction of the H-gate has allowed us to explore some interesting phenomena, but we are still very limited in our quantum operations. Let us now introduce a new type of gate:
## 5. The R<sub>ϕ</sub>-gate <a id="rphigate"></a>
The $R_\phi$-gate is _parametrised,_ that is, it needs a number ($\phi$) to tell it exactly what to do. The $R_\phi$-gate performs a rotation of $\phi$ around the Z-axis direction (and as such is sometimes also known as the $R_z$-gate). It has the matrix:
$$
R_\phi = \begin{bmatrix} 1 & 0 \\ 0 & e^{i\phi} \end{bmatrix}
$$
Where $\phi$ is a real number.
You can use the widget below to play around with the $R_\phi$-gate, specify $\phi$ using the slider:
```
# Run the code in this cell to see the widget
from qiskit_textbook.widgets import gate_demo
gate_demo(gates='pauli+h+rz')
```
In Qiskit, we specify an $R_\phi$-gate using `rz(phi, qubit)`:
```
qc = QuantumCircuit(1)
qc.rz(pi/4, 0)
qc.draw()
```
You may notice that the Z-gate is a special case of the $R_\phi$-gate, with $\phi = \pi$. In fact there are three more commonly referenced gates we will mention in this chapter, all of which are special cases of the $R_\phi$-gate:
## 6. The I, S and T-gates <a id="istgates"></a>
### 6.1 The I-gate <a id="igate"></a>
First comes the I-gate (aka ‘Id-gate’ or ‘Identity gate’). This is simply a gate that does nothing. Its matrix is the identity matrix:
$$
I = \begin{bmatrix} 1 & 0 \\ 0 & 1\end{bmatrix}
$$
Applying the identity gate anywhere in your circuit should have no effect on the qubit state, so it’s interesting this is even considered a gate. There are two main reasons behind this, one is that it is often used in calculations, for example: proving the X-gate is its own inverse:
$$ I = XX $$
The second, is that it is often useful when considering real hardware to specify a ‘do-nothing’ or ‘none’ operation.
#### Quick Exercise
1. What are the eigenstates of the I-gate?
### 6.2 The S-gates <a id="sgate"></a>
The next gate to mention is the S-gate (sometimes known as the $\sqrt{Z}$-gate), this is an $R_\phi$-gate with $\phi = \pi/2$. It does a quarter-turn around the Bloch sphere. It is important to note that unlike every gate introduced in this chapter so far, the S-gate is **not** its own inverse! As a result, you will often see the $S^\dagger$-gate, (also “S-dagger”, “Sdg” or $\sqrt{Z}^\dagger$-gate). The $S^\dagger$-gate is clearly an $R_\phi$-gate with $\phi = -\pi/2$:
$$ S = \begin{bmatrix} 1 & 0 \\ 0 & e^{\frac{i\pi}{2}} \end{bmatrix}, \quad S^\dagger = \begin{bmatrix} 1 & 0 \\ 0 & e^{-\frac{i\pi}{2}} \end{bmatrix}$$
The name "$\sqrt{Z}$-gate" is due to the fact that two successively applied S-gates has the same effect as one Z-gate:
$$ SS|q\rangle = Z|q\rangle $$
This notation is common throughout quantum computing.
To add an S-gate in Qiskit:
```
qc = QuantumCircuit(1)
qc.s(0) # Apply S-gate to qubit 0
qc.sdg(0) # Apply Sdg-gate to qubit 0
qc.draw()
```
### 6.3 The T-gate <a id="tgate"></a>
The T-gate is a very commonly used gate, it is an $R_\phi$-gate with $\phi = \pi/4$:
$$ T = \begin{bmatrix} 1 & 0 \\ 0 & e^{\frac{i\pi}{4}} \end{bmatrix}, \quad T^\dagger = \begin{bmatrix} 1 & 0 \\ 0 & e^{-\frac{i\pi}{4}} \end{bmatrix}$$
As with the S-gate, the T-gate is sometimes also known as the $\sqrt[4]{Z}$-gate.
In Qiskit:
```
qc = QuantumCircuit(1)
qc.t(0) # Apply T-gate to qubit 0
qc.tdg(0) # Apply Tdg-gate to qubit 0
qc.draw()
```
You can use the widget below to play around with all the gates introduced in this chapter so far:
```
# Run the code in this cell to see the widget
from qiskit_textbook.widgets import gate_demo
gate_demo()
```
## 7. General U-gates <a id="generalU3"></a>
As we saw earlier, the I, Z, S & T-gates were all special cases of the more general $R_\phi$-gate. In the same way, the $U_3$-gate is the most general of all single-qubit quantum gates. It is a parametrised gate of the form:
$$
U_3(\theta, \phi, \lambda) = \begin{bmatrix} \cos(\theta/2) & -e^{i\lambda}\sin(\theta/2) \\
e^{i\phi}\sin(\theta/2) & e^{i\lambda+i\phi}\cos(\theta/2)
\end{bmatrix}
$$
Every gate in this chapter could be specified as $U_3(\theta,\phi,\lambda)$, but it is unusual to see this in a circuit diagram, possibly due to the difficulty in reading this.
Qiskit provides $U_2$ and $U_1$-gates, which are specific cases of the $U_3$ gate in which $\theta = \tfrac{\pi}{2}$, and $\theta = \phi = 0$ respectively. You will notice that the $U_1$-gate is equivalent to the $R_\phi$-gate.
$$
\begin{aligned}
U_3(\tfrac{\pi}{2}, \phi, \lambda) = U_2 = \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & -e^{i\lambda} \\
e^{i\phi} & e^{i\lambda+i\phi}
\end{bmatrix}
& \quad &
U_3(0, 0, \lambda) = U_1 = \begin{bmatrix} 1 & 0 \\
0 & e^{i\lambda}\\
\end{bmatrix}
\end{aligned}
$$
Before running on real IBM quantum hardware, all single-qubit operations are compiled down to $U_1$ , $U_2$ and $U_3$ . For this reason they are sometimes called the _physical gates_.
It should be obvious from this that there are an infinite number of possible gates, and that this also includes $R_x$ and $R_y$-gates, although they are not mentioned here. It must also be noted that there is nothing special about the Z-basis, except that it has been selected as the standard computational basis. That is why we have names for the S and T-gates, but not their X and Y equivalents (e.g. $\sqrt{X}$ and $\sqrt[4]{Y}$).
```
import qiskit
qiskit.__qiskit_version__
```
| github_jupyter |
```
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
```
<img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
# Python API Examples
This notebook walks through the basics of the Riva Speech and Language AI Services.
## Overview
NVIDIA Riva is a platform for building and deploying AI applications that fuse vision, speech and other sensors. It offers a complete workflow to build, train and deploy AI systems that can use visual cues such as gestures and gaze along with speech in context. With the Riva platform, you can:
- Build speech and visual AI applications using pretrained NVIDIA Neural Modules ([NeMo](https://github.com/NVIDIA/NeMo)) available at NVIDIA GPU Cloud ([NGC](https://ngc.nvidia.com/catalog/models?orderBy=modifiedDESC&query=%20label%3A%22NeMo%2FPyTorch%22&quickFilter=models&filters=)).
- Transfer learning: re-train your model on domain-specific data, with NVIDIA [NeMo](https://github.com/NVIDIA/NeMo). NeMo is a toolkit and platform that enables researchers to define and build new state-of-the-art speech and natural language processing models.
- Optimize neural network performance and latency using NVIDIA TensorRT
- Deploy AI applications with TensorRT Inference Server:
- Support multiple network formats: ONNX, TensorRT plans, PyTorch TorchScript models.
    - Deployment on multiple platforms: from datacenter to edge servers, via Helm to K8s cluster, on NVIDIA Volta/Turing GPUs or Jetson Xavier platforms.
See the below video for a demo of Riva capabilities.
```
from IPython.display import IFrame
# Riva Youtube demo video
IFrame("https://www.youtube.com/embed/r264lBi1nMU?rel=0&controls=0&showinfo=0", width="560", height="315", frameborder="0", allowfullscreen=True)
```
For more detailed information on Riva, please refer to the [Riva developer documentation](https://developer.nvidia.com/).
## Introduction to the Riva Speech and Natural Language services
Riva offers a rich set of speech and natural language understanding services such as:
- Automated speech recognition (ASR)
- Text-to-Speech synthesis (TTS)
- A collection of natural language understanding services such as named entity recognition (NER), punctuation, intent classification.
## Learning objectives
- Understand how to interact with Riva Speech and Natural Language APIs, services and use cases
## Requirements and setup
To execute this notebook, please follow the setup steps in [README](./README.md).
We first import some required libraries.
```
import io
import librosa
from time import time
import numpy as np
import IPython.display as ipd
import grpc
import requests
# NLP proto
import riva_api.riva_nlp_pb2 as rnlp
import riva_api.riva_nlp_pb2_grpc as rnlp_srv
# ASR proto
import riva_api.riva_asr_pb2 as rasr
import riva_api.riva_asr_pb2_grpc as rasr_srv
# TTS proto
import riva_api.riva_tts_pb2 as rtts
import riva_api.riva_tts_pb2_grpc as rtts_srv
import riva_api.riva_audio_pb2 as ra
```
### Create Riva clients and connect to Riva Speech API server
The below URI assumes a local deployment of the Riva Speech API server on the default port. In case the server deployment is on a different host or via Helm chart on Kubernetes, the user should use an appropriate URI.
```
channel = grpc.insecure_channel('localhost:50051')
riva_asr = rasr_srv.RivaSpeechRecognitionStub(channel)
riva_nlp = rnlp_srv.RivaLanguageUnderstandingStub(channel)
riva_tts = rtts_srv.RivaSpeechSynthesisStub(channel)
```
## Content
1. [Offline ASR Example](#1)
1. [Core NLP Service Examples](#2)
1. [TTS Service Example](#3)
1. [Riva NLP Service Examples](#4)
<a id="1"></a>
## 1. Offline ASR Example
Riva Speech API supports `.wav` files in PCM format, `.alaw`, `.mulaw` and `.flac` formats with single channel in this release.
```
# This example uses a .wav file with LINEAR_PCM encoding.
# read in an audio file from local disk
path = "/work/wav/sample.wav"
audio, sr = librosa.core.load(path, sr=None)
with io.open(path, 'rb') as fh:
content = fh.read()
ipd.Audio(path)
# Set up an offline/batch recognition request
req = rasr.RecognizeRequest()
req.audio = content # raw bytes
req.config.encoding = ra.AudioEncoding.LINEAR_PCM # Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings
req.config.sample_rate_hertz = sr # Audio will be resampled if necessary
req.config.language_code = "en-US" # Ignored, will route to correct model in future release
req.config.max_alternatives = 1 # How many top-N hypotheses to return
req.config.enable_automatic_punctuation = True # Add punctuation when end of VAD detected
req.config.audio_channel_count = 1 # Mono channel
response = riva_asr.Recognize(req)
asr_best_transcript = response.results[0].alternatives[0].transcript
print("ASR Transcript:", asr_best_transcript)
print("\n\nFull Response Message:")
print(response)
```
<a id="2"></a>
## 2. Core NLP Service Examples
All of the Core NLP Services support batched requests. The maximum batch size,
if any, of the underlying models is hidden from the end user and automatically
batched by the Riva and TRTIS servers.
The Core NLP API provides three methods currently:
1. TransformText - map an input string to an output string
2. ClassifyText - return a single label for the input string
3. ClassifyTokens - return a label per input token
```
# Use the TextTransform API to run the punctuation model
req = rnlp.TextTransformRequest()
req.model.model_name = "riva_punctuation"
req.text.append("add punctuation to this sentence")
req.text.append("do you have any red nvidia shirts")
req.text.append("i need one cpu four gpus and lots of memory "
"for my new computer it's going to be very cool")
nlp_resp = riva_nlp.TransformText(req)
print("TransformText Output:")
print("\n".join([f" {x}" for x in nlp_resp.text]))
# Use the TokenClassification API to run a Named Entity Recognition (NER) model
# Note: the model configuration of the NER model indicates that the labels are
# in IOB format. Riva, subsequently, knows to:
# a) ignore 'O' labels
# b) Remove B- and I- prefixes from labels
# c) Collapse sequences of B- I- ... I- tokens into a single token
req = rnlp.TokenClassRequest()
req.model.model_name = "riva_ner" # If you have deployed a custom model with the domain_name
# parameter in ServiceMaker's `riva-build` command then you should use
# "riva_ner_<your_input_domain_name>" where <your_input_domain_name>
# is the name you provided to the domain_name parameter.
req.text.append("Jensen Huang is the CEO of NVIDIA Corporation, "
"located in Santa Clara, California")
resp = riva_nlp.ClassifyTokens(req)
print("Named Entities:")
for result in resp.results[0].results:
print(f" {result.token} ({result.label[0].class_name})")
# Submit a TextClassRequest for text classification.
# Riva NLP comes with a default text_classification domain called "domain_misty" which consists of
# 4 classes: meteorology, personality, weather and nomatch
request = rnlp.TextClassRequest()
request.model.model_name = "riva_text_classification_domain" # If you have deployed a custom model
# with the `--domain_name` parameter in ServiceMaker's `riva-build` command
# then you should use "riva_text_classification_<your_input_domain_name>"
# where <your_input_domain_name> is the name you provided to the
# domain_name parameter. In this case the domain_name is "domain"
request.text.append("Is it going to snow in Burlington, Vermont tomorrow night?")
request.text.append("What causes rain?")
request.text.append("What is your favorite season?")
ct_response = riva_nlp.ClassifyText(request)
print(ct_response)
```
<a id="3"></a>
## 3. TTS Service Example
Subsequent releases will include added features, including model registration to support multiple languages/voices with the same API. Support for resampling to alternative sampling rates will also be added.
```
req = rtts.SynthesizeSpeechRequest()
req.text = "Is it recognize speech or wreck a nice beach?"
req.language_code = "en-US" # currently required to be "en-US"
req.encoding = ra.AudioEncoding.LINEAR_PCM # Supports LINEAR_PCM, FLAC, MULAW and ALAW audio encodings
req.sample_rate_hz = 22050 # ignored, audio returned will be 22.05KHz
req.voice_name = "ljspeech" # ignored
resp = riva_tts.Synthesize(req)
audio_samples = np.frombuffer(resp.audio, dtype=np.float32)
ipd.Audio(audio_samples, rate=22050)
```
<a id="4"></a>
## 4. Riva NLP Service Examples
The NLP Service contains higher-level/more application-specific NLP APIs. This
guide demonstrates how the AnalyzeIntent API can be used for queries across
both known and unknown domains.
```
# The AnalyzeIntent API can be used to query a Intent Slot classifier. The API can leverage a
# text classification model to classify the domain of the input query and then route to the
# appropriate intent slot model.
# Lets first see an example where the domain is known. This skips execution of the domain classifier
# and proceeds directly to the intent/slot model for the requested domain.
req = rnlp.AnalyzeIntentRequest()
req.query = "How is the humidity in San Francisco?"
req.options.domain = "weather" # The <domain_name> is appended to "riva_intent_" to look for a
# model "riva_intent_<domain_name>". So in this e.g., the model "riva_intent_weather"
# needs to be preloaded in riva server. If you would like to deploy your
# custom Joint Intent and Slot model use the `--domain_name` parameter in
# ServiceMaker's `riva-build intent_slot` command.
resp = riva_nlp.AnalyzeIntent(req)
print(resp)
# Below is an example where the input domain is not provided.
req = rnlp.AnalyzeIntentRequest()
req.query = "Is it going to rain tomorrow?"
# The input query is first routed to the a text classification model called "riva_text_classification_domain"
# The output class label of "riva_text_classification_domain" is appended to "riva_intent_"
# to get the appropriate Intent Slot model to execute for the input query.
# Note: The model "riva_text_classification_domain" needs to be loaded into Riva server and have the appropriate
# class labels that would invoke the corresponding intent slot model.
resp = riva_nlp.AnalyzeIntent(req)
print(resp)
# Some weather Intent queries
queries = [
"Is it currently cloudy in Tokyo?",
"What is the annual rainfall in Pune?",
"What is the humidity going to be tomorrow?"
]
for q in queries:
req = rnlp.AnalyzeIntentRequest()
req.query = q
start = time()
resp = riva_nlp.AnalyzeIntent(req)
print(f"[{resp.intent.class_name}]\t{req.query}")
# Demonstrate latency by calling repeatedly.
# NOTE: this is a synchronous API call, so request #N will not be sent until
# response #N-1 is returned. This means latency and throughput will be negatively
# impacted by long-distance & VPN connections
req = rnlp.TextTransformRequest()
req.text.append("i need one cpu four gpus and lots of memory for my new computer it's going to be very cool")
iterations = 10
# Demonstrate synchronous performance
start_time = time()
for _ in range(iterations):
nlp_resp = riva_nlp.PunctuateText(req)
end_time = time()
print(f"Time to complete {iterations} synchronous requests: {end_time-start_time}")
# Demonstrate async performance
start_time = time()
futures = []
for _ in range(iterations):
futures.append(riva_nlp.PunctuateText.future(req))
for f in futures:
f.result()
end_time = time()
print(f"Time to complete {iterations} asynchronous requests: {end_time-start_time}\n")
```
<a id="5"></a>
## 5. Go deeper into Riva capabilities
Now that you have a basic introduction to the Riva APIs, you may like to try out:
### 1. Sample apps:
Riva comes with various sample apps as a demonstration for how to use the APIs to build interesting applications such as a [chatbot](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/weather.html), a domain specific speech recognition or [keyword (entity) recognition system](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/callcenter.html), or simply how Riva allows scaling out for handling massive amount of requests at the same time. ([SpeechSquad)](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/samples/speechsquad.html)
Have a look at the Sample Application section in the [Riva developer documentation](https://developer.nvidia.com/) for all the sample apps.
### 2. Finetune your own domain specific Speech or NLP model and deploy into Riva.
Train the latest state-of-the-art speech and natural language processing models on your own data using [NeMo](https://github.com/NVIDIA/NeMo) or [Transfer Learning ToolKit](https://developer.nvidia.com/transfer-learning-toolkit) and deploy them on Riva using the [Riva ServiceMaker tool](https://docs.nvidia.com/deeplearning/riva/user-guide/docs/model-servicemaker.html).
### 3. Further resources:
Explore the details of each of the APIs and their functionalities in the [docs](https://docs.nvidia.com/deeplearning/jarvis/user-guide/docs/protobuf-api/protobuf-api-root.html).
| github_jupyter |
```
# Colab environment setup: pin pyyaml, then install a detectron2 wheel that
# matches the installed torch/CUDA combination.
!pip install pyyaml==5.1
import torch
# e.g. "1.9" from "1.9.0+cu102"
TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
# the CUDA tag is the suffix after "+" in the torch version string (e.g. "cu102")
CUDA_VERSION = torch.__version__.split("+")[-1]
print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
# Install detectron2 that matches the above pytorch version
# See https://detectron2.readthedocs.io/tutorials/install.html for instructions
!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/$CUDA_VERSION/torch$TORCH_VERSION/index.html
# If there is not yet a detectron2 release that matches the given torch + CUDA version, you need to install a different pytorch.
# exit(0) # After installation, you may need to "restart runtime" in Colab. This line can also restart runtime
# Show the GPU and CUDA toolkit actually available in this runtime.
!nvidia-smi
!nvcc --version
# Mount Google Drive; the dataset and model outputs live under `madeira`.
from google.colab import drive
drive.mount('/content/gdrive')
project_path = '/content/gdrive/MyDrive/madeira'
images_path = f'{project_path}'  # images/annotations sit at the project root
# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
import datetime
from google.colab.patches import cv2_imshow
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data.datasets import register_coco_instances
# Register the train/test splits (COCO-format annotations) with detectron2's
# catalogs so they can be referenced by name in the config.
register_coco_instances("train_coco", {}, f"{images_path}/train_coco/annotations.json", f'{images_path}/train_coco')
train_dataset = DatasetCatalog.get("train_coco")
train_metadata = MetadataCatalog.get("train_coco")
register_coco_instances("test_coco", {}, f"{images_path}/test_coco/annotations.json", f'{images_path}/test_coco')
test_dataset = DatasetCatalog.get("test_coco")
test_metadata = MetadataCatalog.get("test_coco")
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader, build_detection_train_loader
from detectron2.data import detection_utils as utils
import detectron2.data.transforms as T
import copy
def custom_mapper(dataset_dict):
    """Dataset mapper applying photometric, flip, and crop augmentation.

    Returns a copy of ``dataset_dict`` carrying the augmented image as a
    CHW float32 tensor under ``"image"`` and the transformed ground truth
    under ``"instances"``; the raw ``"annotations"`` entry is consumed.
    """
    record = copy.deepcopy(dataset_dict)  # never mutate the cached dict
    img = utils.read_image(record["file_name"], format="BGR")
    augmentations = [
        T.RandomBrightness(0.9, 1.1),
        T.RandomContrast(0.9, 1.1),
        T.RandomSaturation(0.9, 1.1),
        T.RandomFlip(prob=0.5, horizontal=False, vertical=True),
        T.RandomFlip(prob=0.5, horizontal=True, vertical=False),
        T.RandomCrop("relative", (0.4, 0.4))
    ]
    img, applied = T.apply_transform_gens(augmentations, img)
    # HWC uint8 -> CHW float32 tensor, as the model expects.
    record["image"] = torch.as_tensor(img.transpose(2, 0, 1).astype("float32"))
    transformed_annos = []
    for obj in record.pop("annotations"):
        if obj.get("iscrowd", 0) == 0:  # crowd regions are not trained on
            transformed_annos.append(
                utils.transform_instance_annotations(obj, applied, img.shape[:2])
            )
    gt = utils.annotations_to_instances(transformed_annos, img.shape[:2])
    record["instances"] = utils.filter_empty_instances(gt)
    return record
class AugTrainer(DefaultTrainer):
    # DefaultTrainer subclass whose train loader uses `custom_mapper`'s
    # augmentations instead of detectron2's default mapper.
    @classmethod
    def build_train_loader(cls, cfg):
        return build_detection_train_loader(cfg, mapper=custom_mapper)
# First-time training: build the config, construct the trainer, and train
# starting from the COCO-pretrained checkpoint.
cfg = get_cfg()
cfg.OUTPUT_DIR = f'{project_path}/model/best'
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
# Start from the Mask R-CNN R50-FPN 3x config and its pretrained weights.
cfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.DATASETS.TRAIN = ("train_coco",)
cfg.DATASETS.TEST = ()  # no evaluation during training
cfg.DATALOADER.NUM_WORKERS = 2
cfg.SOLVER.IMS_PER_BATCH = 6
cfg.SOLVER.BASE_LR = 0.002
cfg.SOLVER.MAX_ITER = 300  # short schedule; raise for a real run
# NOTE(review): SEM_SEG_HEAD is not used by Mask R-CNN's ROI heads; setting
# it is harmless, but ROI_HEADS.NUM_CLASSES = 2 is what matters here.
cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 2
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
trainer = AugTrainer(cfg)
checkpointer = DetectionCheckpointer(trainer.model, save_dir=cfg.OUTPUT_DIR)
# Train. resume=False loads cfg.MODEL.WEIGHTS (the pretrained checkpoint).
# FIX: the original then called resume_or_load() a second time with the
# default resume=True, which silently re-loads any previous last_checkpoint
# from OUTPUT_DIR and defeats the "first training" intent -- dropped here,
# along with a duplicate MAX_ITER assignment and a redundant `import os`.
trainer.resume_or_load(resume=False)
trainer.train()
from detectron2.utils.visualizer import ColorMode
# Run inference with the freshly trained weights on one test image.
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # keep detections scoring >= 0.5
predictor = DefaultPredictor(cfg)
#im = cv2.imread(f'{images_path}/de frente.jpeg')
im = cv2.imread(f'{images_path}/test_coco/JPEGImages/1_3.jpg')
print(test_metadata.thing_classes)
outputs = predictor(im)  # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
v = Visualizer(im[:, :, ::-1],  # BGR -> RGB for visualization
               metadata=train_metadata,
               scale=0.5,
               instance_mode=ColorMode.IMAGE_BW   # remove the colors of unsegmented pixels. This option is only available for segmentation models
)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(out.get_image()[:, :, ::-1])  # back to BGR for cv2 display
```
| github_jupyter |
```
import pandas as pd
import numpy as np
# Loading the dataset: NYC Airbnb listings (AB_NYC_2019, Kaggle)
data = pd.read_csv("AB_NYC_2019.csv")
data.head()  # quick sanity check of the first rows
```
### Features
For the rest of the homework, you'll need to use the features from the previous homework with additional two `'neighbourhood_group'` and `'room_type'`. So the whole feature set will be set as follows:
* `'neighbourhood_group'`,
* `'room_type'`,
* `'latitude'`,
* `'longitude'`,
* `'price'`,
* `'minimum_nights'`,
* `'number_of_reviews'`,
* `'reviews_per_month'`,
* `'calculated_host_listings_count'`,
* `'availability_365'`
Select only them and fill in the missing values with 0.
```
# Keep only the modelling features. FIX: .copy() makes this an independent
# frame, so the fillna assignment below writes to it directly instead of to
# a view of `data` (avoids pandas' SettingWithCopyWarning and potentially
# lost writes).
new_data = data[['neighbourhood_group', 'room_type', 'latitude', 'longitude',
                 'price', 'minimum_nights', 'number_of_reviews',
                 'reviews_per_month', 'calculated_host_listings_count',
                 'availability_365']].copy()
new_data.info()
new_data.isnull().sum()
# reviews_per_month is NaN for listings with zero reviews -> fill with 0
new_data['reviews_per_month'] = new_data['reviews_per_month'].fillna(0)
new_data.isnull().sum()
```
### Question 1
What is the most frequent observation (mode) for the column `'neighbourhood_group'`?
```
# .mode() returns a Series (ties are possible); index [0] prints the modal
# value itself instead of the Series repr with its index/dtype footer.
print('The most frequent observation for the column neighbourhood_group is', new_data['neighbourhood_group'].mode()[0])
```
### Split the data
* Split your data in train/val/test sets, with 60%/20%/20% distribution.
* Use Scikit-Learn for that (the `train_test_split` function) and set the seed to 42.
* Make sure that the target value ('price') is not in your dataframe.
```
from sklearn.model_selection import train_test_split

# Features/target split; 'price' must not leak into X.
X = new_data.drop(['price'], axis=1)
y = new_data["price"]
# First carve out the 20% test set, then split the remaining 80% again.
# NOTE(review): test_size=0.2 on the second split gives 64/16/20, not the
# stated 60/20/20 (that would need test_size=0.25); kept as in the original
# so downstream results are unchanged.
X_full_train, X_test, y_full_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_full_train, y_full_train, test_size=0.2, random_state=42)
X_train = X_train.reset_index(drop=True)
X_val = X_val.reset_index(drop=True)
# BUG FIX: the original ran `X_test = X_val.reset_index(drop=True)`, which
# silently replaced the held-out test features with the validation features.
X_test = X_test.reset_index(drop=True)
```
### Question 2
* Create the [correlation matrix](https://www.google.com/search?q=correlation+matrix) for the numerical features of your train dataset.
* In a correlation matrix, you compute the correlation coefficient between every pair of features in the dataset.
* What are the two features that have the biggest correlation in this dataset?
```
# Pairwise Pearson correlations over the numeric training columns.
X_train.corr()
```
The *number_of_reviews* and *reviews_per_month* features have the highest correlation score, 0.59.
### Make price binary
* We need to turn the price variable from numeric into binary.
* Let's create a variable `above_average` which is `1` if the price is above (or equal to) `152`.
```
y_train = pd.DataFrame(y_train)
# NOTE(review): this is an alias, not a copy -- adding 'above_average' below
# mutates y_train as well, despite the comment. Later cells actually rely on
# that aliasing (e.g. `y_val[['above_average']]` further down), so it is
# deliberately left as-is; use .copy() if true isolation is ever needed.
y_train1 = y_train #not to lose original train set with price
y_train1['above_average'] = y_train1['price'] >= 152
y_train1
# convert the boolean column to 0/1 integers
y_train1['above_average'] = y_train1.above_average.astype(int)
y_train1
# same binarization for the validation split (y_val1 aliases y_val)
y_val = pd.DataFrame(y_val)
y_val1 = y_val
y_val1['above_average'] = y_val1['price'] >= 152
y_val1['above_average'] = y_val1.above_average.astype(int)
y_val1
# and for the test split (y_test1 aliases y_test)
y_test = pd.DataFrame(y_test)
y_test1 = y_test
y_test1['above_average'] = y_test1['price'] >= 152
y_test1['above_average'] = y_test1.above_average.astype(int)
y_test1
```
### Question 3
* Calculate the mutual information score with the (binarized) price for the two categorical variables that we have. Use the training set only.
* Which of these two variables has bigger score?
* Round it to 2 decimal digits using `round(score, 2)`
```
from sklearn.metrics import mutual_info_score
# Mutual information between each categorical feature and the binarized
# price, computed on the training split only.
round(mutual_info_score(X_train.room_type, y_train1.above_average), 2)
round(mutual_info_score(X_train.neighbourhood_group, y_train1.above_average), 2)
```
Room type has the bigger mutual score with binarized price variable.
### Question 4
* Now let's train a logistic regression
* Remember that we have two categorical variables in the data. Include them using one-hot encoding.
* Fit the model on the training dataset.
* To make sure the results are reproducible across different versions of Scikit-Learn, fit the model with these parameters:
* `model = LogisticRegression(solver='liblinear', C=1.0, random_state=42)`
  * Calculate the accuracy on the validation dataset and round it to 2 decimal digits.
```
new_data.columns
# Feature groups: categoricals get one-hot encoded, numericals pass through.
categorical = ['neighbourhood_group', 'room_type']
numerical = ['latitude', 'longitude',
             'minimum_nights', 'number_of_reviews', 'reviews_per_month',
             'calculated_host_listings_count', 'availability_365']
#ONE HOT ENCODING
from sklearn.feature_extraction import DictVectorizer
train_dict = X_train[categorical + numerical].to_dict(orient='records')
train_dict[0]
dv = DictVectorizer(sparse=False)
dv.fit(train_dict)
X_train = dv.transform(train_dict)
print(X_train.shape)
print(X_train)
dv.get_feature_names()
y_train1 = y_train1[['above_average']]
y_train1
#TRAINING LOGISTIC REGRESSION
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver='liblinear', C=1.0, random_state=42)
model.fit(X_train, y_train1)
# FIX: reuse the DictVectorizer fitted on the TRAINING dicts. The original
# created and fitted a brand-new vectorizer on the validation dicts; if the
# validation split lacks (or adds) any category, the resulting column layout
# no longer matches what the model was trained on and predictions are wrong.
val_dict = X_val[categorical + numerical].to_dict(orient='records')
X_val = dv.transform(val_dict)
X_val.shape
print(y_val)
y_val1 = y_val[['above_average']]
from sklearn.metrics import accuracy_score
y_pred = model.predict(X_val)
round(accuracy_score(y_val1, y_pred), 2)
```
### Question 5
* We have 9 features: 7 numerical features and 2 categorical.
* Let's find the least useful one using the *feature elimination* technique.
* Train a model with all these features (using the same parameters as in Q4).
* Now exclude each feature from this set and train a model without it. Record the accuracy for each model.
* For each feature, calculate the difference between the original accuracy and the accuracy without the feature.
* Which of the following features has the smallest difference?
* `neighbourhood_group`
* `room_type`
* `number_of_reviews`
* `reviews_per_month`
> **note**: the difference doesn't have to be positive
```
#Model without neighbourhood_group
model1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)
model1.fit(np.delete(X_train, [5,6,7,8,9], 1), y_train1)
y_val1 = y_val1[['above_average']]
y_pred1 = model1.predict(np.delete(X_val, [5,6,7,8,9], 1))
round(accuracy_score(y_val1,y_pred1),2)
#Model without room_type
model1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)
model1.fit(np.delete(X_train, [12,13,14], 1), y_train1)
y_pred1 = model1.predict(np.delete(X_val, [12,13,14], 1))
round(accuracy_score(y_val1,y_pred1),2)
#Model without number_of_reviews
model1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)
model1.fit(np.delete(X_train, 10, 1), y_train1)
y_pred1 = model1.predict(np.delete(X_val, 10, 1))
round(accuracy_score(y_val1,y_pred1),2)
#Model without reviews_per_month
model1 = LogisticRegression(solver='liblinear', C=1.0, random_state=42)
model1.fit(np.delete(X_train, 11, 1), y_train1)
y_pred1 = model1.predict(np.delete(X_val, 11, 1))
round(accuracy_score(y_val1,y_pred1),2)
```
Excluding number_of_reviews or reviews_per_month does not change the overall accuracy.
### Question 6
* For this question, we'll see how to use a linear regression model from Scikit-Learn
* We'll need to use the original column `'price'`. Apply the logarithmic transformation to this column.
* Fit the Ridge regression model on the training data.
* This model has a parameter `alpha`. Let's try the following values: `[0, 0.01, 0.1, 1, 10]`
* Which of these alphas leads to the best RMSE on the validation set? Round your RMSE scores to 3 decimal digits.
If there are multiple options, select the smallest `alpha`.
```
from sklearn.linear_model import Ridge
def rmse(y, y_pred):
    """Root-mean-squared error between targets *y* and predictions *y_pred*."""
    squared_residuals = (y - y_pred) ** 2
    return np.sqrt(squared_residuals.mean())
# Prices are heavy-tailed, so the regression targets log(price).
# NOTE(review): np.log (not log1p) -- assumes price > 0 everywhere.
y_train = pd.DataFrame(y_train)
y_train
#Log Transformation on Price
y_train = np.log(y_train['price'])
y_train
y_train = pd.DataFrame(y_train)
y_train
y_val = np.log(y_val['price'])
y_val = pd.DataFrame(y_val)
y_val
y_test = np.log(y_test['price'])
y_test = pd.DataFrame(y_test)
y_test
X_train = pd.DataFrame(X_train)
X_train
#Ridge Regression
from sklearn.linear_model import Ridge
# Sweep regularization strengths; alpha=0 is plain least squares.
for a in [0, 0.01, 0.1, 1, 10]:
    clf = Ridge(alpha=a)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_val)
    rmse_score = rmse(y_val, y_pred)
    print('RMSE for', a, 'is', rmse_score)
```
All RMSE values are very close to each other; however, the minimum one belongs to alpha=0.01.
| github_jupyter |
# Artificial Intelligence Nanodegree
## Convolutional Neural Networks
---
In this notebook, we train an MLP to classify images from the MNIST database.
### 1. Load MNIST Database
```
from keras.datasets import mnist

# use Keras to import the pre-shuffled MNIST database (28x28 grayscale digits)
(X_train, y_train), (X_test, y_test) = mnist.load_data()

print("The MNIST database has a training set of %d examples." % len(X_train))
print("The MNIST database has a test set of %d examples." % len(X_test))
```
### 2. Visualize the First Six Training Images
```
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.cm as cm
import numpy as np

# plot the first six training images in one row, hiding the axis ticks
fig = plt.figure(figsize=(20,20))
for i in range(6):
    ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[])
    ax.imshow(X_train[i], cmap='gray')
    ax.set_title(str(y_train[i]))  # the title is the digit's label
```
### 3. View an Image in More Detail
```
def visualize_input(img, ax):
    """Draw *img* on *ax* and overlay each pixel's (rounded) value.

    Text is white on dark pixels and black on bright ones so it stays
    readable against the grayscale background.
    """
    ax.imshow(img, cmap='gray')
    rows, cols = img.shape
    contrast_cutoff = img.max() / 2.5
    for r in range(rows):
        for c in range(cols):
            value = img[r][c]
            ax.annotate(str(round(value, 2)), xy=(c, r),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color='white' if value < contrast_cutoff else 'black')
# Inspect the first training digit pixel-by-pixel.
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
visualize_input(X_train[0], ax)
```
### 4. Rescale the Images by Dividing Every Pixel in Every Image by 255
```
# rescale [0,255] --> [0,1]; casting to float32 first makes the division
# floating-point and matches Keras' default dtype
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
```
### 5. Encode Categorical Integer Labels Using a One-Hot Scheme
```
from keras.utils import np_utils

# print first ten (integer-valued) training labels
print('Integer-valued labels:')
print(y_train[:10])

# one-hot encode the labels: digit k -> 10-dim vector with a 1 at index k,
# as required by the categorical_crossentropy loss
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

# print first ten (one-hot) training labels
print('One-hot labels:')
print(y_train[:10])
```
### 6. Define the Model Architecture
```
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten

# define the model: flatten 28x28 -> 784, two 512-unit ReLU layers with
# 20% dropout each, and a softmax output over the 10 digit classes
model = Sequential()
model.add(Flatten(input_shape=X_train.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

# summarize the model
model.summary()
```
### 7. Compile the Model
```
# compile the model; categorical_crossentropy matches the one-hot labels
model.compile(loss='categorical_crossentropy', optimizer='sgd',
              metrics=['accuracy'])
```
### 8. Calculate the Classification Accuracy on the Test Set (Before Training)
```
# evaluate test accuracy before training (baseline should be near 10% chance)
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]  # score is [loss, accuracy]

# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
```
### 9. Train the Model
```
from keras.callbacks import ModelCheckpoint
import time

start = time.time()

# train the model, checkpointing only weights that improve the monitored
# metric (ModelCheckpoint's default monitor is val_loss)
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5',
                               verbose=1, save_best_only=True)
hist = model.fit(X_train, y_train, batch_size=512, epochs=10,
                 validation_split=0.2, callbacks=[checkpointer],
                 verbose=2, shuffle=True)

training_duration = time.time() - start
print(training_duration)  # wall-clock seconds spent in fit()
```
### 10. Load the Model with the Best Classification Accuracy on the Validation Set
```
# load the checkpointed weights (best val_loss, ModelCheckpoint's default
# monitor, despite the original wording about validation accuracy)
model.load_weights('mnist.model.best.hdf5')
```
### 11. Calculate the Classification Accuracy on the Test Set
```
# evaluate test accuracy after training with the best checkpoint loaded
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]  # score is [loss, accuracy]

# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
```
| github_jupyter |
```
from __future__ import absolute_import
import sys
import os
try:
from dotenv import find_dotenv, load_dotenv
except:
pass
import argparse
try:
sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))
except:
sys.path.append(os.path.join(os.getcwd(), '../src'))
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torchcontrib.optim import SWA
from torch.optim import Adam, SGD
from torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau, CyclicLR, \
CosineAnnealingWarmRestarts
from consNLP.data import load_data, data_utils, fetch_dataset
from consNLP.models import transformer_models, activations, layers, losses, scorers
from consNLP.visualization import visualize
from consNLP.trainer.trainer import BasicTrainer, PLTrainer, test_pl_trainer
from consNLP.trainer.trainer_utils import set_seed, _has_apex, _torch_lightning_available, _has_wandb, _torch_gpu_available, _num_gpus, _torch_tpu_available
from consNLP.preprocessing.custom_tokenizer import BERTweetTokenizer
if _has_apex:
#from torch.cuda import amp
from apex import amp
if _torch_tpu_available:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
if _has_wandb:
import wandb
try:
load_dotenv(find_dotenv())
wandb.login(key=os.environ['WANDB_API_KEY'])
except:
_has_wandb = False
if _torch_lightning_available:
import pytorch_lightning as pl
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.metrics.metric import NumpyMetric
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback
import tokenizers
from transformers import AutoModel, AutoTokenizer, AdamW, get_linear_schedule_with_warmup, AutoConfig
load_dotenv(find_dotenv())
# Pull the IMDB 50k movie-reviews dataset from Kaggle into ../data
# (requires Kaggle API credentials in the environment).
fetch_dataset(project_dir='../', download_from_kaggle=True,
              kaggle_dataset='lakshmi25npathi/imdb-dataset-of-50k-movie-reviews')
parser = argparse.ArgumentParser(prog='Torch trainer function',conflict_handler='resolve')
parser.add_argument('--train_data', type=str, default='../data/raw/IMDB Dataset.csv', required=False,
help='train data')
parser.add_argument('--val_data', type=str, default='', required=False,
help='validation data')
parser.add_argument('--test_data', type=str, default=None, required=False,
help='test data')
parser.add_argument('--task_type', type=str, default='binary_sequence_classification', required=False,
help='type of task')
parser.add_argument('--transformer_model_pretrained_path', type=str, default='roberta-base', required=False,
help='transformer model pretrained path or huggingface model name')
parser.add_argument('--transformer_config_path', type=str, default='roberta-base', required=False,
help='transformer config file path or huggingface model name')
parser.add_argument('--transformer_tokenizer_path', type=str, default='roberta-base', required=False,
help='transformer tokenizer file path or huggingface model name')
parser.add_argument('--bpe_vocab_path', type=str, default='', required=False,
help='bytepairencoding vocab file path')
parser.add_argument('--bpe_merges_path', type=str, default='', required=False,
help='bytepairencoding merges file path')
parser.add_argument('--berttweettokenizer_path', type=str, default='', required=False,
help='BERTweet tokenizer path')
parser.add_argument('--max_text_len', type=int, default=100, required=False,
help='maximum length of text')
parser.add_argument('--epochs', type=int, default=5, required=False,
help='number of epochs')
parser.add_argument('--lr', type=float, default=.00003, required=False,
help='learning rate')
parser.add_argument('--loss_function', type=str, default='bcelogit', required=False,
help='loss function')
parser.add_argument('--metric', type=str, default='f1', required=False,
help='scorer metric')
parser.add_argument('--use_lightning_trainer', type=bool, default=False, required=False,
help='if lightning trainer needs to be used')
parser.add_argument('--use_torch_trainer', type=bool, default=True, required=False,
help='if custom torch trainer needs to be used')
parser.add_argument('--use_apex', type=bool, default=False, required=False,
help='if apex needs to be used')
parser.add_argument('--use_gpu', type=bool, default=False, required=False,
help='GPU mode')
parser.add_argument('--use_TPU', type=bool, default=False, required=False,
help='TPU mode')
parser.add_argument('--num_gpus', type=int, default=0, required=False,
help='Number of GPUs')
parser.add_argument('--num_tpus', type=int, default=0, required=False,
help='Number of TPUs')
parser.add_argument('--train_batch_size', type=int, default=16, required=False,
help='train batch size')
parser.add_argument('--eval_batch_size', type=int, default=16, required=False,
help='eval batch size')
parser.add_argument('--model_save_path', type=str, default='../models/sentiment_classification/', required=False,
help='seed')
parser.add_argument('--wandb_logging', type=bool, default=False, required=False,
help='wandb logging needed')
parser.add_argument('--seed', type=int, default=42, required=False,
help='seed')
args, _ = parser.parse_known_args()
# Report which optional capabilities are active in this environment.
print("Wandb Logging: {}, GPU: {}, Pytorch Lightning: {}, TPU: {}, Apex: {}".format(
    _has_wandb and args.wandb_logging, _torch_gpu_available,
    _torch_lightning_available and args.use_lightning_trainer, _torch_tpu_available, _has_apex))

# Derive per-task post-processing:
#   reshape          -- flatten token-level outputs before scoring
#   final_activation -- activation applied when the loss works on raw logits
#   convert_output   -- how raw outputs become labels ('round' or 'max')
reshape = False
final_activation = None
convert_output = None

if args.task_type == 'binary_sequence_classification':
    if args.metric != 'roc_auc_score':  # AUC consumes probabilities directly
        convert_output = 'round'
    if args.loss_function == 'bcelogit':
        final_activation = 'sigmoid'
elif args.task_type == 'multiclass_sequence_classification':
    convert_output = 'max'
elif args.task_type == 'binary_token_classification':
    reshape = True
    if args.metric != 'roc_auc_score':
        convert_output = 'round'
    if args.loss_function == 'bcelogit':
        final_activation = 'sigmoid'
elif args.task_type == 'multiclass_token_classification':
    reshape = True
    convert_output = 'max'
# Load the review data, keep a 1k-row sample, encode the labels, and take
# the first KFold split as the train/validation partition.
df = load_data.load_pandas_df(args.train_data, sep=',')
df = df.iloc[:1000]
df.head(5)

model_save_dir = args.model_save_path
try:
    os.makedirs(model_save_dir)
except OSError:
    # directory already exists (or cannot be created) -- proceed either way
    pass

# Map sentiment strings to ints; the mapping is pickled next to the model.
df.sentiment, label2idx = data_utils.convert_categorical_label_to_int(
    df.sentiment, save_path=os.path.join(model_save_dir, 'label2idx.pkl'))
df.head(5)

from sklearn.model_selection import KFold

# Only the first of the 5 folds is used (a single 80/20 split).
train_index, val_index = next(iter(KFold(5).split(df.review, df.sentiment)))
train_df = df.iloc[train_index].reset_index(drop=True)
val_df = df.iloc[val_index].reset_index(drop=True)
train_df.shape, val_df.shape
# Tokenizer selection: BERTweet's custom tokenizer when a path is given,
# otherwise the HuggingFace AutoTokenizer for the chosen model.
if args.berttweettokenizer_path:
    tokenizer = BERTweetTokenizer(args.berttweettokenizer_path)
else:
    tokenizer = AutoTokenizer.from_pretrained(args.transformer_model_pretrained_path)

# Optional byte-level BPE tokenizer (needs both vocab and merges files);
# falls back to None when the paths are missing or invalid.
if not args.berttweettokenizer_path:
    try:
        bpetokenizer = tokenizers.ByteLevelBPETokenizer(args.bpe_vocab_path,
                                                        args.bpe_merges_path)
    except:
        bpetokenizer = None
else:
    bpetokenizer = None

# Wrap the train/val frames in TransformerDataset (handles tokenization,
# truncation/padding to max_text_len, and label tensors).
train_dataset = data_utils.TransformerDataset(train_df.review, bpetokenizer=bpetokenizer, tokenizer=tokenizer, MAX_LEN=args.max_text_len,
                                              target_label=train_df.sentiment, sequence_target=False, target_text=None, conditional_label=None, conditional_all_labels=None)
val_dataset = data_utils.TransformerDataset(val_df.review, bpetokenizer=bpetokenizer, tokenizer=tokenizer, MAX_LEN=args.max_text_len,
                                            target_label=val_df.sentiment, sequence_target=False, target_text=None, conditional_label=None, conditional_all_labels=None)
# Backbone + classification head; hidden states and attentions are exposed
# so the head can use intermediate representations.
config = AutoConfig.from_pretrained(args.transformer_config_path, output_hidden_states=True, output_attentions=True)
basemodel = AutoModel.from_pretrained(args.transformer_model_pretrained_path, config=config)
model = transformer_models.TransformerWithCLS(basemodel)

# On TPU, shard the data across XLA processes with DistributedSampler.
if _torch_tpu_available and args.use_TPU:
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset,
        num_replicas=xm.xrt_world_size(),
        rank=xm.get_ordinal(),
        shuffle=True
    )
    val_sampler = torch.utils.data.distributed.DistributedSampler(
        val_dataset,
        num_replicas=xm.xrt_world_size(),
        rank=xm.get_ordinal(),
        shuffle=False
    )

if _torch_tpu_available and args.use_TPU:
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch_size, sampler=train_sampler,
        drop_last=True, num_workers=2)
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.eval_batch_size, sampler=val_sampler,
        drop_last=False, num_workers=1)
else:
    # NOTE(review): the CPU/GPU train loader uses shuffle=False (the
    # DataLoader default), so training data is fed in file order --
    # confirm this is intended.
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch_size)
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.eval_batch_size)
```
### Run with Pytorch Trainer
```
# Dispatch to one of two training paths: the library's custom torch trainer
# (with optional TPU multiprocessing) or the PyTorch Lightning trainer.
if args.use_torch_trainer:
    device = torch.device("cuda" if _torch_gpu_available and args.use_gpu else "cpu")
    if _torch_tpu_available and args.use_TPU:
        device = xm.xla_device()
    print("Device: {}".format(device))
    # Multi-TPU: wrap the loader so each XLA device gets its own shard.
    if args.use_TPU and _torch_tpu_available and args.num_tpus > 1:
        train_data_loader = torch_xla.distributed.parallel_loader.ParallelLoader(train_data_loader, [device])
        train_data_loader = train_data_loader.per_device_loader(device)
    # NOTE(review): val_data_loader doubles as the test loader here.
    trainer = BasicTrainer(model, train_data_loader, val_data_loader, device, args.transformer_model_pretrained_path,
                           final_activation=final_activation,
                           test_data_loader=val_data_loader)
    # Standard transformer fine-tuning setup: no weight decay on biases
    # and LayerNorm parameters.
    param_optimizer = list(trainer.model.named_parameters())
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_parameters = [
        {
            "params": [
                p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.001,
        },
        {
            "params": [
                p for n, p in param_optimizer if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]
    num_train_steps = int(len(train_data_loader) * args.epochs)
    if _torch_tpu_available and args.use_TPU:
        # Scale the learning rate by the number of XLA workers.
        optimizer = AdamW(optimizer_parameters, lr=args.lr*xm.xrt_world_size())
    else:
        optimizer = AdamW(optimizer_parameters, lr=args.lr)
    if args.use_apex and _has_apex:
        # O1: mixed precision with automatic casts.
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)
    loss = losses.get_loss(args.loss_function)
    scorer = scorers.SKMetric(args.metric, convert=convert_output, reshape=reshape)
    def _mp_fn(rank, flags, trainer, epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus,
               max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed):
        # Per-process entry point for xmp.spawn (TPU multiprocessing).
        torch.set_default_tensor_type('torch.FloatTensor')
        a = trainer.train(epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus,
                          max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed)
    FLAGS = {}
    if _torch_tpu_available and args.use_TPU:
        xmp.spawn(_mp_fn, args=(FLAGS, trainer, args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus,
                                1, 3, False, args.use_apex, False, args.seed), nprocs=8, start_method='fork')
    else:
        use_wandb = _has_wandb and args.wandb_logging
        trainer.train(args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus,
                      max_grad_norm=1, early_stopping_rounds=3, snapshot_ensemble=False, is_amp=args.use_apex, use_wandb=use_wandb, seed=args.seed)
elif args.use_lightning_trainer and _torch_lightning_available:
    from pytorch_lightning import Trainer, seed_everything
    seed_everything(args.seed)
    loss = losses.get_loss(args.loss_function)
    scorer = scorers.PLMetric(args.metric, convert=convert_output, reshape=reshape)
    log_args = {'description': args.transformer_model_pretrained_path, 'loss': loss.__class__.__name__, 'epochs': args.epochs, 'learning_rate': args.lr}
    # NOTE(review): wandb_logger is only defined on this conditional path;
    # if args.wandb_logging is True while wandb is unavailable, the branches
    # below that reference wandb_logger would raise NameError.
    if _has_wandb and not _torch_tpu_available and args.wandb_logging:
        wandb.init(project="Project", config=log_args)
        wandb_logger = WandbLogger()
    # Keep only the single best checkpoint by validation loss...
    checkpoint_callback = ModelCheckpoint(
        filepath=args.model_save_path,
        save_top_k=1,
        verbose=True,
        monitor='val_loss',
        mode='min'
    )
    # ...and stop early after 3 epochs without val_loss improvement.
    earlystop = EarlyStopping(
        monitor='val_loss',
        patience=3,
        verbose=False,
        mode='min'
    )
    # Trainer construction: precision=16 when apex is present; the wandb
    # logger is attached only when requested.
    if args.use_gpu and _torch_gpu_available:
        print("using GPU")
        if args.wandb_logging:
            if _has_apex:
                trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, precision=16,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
            else:
                trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
        else:
            if _has_apex:
                trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, precision=16,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
            else:
                trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
    elif args.use_TPU and _torch_tpu_available:
        print("using TPU")
        if _has_apex:
            trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, precision=16,
                              checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
        else:
            trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs,
                              checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
    else:
        print("using CPU")
        if args.wandb_logging:
            if _has_apex:
                trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger, precision=16,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
            else:
                trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
        else:
            if _has_apex:
                trainer = Trainer(max_epochs=args.epochs, precision=16,
                                  checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
            else:
                trainer = Trainer(max_epochs=args.epochs, checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
    num_train_steps = int(len(train_data_loader) * args.epochs)
    pltrainer = PLTrainer(num_train_steps, model, scorer, loss, args.lr,
                          final_activation=final_activation, seed=42)
    #try:
    #    print ("Loaded model from previous checkpoint")
    #    pltrainer = PLTrainer.load_from_checkpoint(args.model_save_path)
    #except:
    #    pass
    trainer.fit(pltrainer, train_data_loader, val_data_loader)
    # NOTE(review): pl.Trainer has no public `test_output` attribute;
    # presumably set somewhere by the consNLP wrappers -- verify before
    # relying on it.
    test_output1 = trainer.test_output
```
### Run with Pytorch Lightning Trainer
```
parser = argparse.ArgumentParser(prog='Torch trainer function',conflict_handler='resolve')
parser.add_argument('--train_data', type=str, default='../data/raw/IMDB Dataset.csv', required=False,
help='train data')
parser.add_argument('--val_data', type=str, default='', required=False,
help='validation data')
parser.add_argument('--test_data', type=str, default=None, required=False,
help='test data')
parser.add_argument('--transformer_model_pretrained_path', type=str, default='roberta-base', required=False,
help='transformer model pretrained path or huggingface model name')
parser.add_argument('--transformer_config_path', type=str, default='roberta-base', required=False,
help='transformer config file path or huggingface model name')
parser.add_argument('--transformer_tokenizer_path', type=str, default='roberta-base', required=False,
help='transformer tokenizer file path or huggingface model name')
parser.add_argument('--bpe_vocab_path', type=str, default='', required=False,
help='bytepairencoding vocab file path')
parser.add_argument('--bpe_merges_path', type=str, default='', required=False,
help='bytepairencoding merges file path')
parser.add_argument('--berttweettokenizer_path', type=str, default='', required=False,
help='BERTweet tokenizer path')
parser.add_argument('--max_text_len', type=int, default=100, required=False,
help='maximum length of text')
parser.add_argument('--epochs', type=int, default=5, required=False,
help='number of epochs')
parser.add_argument('--lr', type=float, default=.00003, required=False,
help='learning rate')
parser.add_argument('--loss_function', type=str, default='bcelogit', required=False,
help='loss function')
parser.add_argument('--metric', type=str, default='f1', required=False,
help='scorer metric')
parser.add_argument('--use_lightning_trainer', type=bool, default=True, required=False,
help='if lightning trainer needs to be used')
parser.add_argument('--use_torch_trainer', type=bool, default=False, required=False,
help='if custom torch trainer needs to be used')
parser.add_argument('--use_apex', type=bool, default=False, required=False,
help='if apex needs to be used')
parser.add_argument('--use_gpu', type=bool, default=False, required=False,
help='GPU mode')
parser.add_argument('--use_TPU', type=bool, default=False, required=False,
help='TPU mode')
parser.add_argument('--num_gpus', type=int, default=0, required=False,
help='Number of GPUs')
parser.add_argument('--num_tpus', type=int, default=0, required=False,
help='Number of TPUs')
parser.add_argument('--train_batch_size', type=int, default=16, required=False,
help='train batch size')
parser.add_argument('--eval_batch_size', type=int, default=16, required=False,
help='eval batch size')
parser.add_argument('--model_save_path', type=str, default='../models/sentiment_classification/', required=False,
help='seed')
parser.add_argument('--wandb_logging', type=bool, default=False, required=False,
help='wandb logging needed')
parser.add_argument('--seed', type=int, default=42, required=False,
help='seed')
args, _ = parser.parse_known_args()
# Summarize which optional capabilities are active for this run
# (library availability AND the corresponding command-line flag).
print ("Wandb Logging: {}, GPU: {}, Pytorch Lightning: {}, TPU: {}, Apex: {}".format(\
_has_wandb and args.wandb_logging, _torch_gpu_available,\
_torch_lightning_available and args.use_lightning_trainer, _torch_tpu_available, _has_apex))
# Dispatch on the trainer choice: either the custom BasicTrainer torch loop,
# or the pytorch-lightning Trainer.  Indentation was lost in the notebook
# export; restore the original nesting before running this cell.
if args.use_torch_trainer:
# --- custom torch training loop ---
device = torch.device("cuda" if _torch_gpu_available and args.use_gpu else "cpu")
if _torch_tpu_available and args.use_TPU:
device=xm.xla_device()
print ("Device: {}".format(device))
# Multi-TPU: wrap the loader so each TPU core gets its own shard.
if args.use_TPU and _torch_tpu_available and args.num_tpus > 1:
train_data_loader = torch_xla.distributed.parallel_loader.ParallelLoader(train_data_loader, [device])
train_data_loader = train_data_loader.per_device_loader(device)
trainer = BasicTrainer(model, train_data_loader, val_data_loader, device, args.transformer_model_pretrained_path, \
final_activation=final_activation, \
test_data_loader=val_data_loader)
# Standard transformer fine-tuning recipe: no weight decay on biases and
# LayerNorm parameters, small decay on everything else.
param_optimizer = list(trainer.model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.001,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
num_train_steps = int(len(train_data_loader) * args.epochs)
# On TPU, scale the learning rate by the number of replicas.
if _torch_tpu_available and args.use_TPU:
optimizer = AdamW(optimizer_parameters, lr=args.lr*xm.xrt_world_size())
else:
optimizer = AdamW(optimizer_parameters, lr=args.lr)
if args.use_apex and _has_apex:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)
loss = losses.get_loss(args.loss_function)
scorer = scorers.SKMetric(args.metric, convert=convert_output, reshape=reshape)
# Per-process entry point for xmp.spawn; the return value is discarded.
def _mp_fn(rank, flags, trainer, epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus, \
max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed):
torch.set_default_tensor_type('torch.FloatTensor')
a = trainer.train(epochs, lr, metric, loss_function, optimizer, scheduler, model_save_path, num_gpus, num_tpus, \
max_grad_norm, early_stopping_rounds, snapshot_ensemble, is_amp, use_wandb, seed)
FLAGS = {}
if _torch_tpu_available and args.use_TPU:
# Fork 8 TPU processes; wandb is disabled in the spawned workers.
xmp.spawn(_mp_fn, args=(FLAGS, trainer, args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus, \
1, 3, False, args.use_apex, False, args.seed), nprocs=8, start_method='fork')
else:
use_wandb = _has_wandb and args.wandb_logging
trainer.train(args.epochs, args.lr, scorer, loss, optimizer, scheduler, args.model_save_path, args.num_gpus, args.num_tpus, \
max_grad_norm=1, early_stopping_rounds=3, snapshot_ensemble=False, is_amp=args.use_apex, use_wandb=use_wandb, seed=args.seed)
elif args.use_lightning_trainer and _torch_lightning_available:
# --- pytorch-lightning training path ---
from pytorch_lightning import Trainer, seed_everything
seed_everything(args.seed)
loss = losses.get_loss(args.loss_function)
scorer = scorers.PLMetric(args.metric, convert=convert_output, reshape=reshape)
log_args = {'description': args.transformer_model_pretrained_path, 'loss': loss.__class__.__name__, 'epochs': args.epochs, 'learning_rate': args.lr}
if _has_wandb and not _torch_tpu_available and args.wandb_logging:
wandb.init(project="Project",config=log_args)
wandb_logger = WandbLogger()
# Keep only the single best checkpoint by validation loss.
checkpoint_callback = ModelCheckpoint(
filepath=args.model_save_path,
save_top_k=1,
verbose=True,
monitor='val_loss',
mode='min'
)
# Stop after 3 epochs without val_loss improvement.
earlystop = EarlyStopping(
monitor='val_loss',
patience=3,
verbose=False,
mode='min'
)
# Pick the Trainer variant: GPU/TPU/CPU x wandb on/off x apex (precision=16).
if args.use_gpu and _torch_gpu_available:
print ("using GPU")
if args.wandb_logging:
if _has_apex:
trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, precision=16, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, logger=wandb_logger, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
if _has_apex:
trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, precision=16, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
trainer = Trainer(gpus=args.num_gpus, max_epochs=args.epochs, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
elif args.use_TPU and _torch_tpu_available:
print ("using TPU")
if _has_apex:
trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, precision=16, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
trainer = Trainer(num_tpu_cores=args.num_tpus, max_epochs=args.epochs, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
print ("using CPU")
if args.wandb_logging:
if _has_apex:
trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger, precision=16, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
trainer = Trainer(max_epochs=args.epochs, logger=wandb_logger, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
if _has_apex:
trainer = Trainer(max_epochs=args.epochs, precision=16, \
checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
else:
trainer = Trainer(max_epochs=args.epochs, checkpoint_callback=checkpoint_callback, callbacks=[earlystop])
num_train_steps = int(len(train_data_loader) * args.epochs)
pltrainer = PLTrainer(num_train_steps, model, scorer, loss, args.lr, \
final_activation=final_activation, seed=42)
#try:
# print ("Loaded model from previous checkpoint")
# pltrainer = PLTrainer.load_from_checkpoint(args.model_save_path)
#except:
# pass
trainer.fit(pltrainer, train_data_loader, val_data_loader)
# Score the validation set with the lightning model and compare against the
# earlier run's predictions (test_output1) via Pearson correlation.
from tqdm import tqdm
test_output2 = []
for val_batch in tqdm(val_data_loader):
out = torch.sigmoid(pltrainer(val_batch)).detach().cpu().numpy()
test_output2.extend(out[:,0].tolist())
#test_output2 = np.concatenate(test_output2)
test_output1 = np.array(test_output1)[:,0]
test_output2 = np.array(test_output2)
np.corrcoef(test_output1,test_output2)
```
| github_jupyter |
# Visualization
PySwarms implements tools for visualizing the behavior of your swarm. These are built on top of `matplotlib`, thus rendering charts that are easy to use and highly-customizable. However, it must be noted that in order to use the animation capability in PySwarms (and in `matplotlib` for that matter), at least one writer tool must be installed. Some available tools include:
* ffmpeg
* ImageMagick
* MovieWriter (base)
In the following demonstration, the `ffmpeg` tool is used. For Linux and Windows users, it can be installed via:
```shell
$ conda install -c conda-forge ffmpeg
```
```
import sys
# Make the parent directory importable — presumably so the local pyswarms
# checkout is used instead of an installed copy (TODO confirm).
sys.path.append('../')
```
First, we need to import the `pyswarms.utils.environments.PlotEnvironment` class. This enables us to use various methods to create animations or plot costs.
```
# Import modules
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation, rc
from IPython.display import HTML
# Import PySwarms
import pyswarms as ps
from pyswarms.utils.functions import single_obj as fx
from pyswarms.utils.environments import PlotEnvironment
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
```
The first step is to create an optimizer. Here, we're going to use Global-best PSO to find the minima of a sphere function. As usual, we simply create an instance of its class `pyswarms.single.GlobalBestPSO` by passing the required parameters that we will use.
```
# Swarm behaviour coefficients: cognitive (c1), social (c2), inertia (w).
options = dict(c1=0.5, c2=0.3, w=0.9)
# Global-best PSO with a 10-particle swarm over a 3-dimensional search space.
optimizer = ps.single.GlobalBestPSO(
    n_particles=10,
    dimensions=3,
    options=options,
)
```
## Initializing the `PlotEnvironment`
Think of the `PlotEnvironment` as a container in which various plotting methods can be called. In order to create an instance of this class, we need to pass the optimizer object, the objective function, and the number of iterations needed. The `PlotEnvironment` will then simulate these parameters so as to build the plots.
```
plt_env = PlotEnvironment(optimizer, fx.sphere_func, 1000)
```
## Plotting the cost
To plot the cost, we simply need to call the `plot_cost()` function. There are pre-set defaults in this method already, but we can customize by passing various arguments into it such as figure size, title, x- and y-labels and etc. Furthermore, this method also accepts a keyword argument `**kwargs` similar to `matplotlib`. This enables us to further customize various artists and elements in the plot.
For now, let's stick with the default one. We'll just call the `plot_cost()` and `show()` it.
```
# Default cost-history plot; the trailing ';' suppresses the notebook repr echo.
plt_env.plot_cost(figsize=(8,6));
plt.show()
```
## Animating swarms
The `PlotEnvironment()` offers two methods to perform animation, `plot_particles2D()` and `plot_particles3D()`. As its name suggests, these methods plot the particles in a 2-D or 3-D space. You can choose which dimensions will be plotted using the `index` argument, but the default takes the first 2 (or first three in 3D) indices of your swarm dimension.
Each animation method returns a `matplotlib.animation.Animation` class that still needs to be animated by a `Writer` class (thus necessitating the installation of a writer module). For the proceeding examples, we will convert the animations into an HTML5 video. In such case, we need to invoke some extra methods to do just that.
```
# equivalent to rcParams['animation.html'] = 'html5'
# See http://louistiao.me/posts/notebooks/save-matplotlib-animations-as-gifs/
rc('animation', html='html5')
```
### Plotting in 2-D space
```
HTML(plt_env.plot_particles2D(limits=((-1.2,1.2),(-1.2,1.2))).to_html5_video())
```
### Plotting in 3-D space
```
HTML(plt_env.plot_particles3D(limits=((-1.2,1.2),(-1.2,1.2),(-1.2,1.2))).to_html5_video())
```
| github_jupyter |
# Amazon SageMaker Experiment Trials for Distributed Training of Mask-RCNN
This notebook is a step-by-step tutorial on Amazon SageMaker Experiment Trials for distributed training of [Mask R-CNN](https://arxiv.org/abs/1703.06870) implemented in [TensorFlow](https://www.tensorflow.org/) framework.
Concretely, we will describe the steps for SageMaker Experiment Trials for training [TensorPack Faster-RCNN/Mask-RCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) and [AWS Samples Mask R-CNN](https://github.com/aws-samples/mask-rcnn-tensorflow) in [Amazon SageMaker](https://aws.amazon.com/sagemaker/) using [Amazon S3](https://aws.amazon.com/s3/) as data source.
The outline of steps is as follows:
1. Stage COCO 2017 dataset in [Amazon S3](https://aws.amazon.com/s3/)
2. Build SageMaker training image and push it to [Amazon ECR](https://aws.amazon.com/ecr/)
3. Configure data input channels
4. Configure hyper-parameters
5. Define training metrics
6. Define training job
7. Define SageMaker Experiment Trials to start the training jobs
Before we get started, let us initialize two python variables ```aws_region``` and ```s3_bucket``` that we will use throughout the notebook:
```
# Placeholders: fill in both values before running any other cell
# (the cell is intentionally not runnable as shipped).
aws_region = # aws-region-code e.g. us-east-1
s3_bucket = # your-s3-bucket-name
```
## Stage COCO 2017 dataset in Amazon S3
We use [COCO 2017 dataset](http://cocodataset.org/#home) for training. We download COCO 2017 training and validation dataset to this notebook instance, extract the files from the dataset archives, and upload the extracted files to your Amazon [S3 bucket](https://docs.aws.amazon.com/en_pv/AmazonS3/latest/gsg/CreatingABucket.html) with the prefix ```mask-rcnn/sagemaker/input/train```. The ```prepare-s3-bucket.sh``` script executes this step.
```
!cat ./prepare-s3-bucket.sh
```
Using your *Amazon S3 bucket* as argument, run the cell below. If you have already uploaded COCO 2017 dataset to your Amazon S3 bucket *in this AWS region*, you may skip this step. The expected time to execute this step is 20 minutes.
```
%%time
!./prepare-s3-bucket.sh {s3_bucket}
```
## Build and push SageMaker training images
For this step, the [IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) attached to this notebook instance needs full access to Amazon ECR service. If you created this notebook instance using the ```./stack-sm.sh``` script in this repository, the IAM Role attached to this notebook instance is already setup with full access to ECR service.
Below, we have a choice of two different implementations:
1. [TensorPack Faster-RCNN/Mask-RCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) implementation supports a maximum per-GPU batch size of 1, and does not support mixed precision. It can be used with mainstream TensorFlow releases.
2. [AWS Samples Mask R-CNN](https://github.com/aws-samples/mask-rcnn-tensorflow) is an optimized implementation that supports a maximum batch size of 4 and supports mixed precision. This implementation uses custom TensorFlow ops. The required custom TensorFlow ops are available in [AWS Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) images in ```tensorflow-training``` repository with image tag ```1.15.2-gpu-py36-cu100-ubuntu18.04```, or later.
It is recommended that you build and push both SageMaker training images and use either image for training later.
### TensorPack Faster-RCNN/Mask-RCNN
Use ```./container/build_tools/build_and_push.sh``` script to build and push the TensorPack Faster-RCNN/Mask-RCNN training image to Amazon ECR.
```
!cat ./container/build_tools/build_and_push.sh
```
Using your *AWS region* as argument, run the cell below.
```
%%time
! ./container/build_tools/build_and_push.sh {aws_region}
```
Set ```tensorpack_image``` below to Amazon ECR URI of the image you pushed above.
```
tensorpack_image = # mask-rcnn-tensorpack-sagemaker ECR URI
```
### AWS Samples Mask R-CNN
Use ```./container-optimized/build_tools/build_and_push.sh``` script to build and push the AWS Samples Mask R-CNN training image to Amazon ECR.
```
!cat ./container-optimized/build_tools/build_and_push.sh
```
Using your *AWS region* as argument, run the cell below.
```
%%time
! ./container-optimized/build_tools/build_and_push.sh {aws_region}
```
Set ```aws_samples_image``` below to Amazon ECR URI of the image you pushed above.
```
aws_samples_image = # mask-rcnn-tensorflow-sagemaker ECR URI
```
## SageMaker Initialization
First we upgrade SageMaker to 2.3.0 API. If your notebook is already using latest Sagemaker 2.x API, you may skip the next cell.
```
! pip install --upgrade pip
! pip install sagemaker==2.3.0
```
We have staged the data and we have built and pushed the training docker image to Amazon ECR. Now we are ready to start using Amazon SageMaker.
```
%%time
import os
import time
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.estimator import Estimator
# IAM role the SageMaker training jobs will assume.
role = get_execution_role() # provide a pre-existing role ARN as an alternative to creating a new role
print(f'SageMaker Execution Role:{role}')
# Resolve the current AWS account id via STS.
client = boto3.client('sts')
account = client.get_caller_identity()['Account']
print(f'AWS account:{account}')
# Region of the boto3 session; assumed to match the S3 bucket and ECR image region.
session = boto3.session.Session()
region = session.region_name
print(f'AWS region:{region}')
```
Next, we set ```training_image``` to the Amazon ECR image URI you saved in a previous step.
```
# Placeholder: choose which of the two images built earlier to train with.
training_image = # set to tensorpack_image or aws_samples_image
print(f'Training image: {training_image}')
```
## Define SageMaker Data Channels
Next, we define the *train* data channel using EFS file-system. To do so, we need to specify the EFS file-system id, which is shown in the output of the command below.
```
!df -kh | grep 'fs-' | sed 's/\(fs-[0-9a-z]*\).*/\1/'
```
Set the EFS ```file_system_id``` below to the output of the command shown above. In the cell below, we define the `train` data input channel.
```
from sagemaker.inputs import FileSystemInput
# Specify EFS file system id.
file_system_id = # 'fs-xxxxxxxx'
print(f"EFS file-system-id: {file_system_id}")
# Specify directory path for input data on the file system.
# You need to provide normalized and absolute path below.
file_system_directory_path = '/mask-rcnn/sagemaker/input/train'
print(f'EFS file-system data input path: {file_system_directory_path}')
# Specify the access mode of the mount of the directory associated with the file system.
# Directory must be mounted 'ro'(read-only): training only reads the dataset.
file_system_access_mode = 'ro'
# Specify your file system type
file_system_type = 'EFS'
# 'train' input channel backed by the EFS directory above.
train = FileSystemInput(file_system_id=file_system_id,
file_system_type=file_system_type,
directory_path=file_system_directory_path,
file_system_access_mode=file_system_access_mode)
```
Next, we define the model output location in S3 bucket.
```
# All SageMaker artifacts for this notebook live under this key prefix.
prefix = "mask-rcnn/sagemaker"
# Trained model artifacts are uploaded to <bucket>/<prefix>/output.
s3_output_location = 's3://{}/{}/output'.format(s3_bucket, prefix)
print('S3 model output location: {}'.format(s3_output_location))
```
## Configure Hyper-parameters
Next, we define the hyper-parameters.
Note, some hyper-parameters are different between the two implementations. The batch size per GPU in TensorPack Faster-RCNN/Mask-RCNN is fixed at 1, but is configurable in AWS Samples Mask-RCNN. The learning rate schedule is specified in units of steps in TensorPack Faster-RCNN/Mask-RCNN, but in epochs in AWS Samples Mask-RCNN.
The default learning rate schedule values shown below correspond to training for a total of 24 epochs, at 120,000 images per epoch.
<table align='left'>
<caption>TensorPack Faster-RCNN/Mask-RCNN Hyper-parameters</caption>
<tr>
<th style="text-align:center">Hyper-parameter</th>
<th style="text-align:center">Description</th>
<th style="text-align:center">Default</th>
</tr>
<tr>
<td style="text-align:center">mode_fpn</td>
<td style="text-align:left">Flag to indicate use of Feature Pyramid Network (FPN) in the Mask R-CNN model backbone</td>
<td style="text-align:center">"True"</td>
</tr>
<tr>
<td style="text-align:center">mode_mask</td>
<td style="text-align:left">A value of "False" means Faster-RCNN model, "True" means Mask R-CNN model</td>
<td style="text-align:center">"True"</td>
</tr>
<tr>
<td style="text-align:center">eval_period</td>
<td style="text-align:left">Number of epochs period for evaluation during training</td>
<td style="text-align:center">1</td>
</tr>
<tr>
<td style="text-align:center">lr_schedule</td>
<td style="text-align:left">Learning rate schedule in training steps</td>
<td style="text-align:center">'[240000, 320000, 360000]'</td>
</tr>
<tr>
<td style="text-align:center">batch_norm</td>
<td style="text-align:left">Batch normalization option ('FreezeBN', 'SyncBN', 'GN', 'None') </td>
<td style="text-align:center">'FreezeBN'</td>
</tr>
<tr>
<td style="text-align:center">images_per_epoch</td>
<td style="text-align:left">Images per epoch </td>
<td style="text-align:center">120000</td>
</tr>
<tr>
<td style="text-align:center">data_train</td>
<td style="text-align:left">Training data under data directory</td>
<td style="text-align:center">'coco_train2017'</td>
</tr>
<tr>
<td style="text-align:center">data_val</td>
<td style="text-align:left">Validation data under data directory</td>
<td style="text-align:center">'coco_val2017'</td>
</tr>
<tr>
<td style="text-align:center">resnet_arch</td>
<td style="text-align:left">Must be 'resnet50' or 'resnet101'</td>
<td style="text-align:center">'resnet50'</td>
</tr>
<tr>
<td style="text-align:center">backbone_weights</td>
<td style="text-align:left">ResNet backbone weights</td>
<td style="text-align:center">'ImageNet-R50-AlignPadding.npz'</td>
</tr>
<tr>
<td style="text-align:center">load_model</td>
<td style="text-align:left">Pre-trained model to load</td>
<td style="text-align:center"></td>
</tr>
<tr>
<td style="text-align:center">config:</td>
<td style="text-align:left">Any hyperparamter prefixed with <b>config:</b> is set as a model config parameter</td>
<td style="text-align:center"></td>
</tr>
</table>
<table align='left'>
<caption>AWS Samples Mask-RCNN Hyper-parameters</caption>
<tr>
<th style="text-align:center">Hyper-parameter</th>
<th style="text-align:center">Description</th>
<th style="text-align:center">Default</th>
</tr>
<tr>
<td style="text-align:center">mode_fpn</td>
<td style="text-align:left">Flag to indicate use of Feature Pyramid Network (FPN) in the Mask R-CNN model backbone</td>
<td style="text-align:center">"True"</td>
</tr>
<tr>
<td style="text-align:center">mode_mask</td>
<td style="text-align:left">A value of "False" means Faster-RCNN model, "True" means Mask R-CNN model</td>
<td style="text-align:center">"True"</td>
</tr>
<tr>
<td style="text-align:center">eval_period</td>
<td style="text-align:left">Number of epochs period for evaluation during training</td>
<td style="text-align:center">1</td>
</tr>
<tr>
<td style="text-align:center">lr_epoch_schedule</td>
<td style="text-align:left">Learning rate schedule in epochs</td>
<td style="text-align:center">'[(16, 0.1), (20, 0.01), (24, None)]'</td>
</tr>
<tr>
<td style="text-align:center">batch_size_per_gpu</td>
<td style="text-align:left">Batch size per gpu ( Minimum 1, Maximum 4)</td>
<td style="text-align:center">4</td>
</tr>
<tr>
<td style="text-align:center">batch_norm</td>
<td style="text-align:left">Batch normalization option ('FreezeBN', 'SyncBN', 'GN', 'None') </td>
<td style="text-align:center">'FreezeBN'</td>
</tr>
<tr>
<td style="text-align:center">images_per_epoch</td>
<td style="text-align:left">Images per epoch </td>
<td style="text-align:center">120000</td>
</tr>
<tr>
<td style="text-align:center">data_train</td>
<td style="text-align:left">Training data under data directory</td>
<td style="text-align:center">'train2017'</td>
</tr>
<tr>
<td style="text-align:center">data_val</td>
<td style="text-align:left">Validation data under data directory</td>
<td style="text-align:center">'val2017'</td>
</tr>
<tr>
<td style="text-align:center">resnet_arch</td>
<td style="text-align:left">Must be 'resnet50' or 'resnet101'</td>
<td style="text-align:center">'resnet50'</td>
</tr>
<tr>
<td style="text-align:center">backbone_weights</td>
<td style="text-align:left">ResNet backbone weights</td>
<td style="text-align:center">'ImageNet-R50-AlignPadding.npz'</td>
</tr>
<tr>
<td style="text-align:center">load_model</td>
<td style="text-align:left">Pre-trained model to load</td>
<td style="text-align:center"></td>
</tr>
<tr>
<td style="text-align:center">config:</td>
<td style="text-align:left">Any hyperparamter prefixed with <b>config:</b> is set as a model config parameter</td>
<td style="text-align:center"></td>
</tr>
</table>
```
# Hyper-parameters common to both Mask R-CNN implementations; values are
# passed through to the training container (flags are string-valued there).
hyperparameters = dict(
    mode_fpn="True",        # use the Feature Pyramid Network backbone
    mode_mask="True",       # Mask R-CNN (not plain Faster-RCNN)
    eval_period=1,          # evaluate every epoch
    batch_norm="FreezeBN",  # freeze batch-norm statistics during training
)
```
## Define Training Metrics
Next, we define the regular expressions that SageMaker uses to extract algorithm metrics from training logs and send them to [AWS CloudWatch metrics](https://docs.aws.amazon.com/en_pv/AmazonCloudWatch/latest/monitoring/working_with_metrics.html). These algorithm metrics are visualized in SageMaker console.
```
# Metric names SageMaker should scrape from the training logs and publish to
# CloudWatch.  Every log line has the shape "... <name>: <value> ...", so the
# scrape regex is derived uniformly from the name.
_metric_names = [
    "fastrcnn_losses/box_loss",
    "fastrcnn_losses/label_loss",
    "fastrcnn_losses/label_metrics/accuracy",
    "fastrcnn_losses/label_metrics/false_negative",
    "fastrcnn_losses/label_metrics/fg_accuracy",
    "fastrcnn_losses/num_fg_label",
    "maskrcnn_loss/accuracy",
    "maskrcnn_loss/fg_pixel_ratio",
    "maskrcnn_loss/maskrcnn_loss",
    "maskrcnn_loss/pos_accuracy",
    "mAP(bbox)/IoU=0.5",
    "mAP(bbox)/IoU=0.5:0.95",
    "mAP(bbox)/IoU=0.75",
    "mAP(bbox)/large",
    "mAP(bbox)/medium",
    "mAP(bbox)/small",
    "mAP(segm)/IoU=0.5",
    "mAP(segm)/IoU=0.5:0.95",
    "mAP(segm)/IoU=0.75",
    "mAP(segm)/large",
    "mAP(segm)/medium",
    "mAP(segm)/small",
]


def _metric_regex(name):
    """Build the CloudWatch scrape regex '.*<name>:\\s*(\\S+).*' for *name*,
    escaping the regex metacharacters that occur in the metric names."""
    escaped = name.replace("(", "\\(").replace(")", "\\)").replace(".", "\\.")
    return ".*{}:\\s*(\\S+).*".format(escaped)


metric_definitions = [{"Name": n, "Regex": _metric_regex(n)} for n in _metric_names]
```
## Define SageMaker Experiment
To define SageMaker Experiment, we first install `sagemaker-experiments` package.
```
! pip install sagemaker-experiments==0.1.20
```
Next, we import the SageMaker Experiment modules.
```
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
from smexperiments.tracker import Tracker
import time
```
Next, we define a `Tracker` for tracking input data used in the SageMaker Trials in this Experiment. Specify the S3 URL of your dataset in the `value` below and change the name of the dataset if you are using a different dataset.
```
sm = session.client('sagemaker')
# Record the input dataset as a "Preprocessing" trial component, so every
# trial created later can be associated with the exact data it consumed.
with Tracker.create(display_name="Preprocessing", sagemaker_boto_client=sm) as tracker:
# we can log the s3 uri to the dataset used for training
tracker.log_input(name="coco-2017-dataset",
media_type="s3/uri",
value= f's3://{s3_bucket}/{prefix}/input/train' # specify S3 URL to your dataset
)
```
Next, we create a SageMaker Experiment.
```
# Create a new, uniquely-named (timestamped) Experiment to group the Trials below.
mrcnn_experiment = Experiment.create(
experiment_name=f"mask-rcnn-experiment-{int(time.time())}",
description="Mask R-CNN experiment",
sagemaker_boto_client=sm)
print(mrcnn_experiment)
```
We run the training job in your private VPC, so we need to set the ```subnets``` and ```security_group_ids``` prior to running the cell below. You may specify multiple subnet ids in the ```subnets``` list. The subnets included in the ```subnets``` list must be part of the output of ```./stack-sm.sh``` CloudFormation stack script used to create this notebook instance. Specify only one security group id in ```security_group_ids``` list. The security group id must be part of the output of ```./stack-sm.sh``` script.
```
# Placeholders: fill in the subnet and security-group ids emitted by the
# ./stack-sm.sh CloudFormation stack (training runs inside your private VPC).
security_group_ids = # ['sg-xxxxxxxx']
subnets = # ['subnet-xxxxxxx', 'subnet-xxxxxxx', 'subnet-xxxxxxx']
sagemaker_session = sagemaker.session.Session(boto_session=session)
```
Next, we use SageMaker [Estimator](https://sagemaker.readthedocs.io/en/stable/estimators.html) API to define a SageMaker Training Job for each SageMaker Trial we need to run within the SageMaker Experiment.
We recommend using 8 GPUs, so we set ```train_instance_count=1``` and ```train_instance_type='ml.p3.16xlarge'```, because there are 8 Tesla V100 GPUs per ```ml.p3.16xlarge``` instance. We recommend using 100 GB [Amazon EBS](https://aws.amazon.com/ebs/) storage volume with each training instance, so we set ```train_volume_size = 100```. We want to replicate training data to each training instance, so we set ```input_mode= 'File'```.
Next, we will iterate through the Trial parameters and start two trials, one for ResNet architecture `resnet50`, and a second Trial for `resnet101`.
```
# Start one Trial per backbone: (ResNet architecture, pretrained weights file).
trial_params = [ ('resnet50', 'ImageNet-R50-AlignPadding.npz'),
('resnet101', 'ImageNet-R101-AlignPadding.npz')]
for resnet_arch, backbone_weights in trial_params:
# Per-trial hyper-parameter overrides on top of the common dict.
hyperparameters['resnet_arch'] = resnet_arch
hyperparameters['backbone_weights'] = backbone_weights
trial_name = f"mask-rcnn-{resnet_arch}-{int(time.time())}"
mrcnn_trial = Trial.create(
trial_name=trial_name,
experiment_name=mrcnn_experiment.experiment_name,
sagemaker_boto_client=sm,
)
# associate the preprocessing trial component with the current trial
mrcnn_trial.add_trial_component(tracker.trial_component)
print(mrcnn_trial)
# 4 x ml.p3.16xlarge = 32 V100 GPUs per training job; data replicated in File mode.
mask_rcnn_estimator = Estimator(image_uri=training_image,
role=role,
instance_count=4,
instance_type='ml.p3.16xlarge',
volume_size = 100,
max_run = 400000,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sagemaker_session,
hyperparameters = hyperparameters,
metric_definitions = metric_definitions,
subnets=subnets,
security_group_ids=security_group_ids)
# Specify directory path for log output on the EFS file system.
# You need to provide normalized and absolute path below.
# For example, '/mask-rcnn/sagemaker/output/log'
# Log output directory must not exist
file_system_directory_path = f'/mask-rcnn/sagemaker/output/{mrcnn_trial.trial_name}'
print(f"EFS log directory:{file_system_directory_path}")
# Create the log output directory.
# EFS file-system is mounted on '$HOME/efs' mount point for this notebook.
home_dir=os.environ['HOME']
local_efs_path = os.path.join(home_dir,'efs', file_system_directory_path[1:])
print(f"Creating log directory on EFS: {local_efs_path}")
assert not os.path.isdir(local_efs_path)
! sudo mkdir -p -m a=rw {local_efs_path}
assert os.path.isdir(local_efs_path)
# Specify the access mode of the mount of the directory associated with the file system.
# Directory must be mounted 'rw'(read-write): the job writes logs here.
file_system_access_mode = 'rw'
log = FileSystemInput(file_system_id=file_system_id,
file_system_type=file_system_type,
directory_path=file_system_directory_path,
file_system_access_mode=file_system_access_mode)
data_channels = {'train': train, 'log': log}
# wait=False: both trials run concurrently; the notebook does not block.
mask_rcnn_estimator.fit(inputs=data_channels,
job_name=mrcnn_trial.trial_name,
logs=True,
experiment_config={"TrialName": mrcnn_trial.trial_name,
"TrialComponentDisplayName": "Training"},
wait=False)
# sleep in between starting two trials so the timestamped names differ
time.sleep(2)
# Search filter for the analytics query below: keep only trial components
# whose display name is "Training" and whose recorded max
# maskrcnn_loss/accuracy metric is below 1 (i.e. the metric was logged).
_filters = []
_filters.append({
    "Name": "DisplayName",
    "Operator": "Equals",
    "Value": "Training",
})
_filters.append({
    "Name": "metrics.maskrcnn_loss/accuracy.max",
    "Operator": "LessThan",
    "Value": "1",
})
search_expression = {"Filters": _filters}
from sagemaker.analytics import ExperimentAnalytics
trial_component_analytics = ExperimentAnalytics(
sagemaker_session=sagemaker_session,
experiment_name=mrcnn_experiment.experiment_name,
search_expression=search_expression,
sort_by="metrics.maskrcnn_loss/accuracy.max",
sort_order="Descending",
parameter_names=['resnet_arch']
)
analytic_table = trial_component_analytics.dataframe()
for col in analytic_table.columns:
print(col)
bbox_map=analytic_table[['resnet_arch',
'mAP(bbox)/small - Max',
'mAP(bbox)/medium - Max',
'mAP(bbox)/large - Max']]
bbox_map
segm_map=analytic_table[['resnet_arch',
'mAP(segm)/small - Max',
'mAP(segm)/medium - Max',
'mAP(segm)/large - Max']]
segm_map
```
| github_jupyter |
# Boolean Assignment
```
#Boolean assignment and arithmetic on booleans.
a = True #Declare a boolean value and store it in a variable.
print(type(a)) #Check the type and print the id of the same.
print(id(a))
#bool() of any non-zero integer returns the singleton True, so both names
#point at the same object (object reusability) and their ids match.
x , y = bool(6), bool(6)
print(id(x)) #Check the id of both the variables.
print(id(y))
#Arithmetic operations on boolean data: True behaves as 1, False as 0.
r , s = True , False
total = r + s #sum of both values (renamed from `sum` so the builtin is not shadowed)
diff = r - s #difference between them
pro = r * s #product of both
t = s / r #first value divided by the second (0 / 1 -> 0.0)
w = s % r #remainder after dividing the first value by the second (0 % 1 -> 0)
#Floor division (s // r) also works; dividing BY False raises
#ZeroDivisionError because False == 0.
f = r ** s #first value to the power of the second (1 ** 0 -> 1)
print(bool(total)) #True
print(bool(diff)) #True
print(bool(pro)) #False
print(bool(t)) #False
print(bool(w)) #False
print(bool(f)) #True
#Note: bool objects have no set-style methods such as .difference().
```
#Comparison Operators on boolean values
#True compares as 1 and False as 0; each comparison returns a bool.
A , B = True , False #Take two different boolean values and store them in two different variables.
OP1 = A > B #greater than, '>'  (True > False -> True)
OP2 = A < B #less than, '<'
OP3 = A >= B #greater than or equal to, '>='
OP4 = A <= B #less than or equal to, '<='
print(type(OP1))
print(type(OP2))
print(type(OP3))
print(type(OP4))
#Observe their output (return type should be boolean)
```
#Equality Operator
C , D = True , False #Take two different boolean values and store them in two different variables.
print ( C == D) #Equate them using the equality operators (==, !=)
print ( C != D) #Observe the output (return type should be boolean)
#Logical operators
#Observe the output of below code #Cross check the output manually
print(True and True) #----------->Output is True
print(False and True) #----------->Output is False
print(True and False) #----------->Output is False
print(False and False) #----------->Output is False
print(True or True) #----------->Output is True
print(False or True) #----------->Output is True
print(True or False) #----------->Output is True
print(False or False) #----------->Output is False
print(not True) #----------->Output is False
print(not False) #----------->Output is True
#Bitwise Operators #Do below operations on the values provided below:-
#Bitwise and(&)
print(True & False)
print(True & True)
print(False & False)
#NOTE(review): the next line repeats False & False; False & True was
#probably intended to complete the truth table — confirm.
print(False & False)
#Bitwise or(|) -----> True, False -------> Output is True
print(True | False)
print(True | True)
print(False | False)
#NOTE(review): duplicate again; False | True was probably intended.
print(False | False)
#Bitwise xor(^) -----> True, False -------> Output is True
print(True ^ False)
print(True ^ True)
print(False ^ False)
#NOTE(review): duplicate again; False ^ True was probably intended.
print(False ^ False)
#Bitwise negation(~): ~x == -x - 1, so ~False -> -1 and ~True -> -2
print(~False)
print(~True)
#Bitwise left shift -----> True,2 -------> Output is 4 (1 << 2)
print(True << 2)
#Bitwise right shift ----------> True,2 -------> Output is 0 (1 >> 2)
print(True >> 2)
#Identity operators: CPython keeps True/False as singletons, so identity
#follows equality for bools. Cross check before running the program.
a = True
b = True
print(a is b) #True
print(a is not b) #False
a = False
b = False
print(a is b) #True
print(a is not b) #False
#Membership operation
#in, not in are the two membership operators and they return a boolean value
#Note: True == 1 and False == 0, so {1,2,3, True} collapses to {1, 2, 3}
#and membership tests can also match the numeric 1/0.
print(True in [10,10.20,10+20j,'Python', True])
print(False in (10,10.20,10+20j,'Python', False))
print(True in {1,2,3, True})
#For dicts, `in` tests keys; a duplicate literal key keeps the last value,
#so the dict below is effectively {True: 300, False: 200}.
print(True in {True:100, False:200, True:300})
print(False in {True:100, False:200, True:300})
```
| github_jupyter |
# Training on Multiple GPUs
:label:`sec_multi_gpu`
So far we discussed how to train models efficiently on CPUs and GPUs. We even showed how deep learning frameworks allow one to parallelize computation and communication automatically between them in :numref:`sec_auto_para`. We also showed in :numref:`sec_use_gpu` how to list all the available GPUs on a computer using the `nvidia-smi` command.
What we did *not* discuss is how to actually parallelize deep learning training.
Instead, we implied in passing that one would somehow split the data across multiple devices and make it work. The present section fills in the details and shows how to train a network in parallel when starting from scratch. Details on how to take advantage of functionality in high-level APIs is relegated to :numref:`sec_multi_gpu_concise`.
We assume that you are familiar with minibatch stochastic gradient descent algorithms such as the ones described in :numref:`sec_minibatch_sgd`.
## Splitting the Problem
Let us start with a simple computer vision problem and a slightly archaic network, e.g., with multiple layers of convolutions, pooling, and possibly a few fully-connected layers in the end.
That is, let us start with a network that looks quite similar to LeNet :cite:`LeCun.Bottou.Bengio.ea.1998` or AlexNet :cite:`Krizhevsky.Sutskever.Hinton.2012`.
Given multiple GPUs (2 if it is a desktop server, 4 on an AWS g4dn.12xlarge instance, 8 on a p3.16xlarge, or 16 on a p2.16xlarge), we want to partition training in a manner as to achieve good speedup while simultaneously benefitting from simple and reproducible design choices. Multiple GPUs, after all, increase both *memory* and *computation* ability. In a nutshell, we have the following choices, given a minibatch of training data that we want to classify.
First, we could partition the network across multiple GPUs. That is, each GPU takes as input the data flowing into a particular layer, processes data across a number of subsequent layers and then sends the data to the next GPU.
This allows us to process data with larger networks when compared with what a single GPU could handle.
Besides,
memory footprint per GPU can be well controlled (it is a fraction of the total network footprint).
However, the interface between layers (and thus GPUs) requires tight synchronization. This can be tricky, in particular if the computational workloads are not properly matched between layers. The problem is exacerbated for large numbers of GPUs.
The interface between layers also
requires large amounts of data transfer,
such as activations and gradients.
This may overwhelm the bandwidth of the GPU buses.
Moreover, compute-intensive, yet sequential operations are nontrivial to partition. See e.g., :cite:`Mirhoseini.Pham.Le.ea.2017` for a best effort in this regard. It remains a difficult problem and it is unclear whether it is possible to achieve good (linear) scaling on nontrivial problems. We do not recommend it unless there is excellent framework or operating system support for chaining together multiple GPUs.
Second, we could split the work layerwise. For instance, rather than computing 64 channels on a single GPU we could split up the problem across 4 GPUs, each of which generates data for 16 channels.
Likewise, for a fully-connected layer we could split the number of output units.
:numref:`fig_alexnet_original` (taken from :cite:`Krizhevsky.Sutskever.Hinton.2012`)
illustrates this design, where this strategy was used to deal with GPUs that had a very small memory footprint (2 GB at the time).
This allows for good scaling in terms of computation, provided that the number of channels (or units) is not too small.
Besides,
multiple GPUs can process increasingly larger networks since the available memory scales linearly.

:label:`fig_alexnet_original`
However,
we need a *very large* number of synchronization or barrier operations since each layer depends on the results from all the other layers.
Moreover, the amount of data that needs to be transferred is potentially even larger than when distributing layers across GPUs. Thus, we do not recommend this approach due to its bandwidth cost and complexity.
Last, we could partition data across multiple GPUs. This way all GPUs perform the same type of work, albeit on different observations. Gradients are aggregated across GPUs after each minibatch of training data.
This is the simplest approach and it can be applied in any situation.
We only need to synchronize after each minibatch. That said, it is highly desirable to start exchanging gradients of some parameters already while others are still being computed.
Moreover, larger numbers of GPUs lead to larger minibatch sizes, thus increasing training efficiency.
However, adding more GPUs does not allow us to train larger models.

:label:`fig_splitting`
A comparison of different ways of parallelization on multiple GPUs is depicted in :numref:`fig_splitting`.
By and large, data parallelism is the most convenient way to proceed, provided that we have access to GPUs with sufficiently large memory. See also :cite:`Li.Andersen.Park.ea.2014` for a detailed description of partitioning for distributed training. GPU memory used to be a problem in the early days of deep learning. By now this issue has been resolved for all but the most unusual cases. We focus on data parallelism in what follows.
## Data Parallelism
Assume that there are $k$ GPUs on a machine. Given the model to be trained, each GPU will maintain a complete set of model parameters independently though parameter values across the GPUs are identical and synchronized.
As an example,
:numref:`fig_data_parallel` illustrates
training with
data parallelism when $k=2$.

:label:`fig_data_parallel`
In general, the training proceeds as follows:
* In any iteration of training, given a random minibatch, we split the examples in the batch into $k$ portions and distribute them evenly across the GPUs.
* Each GPU calculates loss and gradient of the model parameters based on the minibatch subset it was assigned.
* The local gradients of each of the $k$ GPUs are aggregated to obtain the current minibatch stochastic gradient.
* The aggregate gradient is re-distributed to each GPU.
* Each GPU uses this minibatch stochastic gradient to update the complete set of model parameters that it maintains.
Note that in practice we *increase* the minibatch size $k$-fold when training on $k$ GPUs such that each GPU has the same amount of work to do as if we were training on a single GPU only. On a 16-GPU server this can increase the minibatch size considerably and we may have to increase the learning rate accordingly.
Also note that batch normalization in :numref:`sec_batch_norm` needs to be adjusted, e.g., by keeping a separate batch normalization coefficient per GPU.
In what follows we will use a toy network to illustrate multi-GPU training.
```
%matplotlib inline
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
```
## [**A Toy Network**]
We use LeNet as introduced in :numref:`sec_lenet` (with slight modifications). We define it from scratch to illustrate parameter exchange and synchronization in detail.
```
# Initialize model parameters (small random weights, zero biases).
scale = 0.01
W1 = torch.randn(size=(20, 1, 3, 3)) * scale   # conv1: 20 filters, 1 input channel, 3x3
b1 = torch.zeros(20)
W2 = torch.randn(size=(50, 20, 5, 5)) * scale  # conv2: 50 filters, 20 input channels, 5x5
b2 = torch.zeros(50)
W3 = torch.randn(size=(800, 128)) * scale      # fc1: 800 = 50*4*4 flattened features (for 28x28 inputs) -> 128
b3 = torch.zeros(128)
W4 = torch.randn(size=(128, 10)) * scale       # fc2: 128 -> 10 class logits
b4 = torch.zeros(10)
params = [W1, b1, W2, b2, W3, b3, W4, b4]
# Define the model
def lenet(X, params):
    """Forward pass of the toy LeNet given an explicit parameter list.

    `params` holds [W1, b1, W2, b2, W3, b3, W4, b4] as produced above.
    Returns the (batch, 10) logit matrix.
    """
    W1, b1, W2, b2, W3, b3, W4, b4 = params
    # First conv -> ReLU -> 2x2 average pooling.
    feat = F.avg_pool2d(F.relu(F.conv2d(input=X, weight=W1, bias=b1)),
                        kernel_size=(2, 2), stride=(2, 2))
    # Second conv -> ReLU -> 2x2 average pooling.
    feat = F.avg_pool2d(F.relu(F.conv2d(input=feat, weight=W2, bias=b2)),
                        kernel_size=(2, 2), stride=(2, 2))
    # Flatten and apply the two fully-connected layers.
    flat = feat.reshape(feat.shape[0], -1)
    hidden = F.relu(torch.mm(flat, W3) + b3)
    return torch.mm(hidden, W4) + b4
# Cross-entropy loss function (reduction='none' keeps per-example losses,
# so callers choose how to aggregate them)
loss = nn.CrossEntropyLoss(reduction='none')
```
## Data Synchronization
For efficient multi-GPU training we need two basic operations.
First we need to have the ability to [**distribute a list of parameters to multiple devices**] and to attach gradients (`get_params`). Without parameters it is impossible to evaluate the network on a GPU.
Second, we need the ability to sum parameters across multiple devices, i.e., we need an `allreduce` function.
```
def get_params(params, device):
    """Place every tensor in `params` on `device` and enable gradient tracking.

    Returns the list of device-resident tensors; each one is marked with
    requires_grad so that backward() accumulates gradients into it.
    """
    device_params = []
    for param in params:
        moved = param.to(device)
        moved.requires_grad_()
        device_params.append(moved)
    return device_params
```
Let us try it out by copying the model parameters to one GPU.
```
new_params = get_params(params, d2l.try_gpu(0))
print('b1 weight:', new_params[1])
print('b1 grad:', new_params[1].grad)
```
Since we did not perform any computation yet, the gradient with regard to the bias parameter is still zero.
Now let us assume that we have a vector distributed across multiple GPUs. The following [**`allreduce` function adds up all vectors and broadcasts the result back to all GPUs**]. Note that for this to work we need to copy the data to the device accumulating the results.
```
def allreduce(data):
    """Sum the tensors in `data` in place and broadcast the total back.

    After the call every element of `data` holds the elementwise sum of
    all shards, each on its original device.
    """
    total = data[0]
    # Accumulate every other shard into the first one (on its device).
    for shard in data[1:]:
        total[:] += shard.to(total.device)
    # Copy the aggregate back out to each remaining shard's device.
    for shard in data[1:]:
        shard[:] = total.to(shard.device)
```
Let us test this by creating vectors with different values on different devices and aggregate them.
```
data = [torch.ones((1, 2), device=d2l.try_gpu(i)) * (i + 1) for i in range(2)]
print('before allreduce:\n', data[0], '\n', data[1])
allreduce(data)
print('after allreduce:\n', data[0], '\n', data[1])
```
## Distributing Data
We need a simple utility function to [**distribute a minibatch evenly across multiple GPUs**]. For instance, on two GPUs we would like to have half of the data to be copied to either of the GPUs.
Since it is more convenient and more concise, we use the built-in function from the deep learning framework to try it out on a $4 \times 5$ matrix.
```
data = torch.arange(20).reshape(4, 5)
devices = [torch.device('cuda:0'), torch.device('cuda:1')]
split = nn.parallel.scatter(data, devices)
print('input :', data)
print('load into', devices)
print('output:', split)
```
For later reuse we define a `split_batch` function that splits both data and labels.
```
#@save
def split_batch(X, y, devices):
    """Split `X` and `y` into multiple devices."""
    assert X.shape[0] == y.shape[0]
    X_shards = nn.parallel.scatter(X, devices)
    y_shards = nn.parallel.scatter(y, devices)
    return (X_shards, y_shards)
```
## Training
Now we can implement [**multi-GPU training on a single minibatch**]. Its implementation is primarily based on the data parallelism approach described in this section. We will use the auxiliary functions we just discussed, `allreduce` and `split_and_load`, to synchronize the data among multiple GPUs. Note that we do not need to write any specific code to achieve parallelism. Since the computational graph does not have any dependencies across devices within a minibatch, it is executed in parallel *automatically*.
```
def train_batch(X, y, device_params, devices, lr):
    """Train on one minibatch across all `devices` (data parallelism).

    `device_params` holds one full copy of the model parameters per device.
    Gradients are summed across devices and every replica is updated with
    the same aggregate gradient, keeping all copies synchronized.
    """
    X_shards, y_shards = split_batch(X, y, devices)
    # Loss is calculated separately on each GPU
    ls = [loss(lenet(X_shard, device_W), y_shard).sum()
          for X_shard, y_shard, device_W in zip(
              X_shards, y_shards, device_params)]
    for l in ls:  # Backpropagation is performed separately on each GPU
        l.backward()
    # Sum all gradients from each GPU and broadcast them to all GPUs
    with torch.no_grad():
        for i in range(len(device_params[0])):
            # allreduce mutates the `.grad` tensors in place, so every
            # replica ends up with the summed gradient for parameter i.
            allreduce([device_params[c][i].grad for c in range(len(devices))])
    # The model parameters are updated separately on each GPU
    for param in device_params:
        d2l.sgd(param, lr, X.shape[0])  # Here, we use a full-size batch
```
Now, we can define [**the training function**]. It is slightly different from the ones used in the previous chapters: we need to allocate the GPUs and copy all the model parameters to all the devices.
Obviously each batch is processed using the `train_batch` function to deal with multiple GPUs. For convenience (and conciseness of code) we compute the accuracy on a single GPU, though this is *inefficient* since the other GPUs are idle.
```
def train(num_gpus, batch_size, lr):
    """Train the toy LeNet on Fashion-MNIST across `num_gpus` devices.

    Copies the global `params` to every device, runs data-parallel
    minibatch SGD for a fixed number of epochs, and plots test accuracy
    after each epoch (evaluated on device 0 only).
    """
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    devices = [d2l.try_gpu(i) for i in range(num_gpus)]
    # Copy model parameters to `num_gpus` GPUs
    device_params = [get_params(params, d) for d in devices]
    num_epochs = 10
    animator = d2l.Animator('epoch', 'test acc', xlim=[1, num_epochs])
    timer = d2l.Timer()
    for epoch in range(num_epochs):
        timer.start()
        for X, y in train_iter:
            # Perform multi-GPU training for a single minibatch
            train_batch(X, y, device_params, devices, lr)
        # Wait for all asynchronous GPU work before stopping the timer.
        torch.cuda.synchronize()
        timer.stop()
        # Evaluate the model on GPU 0
        animator.add(epoch + 1, (d2l.evaluate_accuracy_gpu(
            lambda x: lenet(x, device_params[0]), test_iter, devices[0]),))
    print(f'test acc: {animator.Y[0][-1]:.2f}, {timer.avg():.1f} sec/epoch '
          f'on {str(devices)}')
```
Let us see how well this works [**on a single GPU**].
We first use a batch size of 256 and a learning rate of 0.2.
```
train(num_gpus=1, batch_size=256, lr=0.2)
```
By keeping the batch size and learning rate unchanged and [**increasing the number of GPUs to 2**], we can see that the test accuracy roughly stays the same compared with
the previous experiment.
In terms of the optimization algorithms, they are identical. Unfortunately there is no meaningful speedup to be gained here: the model is simply too small; moreover we only have a small dataset, where our slightly unsophisticated approach to implementing multi-GPU training suffered from significant Python overhead. We will encounter more complex models and more sophisticated ways of parallelization going forward.
Let us see what happens nonetheless for Fashion-MNIST.
```
train(num_gpus=2, batch_size=256, lr=0.2)
```
## Summary
* There are multiple ways to split deep network training over multiple GPUs. We could split them between layers, across layers, or across data. The former two require tightly choreographed data transfers. Data parallelism is the simplest strategy.
* Data parallel training is straightforward. However, it increases the effective minibatch size to be efficient.
* In data parallelism, data are split across multiple GPUs, where each GPU executes its own forward and backward operation and subsequently gradients are aggregated and results are broadcast back to the GPUs.
* We may use slightly increased learning rates for larger minibatches.
## Exercises
1. When training on $k$ GPUs, change the minibatch size from $b$ to $k \cdot b$, i.e., scale it up by the number of GPUs.
1. Compare accuracy for different learning rates. How does it scale with the number of GPUs?
1. Implement a more efficient `allreduce` function that aggregates different parameters on different GPUs? Why is it more efficient?
1. Implement multi-GPU test accuracy computation.
[Discussions](https://discuss.d2l.ai/t/1669)
| github_jupyter |
# HW7 Extra Credit
### This extra credit assignment, worth 50 pts toward the homework score, analyzes the displacement amplitude spectrum for a small $M_L$=4.1 earthquake that occurred in Berkeley on December 4, 1998.
### Write python code to apply a ~$\frac{1}{f^2}$ source model with attenuation to the observed displacement amplitude spectrum to determine:
1. The scalar seismic moment
2. The corner frequency of the earthquake
3. The rupture area and slip
4. The stress drop.
5. Discuss your results in terms of what is typically found for earthquakes (use Lay and Wallace text as a reference).
### The SH Greens function solution for an anelastic halfspace is:
### u(f)=$\frac{2 * |R_{SH}| * M_0}{4 * \pi * \rho * \beta^3 *R} \cdot \frac{1}{[1 + (\frac{f}{f_c})^2]^{(\frac{p}{2})}} \cdot e^{(\frac{-f*\pi*R}{Q*\beta})}$
#### Where Rsh is the SH radiation pattern (eqn 8.65 Lay and Wallace), $M_0$ is the scalar moment, $\rho, \beta$, Q (range 10-100), R, f and $f_c$ (range .1 to 10 Hz) are the density, shear wave velocity, attenuation quality factor, total distance, frequency and corner frequency. The parameter p allows for adjusting the high frequency fall off rate of the spectrum. For a Brune source p=2 (a minimum value of p to conserve energy is 1.5, and typically the maximum is 3).
#### u(f) is the given amplitude spectrum plotted below.
#### Be sure to use CGS (cm, grams, seconds) units for all parameters. The unit for scalar moment will therefore be dyne cm.
#### Develop a nested for loop to search for optimal Mo, fc and Q parameters
```
#Initial Setup and Subroutine Definitions - Do Not Edit
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#Model Parameters (CGS units throughout: cm, grams, seconds)
distance=6.8e5 #epicentral distance, units cm
depth=5.1e5 #source depth, units cm
azimuth=137.8*np.pi/180 #radians
strike=139*np.pi/180 #radians
rake=179*np.pi/180 #radians
dip=69*np.pi/180 #radians
beta=3.2e5 #shear wave velocity, cm/s
density=2.6 #grams/cc
#Compute Total distance (R), Azimuth(phi) and takeoff angle(I)
phi=strike-azimuth
R=np.sqrt(distance**2 + depth**2);
I=np.pi-np.arctan(distance/depth); #pi is for upgoing angle
# Read Data File and Setup frequency and amplitude spectral amplitude arrays
data=pd.read_csv('brkspec.txt', sep=' ', delimiter=None, header=None,
                 names = ['Hz','AmpSpec'])
freq=np.array(data["Hz"])
ampspec=np.array(data["AmpSpec"])
# Plot the observed displacement amplitude spectrum on log-log axes.
plt.loglog(freq,ampspec)
plt.title('Berkeley Event Amplitude Spectrum')
plt.xlabel('frequency (Hz)')
plt.ylabel('amplitude spectrum cm/Hz')
plt.savefig('brkspec.jpg')
plt.show()
##### Write code to calculate the SH radiation pattern coefficient
#Write code to fit the spectral model to the observed displacement spectrum.
#This can be accomplished with a nested for loop over the scalar moment and corner frequency
#parameters
#Define grid search range
Mo=np.arange(100.,400.,10.)*1e20 #scalar moment candidates, dyne cm
fc=np.arange(0.1,10.,0.05) #corner frequency candidates, Hz
q=np.arange(10.,100.,5.) #attenuation quality factor candidates
#NOTE(review): a step of 10. over [2.0, 3.5) yields only [2.0]; if a sweep
#over p was intended, the step should be smaller — confirm.
p=np.arange(2.0,3.5,10.)
#p=np.array([2.0, 2.0])
#Loop over model parameters and test for fit with data to determine best fit parameters
#Plot the fit to the data, and discuss the uncertainties in the solution
```
#### This is an example of the fit that can be obtained
<img src='brkspecfit.jpg'>
### Questions
1. What are the scalar seismic moment, Mw, corner frequency and Q that best fit the spectra assuming p=2.0?
2. How does the fit and the scalar moment, corner frequency and Q change if p=3.0?
3. The fault radius can be determined from the corner frequency where radius=0.37*beta/fc. Use the fault radius and moment to estimate the average slip and the stress drop of the earthquake
4. Discuss the estimated stress drop in terms of the expected range of values for earthquakes.
5. How well determined do you think your corner frequency and moment estimates are? How do uncertainties in those quantities translate to uncertainty in stress drop?
```
#Use the corner frequency to estimate the fault rupture area, the average slip on the fault
#and the stress drop
```
| github_jupyter |
```
import matplotlib.pyplot as plt
# Jetson Nano energy-per-inference analysis.
# GPU clock frequencies under test, in Hz; kept as strings because they are
# used both in file names and matched against CSV fields.
max_frequency = "921600000"
eff_frequency = "768000000"
# Batch Size,Concurrency,Inferences/Second,Client Send,Network+Server Send/Recv,Server Queue,Server Compute Input,Server Compute Infer,Server Compute Output,Client Recv,p50 latency,p90 latency,p95 latency,p99 latency
# GPU Max
max_gpu_data = []
max_gpu_power_data = []
max_gpu_timings = []
with open(f"data/nano/{max_frequency}_data.csv", "r") as f:
    max_gpu_data = [l.strip().split(",") for l in f.readlines()[1:]]
with open(f"data/nano/{max_frequency}_power.csv", "r") as f:
    max_gpu_power_data = [l.strip().split(",") for l in f.readlines()]
with open(f"data/nano/{max_frequency}_timings.csv", "r") as f:
    max_gpu_timings = [l.strip().split(",") for l in f.readlines()[1:]]
# GPU Eff
eff_gpu_data = []
eff_gpu_power_data = []
eff_gpu_timings = []
with open(f"data/nano/{eff_frequency}_data.csv", "r") as f:
    eff_gpu_data = [l.strip().split(",") for l in f.readlines()[1:]]
with open(f"data/nano/{eff_frequency}_power.csv", "r") as f:
    eff_gpu_power_data = [l.strip().split(",") for l in f.readlines()]
with open(f"data/nano/{eff_frequency}_timings.csv", "r") as f:
    eff_gpu_timings = [l.strip().split(",") for l in f.readlines()[1:]]
# DLA Max
# NOTE(review): this "DLA Max" block reads the *eff* frequency files while
# the "DLA Eff" block below reads the *max* frequency files — confirm this
# pairing is intended and not a copy/paste swap.
dla_max_data = []
dla_max_power_data = []
dla_max_timings = []
with open(f"data/nano/{eff_frequency}_dla_data.csv", "r") as f:
    dla_max_data = [l.strip().split(",") for l in f.readlines()[1:]]
with open(f"data/nano/{eff_frequency}_dla_power.csv", "r") as f:
    dla_max_power_data = [l.strip().split(",") for l in f.readlines()[1:]]
with open(f"data/nano/{eff_frequency}_dla_timings.csv", "r") as f:
    dla_max_timings = [l.strip().split(",") for l in f.readlines()[1:]]
# Keep only power samples taken inside the DLA benchmark window.
# NOTE(review): timestamps are compared as *strings*; this is only correct
# if every timestamp shares the same fixed-width format — confirm.
dla_power = [p for p in dla_max_power_data if p[0] > dla_max_timings[0][0] and p[0] < dla_max_timings[0][1]]
avg_dla_power = sum([float(r[1]) for r in dla_power])/len(dla_power)
# Energy per inference = average power (W) / throughput (inferences/s).
joule_per_inference = avg_dla_power / float(dla_max_data[0][2])
max_x = ['TRT']
max_y = [joule_per_inference*1000]  # stored in millijoules
# DLA Eff
dla_eff_data = []
dla_eff_power_data = []
dla_eff_timings = []
with open(f"data/nano/{max_frequency}_dla_data.csv", "r") as f:
    dla_eff_data = [l.strip().split(",") for l in f.readlines()[1:]]
with open(f"data/nano/{max_frequency}_dla_power.csv", "r") as f:
    dla_eff_power_data = [l.strip().split(",") for l in f.readlines()[1:]]
with open(f"data/nano/{max_frequency}_dla_timings.csv", "r") as f:
    dla_eff_timings = [l.strip().split(",") for l in f.readlines()[1:]]
dla_power = [p for p in dla_eff_power_data if p[0] > dla_eff_timings[0][0] and p[0] < dla_eff_timings[0][1]]
avg_dla_power = sum([float(r[1]) for r in dla_power])/len(dla_power)
joule_per_inference = avg_dla_power / float(dla_eff_data[0][2])
eff_x = ['TRT']
eff_y = [joule_per_inference*1000]
# Per-batch-size energy for the plain GPU runs at the max frequency.
for start, stop, batch_size in max_gpu_timings:
    power = [p for p in max_gpu_power_data if p[0] > start and p[0] < stop]
    d = [d for d in max_gpu_data if d[0] == batch_size][0]
    avg_power = sum([float(r[1]) for r in power])/len(power)
    joule_per_inference = avg_power / float(d[2])
    max_x.append(batch_size)
    max_y.append(joule_per_inference*1000)
# ... and the same computation at the efficient frequency.
for start, stop, batch_size in eff_gpu_timings:
    power = [p for p in eff_gpu_power_data if p[0] > start and p[0] < stop]
    d = [d for d in eff_gpu_data if d[0] == batch_size][0]
    avg_power = sum([float(r[1]) for r in power])/len(power)
    joule_per_inference = avg_power / float(d[2])
    eff_x.append(batch_size)
    eff_y.append(joule_per_inference*1000)
# Bar charts: energy per inference vs batch size at the two GPU clocks,
# with the TRT/DLA result as the first bar. Raw series are also exported.
# NOTE(review): consider a `with` block so the export file is closed even
# if plotting raises.
export_file = open("../export/nano_energy_by_batch.csv", "w")
fig, ax = plt.subplots(1, 2, figsize=(25, 10), sharey=True)
ax[0].set_axisbelow(True)
ax[0].grid(axis='y')
ax[0].set_title(f"Energy Usage per Inference {int(max_frequency)/1000000}MHz")
ax[0].set_xlabel("Batch Size")
ax[0].set_ylabel("Energy(mJ) per Inference")
ax[0].bar(max_x, max_y)
export_file.write(f"{max_x}\n")
export_file.write(f"{max_y}\n\n")
ax[1].set_axisbelow(True)
ax[1].grid(axis='y')
ax[1].yaxis.set_tick_params(labelleft=True)
ax[1].set_title(f"Energy Usage per Inference {int(eff_frequency)/1000000}MHz")
ax[1].set_xlabel("Batch Size")
ax[1].set_ylabel("Energy(mJ) per Inference")
ax[1].bar(eff_x, eff_y)
export_file.write(f"{eff_x}\n")
export_file.write(f"{eff_y}\n")
export_file.close()
# Frequency sweep for a single batch size: energy per inference at each
# GPU clock, plus the same data normalized to the highest clock.
batch_size = 4
data = []
power = []
timings = []
with open(f"data/nano/batch_{batch_size}_data.csv", "r") as f_data, open(f"data/nano/batch_{batch_size}_power.csv", "r") as f_power, open(f"data/nano/batch_{batch_size}_timings.csv", "r") as f_timings:
    data = [l.strip().split(",") for l in f_data.readlines()[1:]]
    power = [l.strip().split(",") for l in f_power.readlines()[1:]]
    timings = [l.strip().split(",") for l in f_timings.readlines()[1:]]
x = []
y = []
for start, stop, frequency in timings:
    # Power samples recorded during this frequency's benchmark window
    # (string comparison of timestamps, as above).
    power_raw = [p for p in power if p[0] > start and p[0] < stop]
    d = [d for d in data if d[0] == frequency][0]
    avg_power = sum([float(r[1]) for r in power_raw])/len(power_raw)
    joule_per_inference = avg_power / float(d[2])
    x.append(str(int(frequency)/1000000))  # axis label in MHz
    y.append(joule_per_inference*1000)     # millijoules
# Efficiency relative to the last (highest) frequency in the sweep.
normalized_y = [y[-1]/_y for _y in y]
fig, ax = plt.subplots(1, 2, figsize=(25, 10))
ax[0].set_axisbelow(True)
ax[0].grid(axis='y')
ax[0].set_title(f"Energy Usage per Inference per Frequency, Batch Size of {batch_size}")
ax[0].set_xlabel("Frequency MHz")
ax[0].set_ylabel("Energy(mJ) per Inference")
ax[0].bar(x, y)
ax[1].set_axisbelow(True)
ax[1].grid(axis='y')
ax[1].set_title(f"Normalized Energy Usage, Batch Size of {batch_size}")
ax[1].set_xlabel("Frequency MHz")
ax[1].set_ylabel("Normalized Power Usage")
ax[1].bar(x, normalized_y)
# Repeat the frequency sweep for every batch size, plot all series
# together, and export both the raw and normalized curves.
batch_sizes = [1, 2, 4, 8, 16, 32]
data = []
power = []
timings = []
for size in batch_sizes:
    with open(f"data/nano/batch_{size}_data.csv", "r") as f_data, open(f"data/nano/batch_{size}_power.csv", "r") as f_power, open(f"data/nano/batch_{size}_timings.csv", "r") as f_timings:
        data.append([l.strip().split(",") for l in f_data.readlines()[1:]])
        power.append([l.strip().split(",") for l in f_power.readlines()[1:]])
        timings.append([l.strip().split(",") for l in f_timings.readlines()[1:]])
x = []
y = []
# One (frequency label, mJ/inference) series per batch size.
for idx, timing_data in enumerate(timings):
    x.append([])
    y.append([])
    for start, stop, frequency in timing_data:
        power_raw = [p for p in power[idx] if p[0] > start and p[0] < stop]
        d = [d for d in data[idx] if d[0] == frequency][0]
        avg_power = sum([float(r[1]) for r in power_raw])/len(power_raw)
        joule_per_inference = avg_power / float(d[2])
        x[idx].append(str(int(frequency)/1000000))
        y[idx].append(joule_per_inference*1000)
# Normalize each series to its last (highest-frequency) point.
normalized_y = []
for idx, _ in enumerate(batch_sizes):
    normalized_y.append([y[idx][-1]/_y for _y in y[idx]])
export_file = open("../export/nano_energy_by_freq.csv", "w")
fig, ax = plt.subplots(1, 2, figsize=(25, 10))
ax[0].set_axisbelow(True)
ax[0].grid(axis='y')
# NOTE(review): "Freqency" typo in the rendered chart title below.
ax[0].set_title(f"Energy Usage per Inference by Freqency")
ax[0].set_xlabel("Frequency MHz")
ax[0].set_ylabel("Energy(mJ) per Inference")
for idx, size in enumerate(batch_sizes):
    ax[0].plot(x[idx], y[idx], label=f"{size}")
    export_file.write(f"{x[idx]}\n{y[idx]}\n")
ax[0].legend(loc="upper left", title="Batch Size")
export_file.write(f"\n\n")
ax[1].set_axisbelow(True)
ax[1].grid(axis='y')
ax[1].set_title(f"Normalized Efficiency")
ax[1].set_xlabel("Frequency MHz")
ax[1].set_ylabel("Normalized Efficiency")
for idx, size in enumerate(batch_sizes):
    ax[1].plot(x[idx], normalized_y[idx], label=f"{size}")
    export_file.write(f"{x[idx]}\n{normalized_y[idx]}\n")
ax[1].legend(loc="upper left", title="Batch Size")
export_file.close()
```
| github_jupyter |
# Case 2. Diabetic Retinopathy Analysis
Sanni Tolonen<br>
26.2.2018<br>
Cognitive Systems for Health Technology Applications, Spring 2018<br>
Helsinki Metropolia University of Applied Sciences
<h2>1. Objectives</h2><br>
The aim of this assignment is to learn to use convolutional neural networks to classify medical
images.<br>
For a little help in this assignment I checked what Ben Graham and his team had done in Kaggle Diabetic Retinopathy Detection competition report.
<h2> 2. Required libraries </h2>
```
# import libraries and functions
import numpy as np
import matplotlib.pyplot as plt
import pickle
from keras import layers
from keras import models
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
import time
```
Numpy is used for scientific computing and creating multidimensional arrays, matplotlib for plotting figures, pickle for saving the history of the model, Keras for building the convolutional neural network and time for measuring elapsed time.
<h2> 3. Data description and preprocessing </h2>
This dataset is a large set of high-resolution retina images taken under a variety of imaging conditions.
A clinician has rated the presence of diabetic retinopathy in each image on a scale of 0 to 4:
<ul>
<li>0 - No DR</li>
<li>1 - Mild</li>
<li>2 - Moderate</li>
<li>3 - Severe</li>
<li>4 - Proliferative DR</li>
</ul>
The images come from different models and types of cameras, which can affect the visual appearance. Some images are shown as one would see the retina anatomically, meaning macula on the left, optic nerve on the right for the right eye. Others are shown as one would see through a microscope condensing lens, i.e. inverted, as one sees in a typical live eye exam. There are two ways to tell if an image is inverted:
It is inverted if the macula, the small dark central area, is slightly higher than the midline through the optic nerve. If the macula is lower than the midline of the optic nerve, it's not inverted.
If there is a notch on the side of the image (square, triangle, or circle) then it's not inverted. If there is no notch, it's inverted.
```
# Dataset directories: one subfolder per class under each split.
train_dir = "../dataset2/train"
validation_dir = "../dataset2/validation"
test_dir = "../dataset2/test"

# create datagenerators
# Training data is augmented (horizontal flips, up to 20% zoom); empty
# pixels created by the transforms are filled from the nearest pixel.
# Validation/test data get only the 1/255 rescaling.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   fill_mode='nearest',
                                   horizontal_flip=True,
                                   zoom_range=0.2)
validation_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# training parameters
batch_size = 50
epochs = 50
steps_per_epoch = 25       # batches drawn per training epoch (25 * 50 = 1250 images)
validation_steps = 10      # batches drawn per validation pass
image_height = 150         # all images are resized to 150x200 (H x W)
image_width = 200

# generator for train dataset
print('Training dataset.')
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')   # binary labels: two class subfolders expected

# generator for validation dataset
print('Validation dataset.')
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')

# generator for test dataset
print('Test dataset.')
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')
```
The dataset is split into train, validation and test sets. All images are rescaled by 1./255 and resized to 150x200. The training set is augmented. Its fill mode is chosen as 'nearest', which means that if empty pixels are generated during processing, the generator fills them from the nearest pixel that has a value. It also applies horizontal flips, and the zoom range is at most 20%. For preprocessing I first tried the preprocessing function designed by Sakari Lukkarinen but I had some issues with that, so I did some research and used a different approach.
<h2> 4. Modeling and compilation </h2>
This model is almost exactly like the one in Sakari's GitHub repository. I wanted to try this one as well, since I had problems with the other one. For starters, there are two Conv2D layers followed by one MaxPool2D layer. After two sets of these, there are two Conv2D layers and then two sets of two Conv2D layers with a Dropout layer for weight regularization to avoid overfitting. At the end there is a Flatten layer to flatten the input, a couple of Dense layers and another Dropout layer.
```
# build the model
# Stacked 3x3 conv blocks with channel width growing 32 -> 192; the first
# three pairs are each followed by 3x3/stride-2 max pooling, the last two
# pairs by light dropout for regularization.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation = 'relu',
                        input_shape = (image_height, image_width, 3)))
model.add(layers.Conv2D(32, (3, 3), activation = 'relu'))
model.add(layers.MaxPool2D((3, 3), strides=2))
model.add(layers.Conv2D(64, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(64, (3, 3), activation = 'relu'))
model.add(layers.MaxPool2D((3, 3), strides=2))
model.add(layers.Conv2D(96, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(96, (3, 3), activation = 'relu'))
model.add(layers.MaxPool2D((3, 3), strides=2))
model.add(layers.Conv2D(128, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(128, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(160, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(160, (3, 3), activation = 'relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Conv2D(192, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(192, (3, 3), activation = 'relu'))
model.add(layers.Dropout(0.1))
# Classifier head: flatten -> 256-unit dense -> dropout -> single sigmoid
# output for binary classification.
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()

# compile the model
# Binary cross-entropy with RMSprop, tracking accuracy ('acc').
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(),
              metrics=['acc'])
```
<h2> 5. Training and validation </h2>
```
# Train the model, timing the whole run.
t1 = time.time()
h = model.fit_generator(
    train_generator,
    steps_per_epoch = steps_per_epoch,
    verbose = 1,
    epochs = epochs,
    validation_data = validation_generator,
    validation_steps = validation_steps)
t2 = time.time()

# store the elapsed time into history
h.history.update({'time_elapsed': t2 - t1})

# save the model and history
model.save('case_2_run_3.h5')
# `with` ensures the pickle file handle is closed (original leaked it).
with open('case_2_history_3.p', 'wb') as f:
    pickle.dump(h.history, f)

# Report timing. The original labeled the *total* wall-clock hours as
# "Time per epoch" and divided by a hard-coded 40 even though `epochs`
# is 50 — both fixed here.
print('Total training time {:.2f} hours.'.format((t2 - t1) / 3600))
print('Time per epoch {:.2f} minutes.'.format((t2 - t1) / epochs / 60))
```
<h2> 6. Evaluation </h2>
Here the model created above is tested with the testing set.
```
# Rebuild the test generator so evaluation starts from the beginning
# of the test set.
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')

# Evaluate on 20 batches of test data.
r = model.evaluate_generator(test_generator, steps = 20)

# loss and accuracy
r
```
<h2> 7. Results and discussion </h2>
Training accuracy stays below 0.75 the whole time. The final test accuracy is 0.71. At the end of training the loss is 4.77, which is very large. This means the inconsistency between the predicted values and the actual labels is large. The final test loss is 4.70.
```
# Pull the training curves out of the history dict.
acc = h.history['acc']
val_acc = h.history['val_acc']
loss = h.history['loss']
val_loss = h.history['val_loss']
# NOTE(review): this rebinds `epochs` (previously the scalar 50) to a
# range used as the x-axis — intentional here, but shadows the setting.
epochs = range(len(acc))

# Accuracy curves: dots = training, solid line = validation.
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accracy')
plt.title('Training and validation accuracy')
plt.ylim([0, 1])
plt.xlabel('Epochs')
plt.grid()
plt.legend()

# Loss curves in a second figure.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.ylim([0, 10])
plt.xlabel('Epochs')
plt.grid()
plt.legend()
plt.show()
```
<h2>8. Conclusions</h2>
I had still the same problem, validation accuracy and validation loss stay the same and the results are even worse, I can not seem to understand what is going wrong.
| github_jupyter |
```
from __future__ import division
import numpy as np
from numpy import linalg as LA
#np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import matplotlib
%matplotlib inline
import os
import scipy.stats as stats
import pyhsmm
from pyhsmm.util.text import progprint_xrange
import pyhsmm.basic.distributions as distributions
import scipy.io as sio
import csv
import copy
import time
import pickle
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Table, MetaData, Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sklearn import preprocessing
# Load a previously pickled {device: {trip: [[speed, acc], ...]}} mapping.
# NOTE(review): this value is overwritten a few cells below by
# dataTransform(data_devices); this load appears redundant — confirm
# before removing.
filename = 'data_devices_trip.sav'
data_devices_trip = pickle.load(open(filename, 'rb'))
#EEFECTS: return new data in form: data = {} and data[device]={"trip":[]}
def dataTransform(data_devices):
data = {}
for i, devi in enumerate(data_devices):
#print(i, devi)
data[devi] = {}
for ii in range(data_devices[devi].shape[0]):
data_temp = data_devices[devi][ii]
trip = int(data_temp[0])
speed = data_temp[1]
acc = data_temp[2]
try:
data[devi][trip].append([speed,acc])
except:
data[devi][trip] = []
data[devi][trip].append([speed,acc])
return data
# get data_devices_trip = {} and data_devices_trip[device]={"trip":[]}
filename = 'data_devices.sav'
data_devices = pickle.load(open(filename, 'rb'))
data_devices_trip = dataTransform(data_devices)
#another way to get data_devices_trip, but this way is a little bit slow
#filename = 'data_devices_trip.sav'
#data_devices_trip = pickle.load(open(filename, 'rb'))
# Fit one sticky HDP-HMM per (device, trip) pair on standardized
# [speed, acc] sequences. Only the first six devices are processed
# (see the `i == 6` break below), then all fitted models are pickled.
posteriormodels = {}
i = 0
for devi, value1 in data_devices_trip.items() :
    #for i, devi in enumerate(data_devices):
    print('devi', devi)
    if(len(data_devices_trip[devi]) == 0):
        print('oops, this is a none set')
        continue
    else:
        posteriormodels[devi]={}
    for trip,value2 in data_devices_trip[devi].items():
        print('trip',trip)
        data_trip = np.array(data_devices_trip[devi][trip])
        data_scaled = preprocessing.scale(data_trip)#implement data normalization
        Nmax = 200 # preset the maximum states
        # and some hyperparameters
        obs_dim = data_scaled.shape[1] # data dimensions
        obs_hypparams = {'mu_0':np.zeros(np.int(obs_dim)),
                         'sigma_0':np.eye(np.int(obs_dim)),
                         'kappa_0':0.25, # 0.2 5
                         'nu_0':obs_dim+2}
        # Define the observation distribution
        obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
        # Define the posterior inference model
        posteriormodels[devi][trip] = pyhsmm.models.WeakLimitStickyHDPHMM(
            kappa=6.,alpha=1.,gamma=1.,init_state_concentration=1.,
            obs_distns=obs_distns)
        # Sampling process, for 100 round
        Sampling_step = 100
        Sampling_xaxis = range(1,Sampling_step+1)
        # Add the data to the model and train
        posteriormodels[devi][trip].add_data(data_scaled)
        Meth2_LLH = np.zeros((Sampling_step,1))
        # Sampling process, for 100 around
        # Gibbs sampling: resample the full model state 100 times.
        for idx in progprint_xrange(Sampling_step):
            posteriormodels[devi][trip].resample_model()
            #Meth2_LLH[idx] = posteriormodel.log_likelihood()
    i = i + 1
    if i == 6:
        break

# save the model to disk
filename = 'posterior_models_test.sav'
pickle.dump(posteriormodels, open(filename, 'wb'))
# NOTE(review): truncated duplicate of the training loop above — it only
# resets `posteriormodels` and creates empty per-device dicts for the
# first six non-empty devices, with no trip-level fitting. Looks like
# leftover scaffolding; confirm whether it can be deleted.
posteriormodels = {}
i = 0
for devi, value1 in data_devices_trip.items() :
    #for i, devi in enumerate(data_devices):
    print('devi', devi)
    if(len(data_devices_trip[devi]) == 0):
        print('oops, this is a none set')
        continue
    else:
        posteriormodels[devi]={}
    i = i + 1
    if i == 6:
        break
```
| github_jupyter |
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex SDK: AutoML training image classification model for batch prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex SDK to create image classification models and do batch prediction using Google Cloud's [AutoML](https://cloud.google.com/vertex-ai/docs/start/automl-users).
### Dataset
The dataset used for this tutorial is the [Flowers dataset](https://www.tensorflow.org/datasets/catalog/tf_flowers) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip.
### Objective
In this tutorial, you create an AutoML image classification model from a Python script, and then do a batch prediction using the Vertex SDK. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.
The steps performed include:
- Create a Vertex `Dataset` resource.
- Train the model.
- View the model evaluation.
- Make a batch prediction.
There is one key difference between using batch prediction and using online prediction:
* Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.
* Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.
### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Installation
Install the latest version of Vertex SDK.
```
import sys
import os

# Google Cloud Notebook detection: the env_version marker file only
# exists on managed notebook instances, where --user installs are needed.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = '--user'
else:
    USER_FLAG = ''

! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip3 install -U google-cloud-storage $USER_FLAG
```
### Restart the kernel
Once you've installed the Vertex SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
```
# Restart the kernel so the freshly installed packages are importable.
# Skipped under automated testing (IS_TESTING).
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython

    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
```
PROJECT_ID = "[your-project-id]" #@param {type:"string"}

# If the placeholder wasn't replaced, fall back to gcloud's active project.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)

# Make gcloud commands in this notebook target the chosen project.
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations)
```
# Vertex region used by every regional API call below.
REGION = 'us-central1' #@param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
```
from datetime import datetime

# Timestamp suffix appended to resource names to avoid collisions
# between users sharing a project.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.

# If on Google Cloud Notebook, then don't execute this code
# (managed notebooks are already authenticated).
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth

        google_auth.authenticate_user()

    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]" #@param {type:"string"}

# Fall back to an auto-generated name when the placeholder wasn't replaced.
# NOTE(review): PROJECT_ID and "aip-" are concatenated with no separator
# (e.g. gs://myprojaip-<ts>) — confirm whether a "-" was intended.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
```
import google.cloud.aiplatform as aip
```
## Initialize Vertex SDK
Initialize the Vertex SDK for your project and corresponding bucket.
```
# Initialize the Vertex SDK with the project and staging bucket.
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
```
# Tutorial
Now you are ready to start creating your own AutoML image classification model.
## Create a Dataset Resource
First, you create an image Dataset resource for the Flowers dataset.
### Data preparation
The Vertex `Dataset` resource for images has some requirements for your data:
- Images must be stored in a Cloud Storage bucket.
- Each image file must be in an image format (PNG, JPEG, BMP, ...).
- There must be an index file stored in your Cloud Storage bucket that contains the path and label for each image.
- The index file must be either CSV or JSONL.
#### CSV
For image classification, the CSV index file has the requirements:
- No heading.
- First column is the Cloud Storage path to the image.
- Second column is the label.
#### Location of Cloud Storage training data.
Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
```
# Public CSV index for the Flowers dataset: each row is "<gcs path>,<label>".
IMPORT_FILE = 'gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv'
```
#### Quick peek at your data
You will use a version of the Flowers dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.
```
# Support both single- and multi-file imports: peek at the first index file.
if 'IMPORT_FILES' in globals():
    FILE = IMPORT_FILES[0]
else:
    FILE = IMPORT_FILE

# Count rows (= number of examples) and show the first few entries.
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
```
### Create the Dataset
Next, create the `Dataset` resource using the `create()` method for the `ImageDataset` class, which takes the following parameters:
- `display_name`: The human readable name for the `Dataset` resource.
- `gcs_source`: A list of one or more dataset index file to import the data items into the `Dataset` resource.
- `import_schema_uri`: The data labeling schema for the data items.
This operation may take several minutes.
```
# Create the image Dataset resource from the CSV index, using the
# single-label classification schema. This can take several minutes.
dataset = aip.ImageDataset.create(
    display_name="Flowers" + "_" + TIMESTAMP,
    gcs_source=[IMPORT_FILE],
    import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification,
)

print(dataset.resource_name)
```
## Train the model
Now train an AutoML image classification model using your Vertex `Dataset` resource. To train the model, do the following steps:
1. Create an Vertex training pipeline for the `Dataset` resource.
2. Execute the pipeline to start the training.
### Create and run training pipeline
To train an AutoML image classification model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
#### Create training pipeline
An AutoML training pipeline is created with the `AutoMLImageTrainingJob` class, with the following parameters:
- `display_name`: The human readable name for the `TrainingJob` resource.
- `prediction_type`: The type task to train the model for.
- `classification`: An image classification model.
- `object_detection`: An image object detection model.
- `multi_label`: If a classification task, whether single (`False`) or multi-labeled (`True`).
- `model_type`: The type of model for deployment.
- `CLOUD`: Deployment on Google Cloud
- `CLOUD_HIGH_ACCURACY_1`: Optimized for accuracy over latency for deployment on Google Cloud.
- `CLOUD_LOW_LATENCY_`: Optimized for latency over accuracy for deployment on Google Cloud.
- `MOBILE_TF_VERSATILE_1`: Deployment on an edge device.
- `MOBILE_TF_HIGH_ACCURACY_1`:Optimized for accuracy over latency for deployment on an edge device.
- `MOBILE_TF_LOW_LATENCY_1`: Optimized for latency over accuracy for deployment on an edge device.
- `base_model`: (optional) Transfer learning from existing `Model` resource -- supported for image classification only.
The instantiated object is the DAG for the training job.
```
# Training pipeline (DAG) definition: single-label AutoML image
# classification, cloud-hosted model, no transfer-learning base model.
dag = aip.AutoMLImageTrainingJob(
    display_name="flowers_" + TIMESTAMP,
    prediction_type="classification",
    multi_label=False,
    model_type="CLOUD",
    base_model=None,
)
```
#### Run the training pipeline
Next, you run the DAG to start the training job by invoking the method `run()`, with the following parameters:
- `dataset`: The `Dataset` resource to train the model.
- `model_display_name`: The human readable name for the trained model.
- `training_fraction_split`: The percentage of the dataset to use for training.
- `validation_fraction_split`: The percentage of the dataset to use for validation.
- `test_fraction_split`: The percentage of the dataset to use for test (holdout data).
- `budget_milli_node_hours`: (optional) Maximum training time specified in unit of millihours (1000 = hour).
- `disable_early_stopping`: If `True`, training maybe completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.
The `run` method when completed returns the `Model` resource.
The execution of the training pipeline will take upto 20 minutes.
```
# Run the training pipeline: 80/10/10 train/validation/test split,
# 8 node-hour budget (8000 milli node hours), early stopping enabled.
# Blocks until training finishes (20+ minutes) and returns the Model.
model = dag.run(
    dataset=dataset,
    model_display_name="flowers_" + TIMESTAMP,
    training_fraction_split=0.8,
    validation_fraction_split=0.1,
    test_fraction_split=0.1,
    budget_milli_node_hours=8000,
    disable_early_stopping=False
)
```
## Model deployment for batch prediction
Now deploy the trained Vertex `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for online prediction.
For online prediction, you:
1. Create an `Endpoint` resource for deploying the `Model` resource to.
2. Deploy the `Model` resource to the `Endpoint` resource.
3. Make online prediction requests to the `Endpoint` resource.
For batch-prediction, you:
1. Create a batch prediction job.
2. The job service will provision resources for the batch prediction request.
3. The results of the batch prediction request are returned to the caller.
4. The job service will unprovision the resources for the batch prediction request.
## Make a batch prediction request
Now do a batch prediction to your deployed model.
### Get test item(s)
Now do a batch prediction to your Vertex model. You will use arbitrary examples out of the dataset as a test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction.
```
# Grab the first two rows of the CSV index as test items.
test_items = !gsutil cat $IMPORT_FILE | head -n2

# Rows may carry a leading ML_USE column (3 fields) or just path,label (2).
if len(str(test_items[0]).split(',')) == 3:
    _, test_item_1, test_label_1 = str(test_items[0]).split(',')
    _, test_item_2, test_label_2 = str(test_items[1]).split(',')
else:
    test_item_1, test_label_1 = str(test_items[0]).split(',')
    test_item_2, test_label_2 = str(test_items[1]).split(',')

print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
```
### Copy test item(s)
For the batch prediction, you will copy the test items over to your Cloud Storage bucket.
```
# Copy the two test images into our own bucket and repoint the test-item
# variables at the copies.
file_1 = test_item_1.split('/')[-1]
file_2 = test_item_2.split('/')[-1]

! gsutil cp $test_item_1 $BUCKET_NAME/$file_1
! gsutil cp $test_item_2 $BUCKET_NAME/$file_2

test_item_1 = BUCKET_NAME + "/" + file_1
test_item_2 = BUCKET_NAME + "/" + file_2
```
### Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
- `content`: The Cloud Storage path to the image.
- `mime_type`: The content type. In our example, it is an `jpeg` file.
For example:
{'content': '[your-bucket]/file1.jpg', 'mime_type': 'jpeg'}
```
import tensorflow as tf
import json

# Write the JSONL batch-input file — one {"content", "mime_type"} record
# per image — directly to Cloud Storage via tf.io.gfile.
gcs_input_uri = BUCKET_NAME + '/test.jsonl'
with tf.io.gfile.GFile(gcs_input_uri, 'w') as f:
    data = {"content": test_item_1, "mime_type": "image/jpeg"}
    f.write(json.dumps(data) + '\n')
    data = {"content": test_item_2, "mime_type": "image/jpeg"}
    f.write(json.dumps(data) + '\n')

print(gcs_input_uri)
! gsutil cat $gcs_input_uri
```
### Make the batch prediction request
Now that your `Model` resource is trained, you can make a batch prediction by invoking the `batch_request()` method, with the following parameters:
- `job_display_name`: The human readable name for the batch prediction job.
- `gcs_source`: A list of one or more batch request input files.
- `gcs_destination_prefix`: The Cloud Storage location for storing the batch prediction results.
- `sync`: If set to `True`, the call will block while waiting for the asynchronous batch job to complete.
```
# Launch an asynchronous batch prediction job against the trained model.
# The original passed "$(DATASET_ALIAS)_" — an unexpanded template
# placeholder — as the display name; use the same "flowers_" prefix as
# the dataset and training job.
batch_predict_job = model.batch_predict(
    job_display_name="flowers_" + TIMESTAMP,
    gcs_source=gcs_input_uri,            # JSONL file listing the test images
    gcs_destination_prefix=BUCKET_NAME,  # where result shards are written
    sync=False                           # don't block; wait() is called below
)

print(batch_predict_job)
```
### Wait for completion of batch prediction job
Next, wait for the batch job to complete.
```
batch_predict_job.wait()
```
### Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method `iter_outputs()` to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:
- `content`: The prediction request.
- `prediction`: The prediction response.
- `ids`: The internal assigned unique identifiers for each prediction request.
- `displayNames`: The class names for each class label.
- `confidences`: The predicted confidence, between 0 and 1, per class label.
```
# Collect the names of the result shards ("prediction*" files) produced
# by the batch job.
bp_iter_outputs = batch_predict_job.iter_outputs()

prediction_results = list()
for blob in bp_iter_outputs:
    if blob.name.split("/")[-1].startswith("prediction"):
        prediction_results.append(blob.name)

# Print the first prediction record from each result shard.
tags = list()  # NOTE(review): never populated — appears to be unused
for prediction_result in prediction_results:
    gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
    with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
        for line in gfile.readlines():
            line = json.loads(line)
            print(line)
            break
```
# Cleaning up
To clean up all GCP resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex dataset object
try:
if delete_dataset and 'dataset' in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if delete_model and 'model' in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if delete_endpoint and 'model' in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if delete_batchjob and 'model' in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
if delete_bucket and 'BUCKET_NAME' in globals():
! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
# Deep Markov Model
## Introduction
We're going to build a deep probabilistic model for sequential data: the deep markov model. The particular dataset we want to model is composed of snippets of polyphonic music. Each time slice in a sequence spans a quarter note and is represented by an 88-dimensional binary vector that encodes the notes at that time step.
Since music is (obviously) temporally coherent, we need a model that can represent complex time dependencies in the observed data. It would not, for example, be appropriate to consider a model in which the notes at a particular time step are independent of the notes at previous time steps. One way to do this is to build a latent variable model in which the variability and temporal structure of the observations is controlled by the dynamics of the latent variables.
One particular realization of this idea is a markov model, in which we have a chain of latent variables, with each latent variable in the chain conditioned on the previous latent variable. This is a powerful approach, but if we want to represent complex data with complex (and in this case unknown) dynamics, we would like our model to be sufficiently flexible to accommodate dynamics that are potentially highly non-linear. Thus a deep markov model: we allow for the transition probabilities governing the dynamics of the latent variables as well as the the emission probabilities that govern how the observations are generated by the latent dynamics to be parameterized by (non-linear) neural networks.
The specific model we're going to implement is based on the following reference:
[1] `Structured Inference Networks for Nonlinear State Space Models`,<br />
Rahul G. Krishnan, Uri Shalit, David Sontag
Please note that while we do not assume that the reader of this tutorial has read the reference, it's definitely a good place to look for a more comprehensive discussion of the deep markov model in the context of other time series models.
We've described the model, but how do we go about training it? The inference strategy we're going to use is variational inference, which requires specifying a parameterized family of distributions that can be used to approximate the posterior distribution over the latent random variables. Given the non-linearities and complex time-dependencies inherent in our model and data, we expect the exact posterior to be highly non-trivial. So we're going to need a flexible family of variational distributions if we hope to learn a good model. Happily, together Pytorch and Pyro provide all the necessary ingredients. As we will see, assembling them will be straightforward. Let's get to work.
## The Model
A convenient way to describe the high-level structure of the model is with a graphical model.
Here, we've rolled out the model assuming that the sequence of observations is of length three: $\{{\bf x}_1, {\bf x}_2, {\bf x}_3\}$. Mirroring the sequence of observations we also have a sequence of latent random variables: $\{{\bf z}_1, {\bf z}_2, {\bf z}_3\}$. The figure encodes the structure of the model. The corresponding joint distribution is
$$p({\bf x}_{123} , {\bf z}_{123})=p({\bf x}_1|{\bf z}_1)p({\bf x}_2|{\bf z}_2)p({\bf x}_3|{\bf z}_3)p({\bf z}_1)p({\bf z}_2|{\bf z}_1)p({\bf z}_3|{\bf z}_2)$$
Conditioned on ${\bf z}_t$, each observation ${\bf x}_t$ is independent of the other observations. This can be read off from the fact that each ${\bf x}_t$ only depends on the corresponding latent ${\bf z}_t$, as indicated by the downward pointing arrows. We can also read off the markov property of the model: each latent ${\bf z}_t$, when conditioned on the previous latent ${\bf z}_{t-1}$, is independent of all previous latents $\{ {\bf z}_{t-2}, {\bf z}_{t-3}, ...\}$. This effectively says that everything one needs to know about the state of the system at time $t$ is encapsulated by the latent ${\bf z}_{t}$.
We will assume that the observation likelihoods, i.e. the probability distributions $p({{\bf x}_t}|{{\bf z}_t})$ that control the observations, are given by the bernoulli distribution. This is an appropriate choice since our observations are all 0 or 1. For the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ that control the latent dynamics, we choose (conditional) gaussian distributions with diagonal covariances. This is reasonable since we assume that the latent space is continuous.
The solid black squares represent non-linear functions parameterized by neural networks. This is what makes this a _deep_ markov model. Note that the black squares appear in two different places: in between pairs of latents and in between latents and observations. The non-linear function that connects the latent variables ('Trans' in Fig. 1) controls the dynamics of the latent variables. Since we allow the conditional probability distribution of ${\bf z}_{t}$ to depend on ${\bf z}_{t-1}$ in a complex way, we will be able to capture complex dynamics in our model. Similarly, the non-linear function that connects the latent variables to the observations ('Emit' in Fig. 1) controls how the observations depend on the latent dynamics.
Some additional notes:
- we can freely choose the dimension of the latent space to suit the problem at hand: small latent spaces for simple problems and larger latent spaces for problems with complex dynamics
- note the parameter ${\bf z}_0$ in Fig. 1. as will become more apparent from the code, this is just a convenient way for us to parameterize the probability distribution $p({\bf z}_1)$ for the first time step, where there are no previous latents to condition on.
### The Gated Transition and the Emitter
Without further ado, let's start writing some code. We first define the two Pytorch Modules that correspond to the black squares in Fig. 1. First the emission function:
```
class Emitter(nn.Module):
"""
Parameterizes the bernoulli observation likelihood p(x_t | z_t)
"""
def __init__(self, input_dim, z_dim, emission_dim):
super(Emitter, self).__init__()
# initialize the three linear transformations used in the neural network
self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)
self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)
self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)
# initialize the two non-linearities used in the neural network
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, z_t):
"""
Given the latent z at a particular time step t we return the vector of
probabilities `ps` that parameterizes the bernoulli distribution p(x_t|z_t)
"""
h1 = self.relu(self.lin_z_to_hidden(z_t))
h2 = self.relu(self.lin_hidden_to_hidden(h1))
ps = self.sigmoid(self.lin_hidden_to_input(h2))
return ps
```
In the constructor we define the linear transformations that will be used in our emission function. Note that `emission_dim` is the number of hidden units in the neural network. We also define the non-linearities that we will be using. The forward call defines the computational flow of the function. We take in the latent ${\bf z}_{t}$ as input and do a sequence of transformations until we obtain a vector of length 88 that defines the emission probabilities of our bernoulli likelihood. Because of the sigmoid, each element of `ps` will be between 0 and 1 and will define a valid probability. Taken together the elements of `ps` encode which notes we expect to observe at time $t$ given the state of the system (as encoded in ${\bf z}_{t}$).
Now we define the gated transition function:
```
class GatedTransition(nn.Module):
"""
Parameterizes the gaussian latent transition probability p(z_t | z_{t-1})
See section 5 in the reference for comparison.
"""
def __init__(self, z_dim, transition_dim):
super(GatedTransition, self).__init__()
# initialize the six linear transformations used in the neural network
self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
self.lin_sig = nn.Linear(z_dim, z_dim)
self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
# modify the default initialization of lin_z_to_mu
# so that it's starts out as the identity function
self.lin_z_to_mu.weight.data = torch.eye(z_dim)
self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
# initialize the three non-linearities used in the neural network
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.softplus = nn.Softplus()
def forward(self, z_t_1):
"""
Given the latent z_{t-1} corresponding to the time step t-1
we return the mean and sigma vectors that parameterize the
(diagonal) gaussian distribution p(z_t | z_{t-1})
"""
# compute the gating function and one minus the gating function
gate_intermediate = self.relu(self.lin_gate_z_to_hidden(z_t_1))
gate = self.sigmoid(self.lin_gate_hidden_to_z(gate_intermediate))
one_minus_gate = ng_ones(gate.size()).type_as(gate) - gate
# compute the 'proposed mean'
proposed_mean_intermediate = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))
proposed_mean = self.lin_proposed_mean_hidden_to_z(proposed_mean_intermediate)
# assemble the actual mean used to sample z_t, which mixes a linear transformation
# of z_{t-1} with the proposed mean modulated by the gating function
mu = one_minus_gate * self.lin_z_to_mu(z_t_1) + gate * proposed_mean
# compute the sigma used to sample z_t, using the proposed mean from above as input
# the softplus ensures that sigma is positive
sigma = self.softplus(self.lin_sig(self.relu(proposed_mean)))
# return mu, sigma which can be fed into Normal
return mu, sigma
```
This mirrors the structure of `Emitter` above, with the difference that the computational flow is a bit more complicated. This is for two reasons. First, the output of `GatedTransition` needs to define a valid (diagonal) gaussian distribution. So we need to output two parameters: the mean `mu`, and the (square root) covariance `sigma`. These both need to have the same dimension as the latent space. Second, we don't want to _force_ the dynamics to be non-linear. Thus our mean `mu` is a sum of two terms, only one of which depends non-linearly on the input `z_t_1`. This way we can support both linear and non-linear dynamics (or indeed have the dynamics of part of the latent space be linear, while the remainder of the dynamics is non-linear).
### Model - a Pyro Stochastic Function
So far everything we've done is pure Pytorch. To finish translating our model into code we need to bring Pyro into the picture. Basically we need to implement the stochastic nodes (i.e. the circles) in Fig. 1. To do this we introduce a callable `model()` that contains the Pyro primitives `pyro.sample` and `pyro.observe`. The `sample` statements will be used to specify the joint distribution over the latents ${\bf z}_{1:T}$. The `observe` statements will specify how the observations ${\bf x}_{1:T}$ depend on the latents. Before we look at the complete code for `model()`, let's look at a stripped down version that contains the main logic:
```
# NOTE: schematic pseudocode -- `(...)` stands in for the real argument list;
# the complete, runnable version of model() appears later in this tutorial.
# NOTE(review): the sample/observe calls use the legacy Pyro 0.x API
# (positional distribution arguments) -- confirm against the Pyro version in use.
def model(...):
    # for t = 1 there is no previous latent, so condition on the trainable z_0
    z_prev = self.z_0
    # sample the latents z and observed x's one time step at a time
    for t in range(1, T_max + 1):
        # the next two lines of code sample z_t ~ p(z_t | z_{t-1})
        # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})
        z_mu, z_sigma = self.trans(z_prev)
        # then sample z_t according to dist.Normal(z_mu, z_sigma)
        z_t = pyro.sample("z_%d" % t, dist.Normal, z_mu, z_sigma)
        # compute the probabilities that parameterize the bernoulli likelihood
        emission_probs_t = self.emitter(z_t)
        # the next statement instructs pyro to observe x_t according to the
        # bernoulli distribution p(x_t|z_t)
        pyro.observe("obs_x_%d" % t, dist.bernoulli,
                     mini_batch[:, t - 1, :], emission_probs_t)
        # the latent sampled at this time step will be conditioned upon
        # in the next time step so keep track of it
        z_prev = z_t
```
The first thing we need to do is sample ${\bf z}_1$. Once we've sampled ${\bf z}_1$, we can sample ${\bf z}_2 \sim p({\bf z}_2|{\bf z}_1)$ and so on. This is the logic implemented in the `for` loop. The parameters `z_mu` and `z_sigma` that define the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ are computed using `self.trans`, which is just an instance of the `GatedTransition` module defined above. For the first time step at $t=1$ we condition on `self.z_0`, which is a (trainable) `Parameter`, while for subsequent time steps we condition on the previously drawn latent. Note that each random variable `z_t` is assigned a unique name by the user.
Once we've sampled ${\bf z}_t$ at a given time step, we need to observe the datapoint ${\bf x}_t$. So we pass `z_t` through `self.emitter`, an instance of the `Emitter` module defined above to obtain `emission_probs_t`. Together with the argument `dist.bernoulli` in the `observe` statement, these probabilities fully specify the observation likelihood. Finally, we also specify the slice of observed data ${\bf x}_t$: `mini_batch[:, t - 1, :]`.
This fully specifies our model and encapsulates it in a callable that can be passed to Pyro. Before we move on let's look at the full version of `model()` and go through some of the details we glossed over in our first pass.
```
def model(self, mini_batch, mini_batch_reversed, mini_batch_mask,
          mini_batch_seq_lengths, annealing_factor=1.0):
    """
    Generative model p(x_{1:T} | z_{1:T}) p(z_{1:T}).

    mini_batch: 3-d tensor (batch, time, features) of observed sequences
    mini_batch_reversed: unused here; present so model/guide share one signature
    mini_batch_mask: 0/1 mask of shape (batch, T_max) marking valid time slices
    mini_batch_seq_lengths: unused here; present so model/guide share one signature
    annealing_factor: scales the log-prob mask of the latents (KL annealing)

    NOTE(review): uses the legacy Pyro 0.x API (positional distribution args,
    pyro.observe, log_pdf_mask) -- confirm against the installed Pyro version.
    """
    # this is the number of time steps we need to process in the mini-batch
    T_max = mini_batch.size(1)
    # register all pytorch (sub)modules with pyro
    pyro.module("dmm", self)
    # set z_prev = z_0 to setup the recursive conditioning
    z_prev = self.z_0
    # sample the latents z and observed x's one time step at a time
    for t in range(1, T_max + 1):
        # the next three lines of code sample z_t ~ p(z_t | z_{t-1})
        # first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})
        z_mu, z_sigma = self.trans(z_prev)
        # then sample z_t according to dist.Normal(z_mu, z_sigma);
        # the mask zeroes out contributions from padded (invalid) time slices
        z_t = pyro.sample("z_%d" % t, dist.Normal, z_mu, z_sigma,
                          log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])
        # compute the probabilities that parameterize the bernoulli likelihood
        emission_probs_t = self.emitter(z_t)
        # the next statement instructs pyro to observe x_t according to the
        # bernoulli distribution p(x_t|z_t)
        pyro.observe("obs_x_%d" % t, dist.bernoulli, mini_batch[:, t - 1, :],
                     emission_probs_t,
                     log_pdf_mask=mini_batch_mask[:, t - 1:t])
        # the latent sampled at this time step will be conditioned upon
        # in the next time step so keep track of it
        z_prev = z_t
```
The first thing to note is that `model()` takes a number of arguments. For now let's just take a look at `mini_batch` and `mini_batch_mask`. `mini_batch` is a three dimensional tensor, with the first dimension being the batch dimension, the second dimension being the temporal dimension, and the final dimension being the features (88-dimensional in our case). To speed up the code, whenever we run `model` we're going to process an entire mini-batch of sequences (i.e. we're going to take advantage of vectorization).
This is sensible because our model is implicitly defined over a single observed sequence. The probability of a set of sequences is just given by the products of the individual sequence probabilities. In other words, given the parameters of the model the sequences are conditionally independent.
This vectorization introduces some complications because sequences can be of different lengths. This is where `mini_batch_mask` comes in. `mini_batch_mask` is a two dimensional 0/1 mask of dimensions `mini_batch_size` x `T_max`, where `T_max` is the maximum length of any sequence in the mini-batch. This encodes which parts of `mini_batch` are valid observations.
So the first thing we do is grab `T_max`: we have to unroll our model for at least this many time steps. Note that this will result in a lot of 'wasted' computation, since some of the sequences will be shorter than `T_max`, but this is a small price to pay for the big speed-ups that come with vectorization. We just need to make sure that none of the 'wasted' computations 'pollute' our model computation. We accomplish this by passing the mask appropriate to time step $t$ as an argument `log_pdf_mask` to both the `sample` and `observe` statements.
Finally, the line `pyro.module("dmm", self)` is equivalent to a bunch of `pyro.param` statements for each parameter in the model. This lets Pyro know which parameters are part of the model. Just like for `sample` and `observe` statements, we give the module a unique name. This name will be incorporated into the name of the `Parameters` in the model. We leave a discussion of the KL annealing factor for later.
## Inference
At this point we've fully specified our model. The next step is to set ourselves up for inference. As mentioned in the introduction, our inference strategy is going to be variational inference (see [SVI Part I](svi_part_i.html) for an introduction). So our next task is to build a family of variational distributions appropriate to doing inference in a deep markov model. However, at this point it's worth emphasizing that nothing about the way we've implemented `model()` ties us to variational inference. In principle we could use _any_ inference strategy available in Pyro. For example, in this particular context one could imagine using some variant of Sequential Monte Carlo (although this is not currently supported in Pyro).
### Guide
The purpose of the guide (i.e. the variational distribution) is to provide a (parameterized) approximation to the exact posterior $p({\bf z}_{1:T}|{\bf x}_{1:T})$. Actually, there's an implicit assumption here which we should make explicit, so let's take a step back.
Suppose our dataset $\mathcal{D}$ consists of $N$ sequences
$\{ {\bf x}_{1:T_1}^1, {\bf x}_{1:T_2}^2, ..., {\bf x}_{1:T_N}^N \}$. Then the posterior we're actually interested in is given by
$p({\bf z}_{1:T_1}^1, {\bf z}_{1:T_2}^2, ..., {\bf z}_{1:T_N}^N | \mathcal{D})$, i.e. we want to infer the latents for _all_ $N$ sequences. Even for small $N$ this is a very high-dimensional distribution that will require a very large number of parameters to specify. In particular if we were to directly parameterize the posterior in this form, the number of parameters required would grow (at least) linearly with $N$. One way to avoid this nasty growth with the size of the dataset is *amortization* (see the analogous discussion in [SVI Part II](http://pyro.ai/examples/svi_part_ii.html)).
#### Aside: Amortization
This works as follows. Instead of introducing variational parameters for each sequence in our dataset, we're going to learn a single parametric function $f({\bf x}_{1:T})$ and work with a variational distribution that has the form $\prod_{n=1}^N q({\bf z}_{1:T_n}^n | f({\bf x}_{1:T_n}^n))$. The function $f(\cdot)$—which basically maps a given observed sequence to a set of variational parameters tailored to that sequence—will need to be sufficiently rich to capture the posterior accurately, but now we can handle large datasets without having to introduce an obscene number of variational parameters.
So our task is to construct the function $f(\cdot)$. Since in our case we need to support variable-length sequences, it's only natural that $f(\cdot)$ have a RNN in the loop. Before we look at the various component parts that make up our $f(\cdot)$ in detail, let's look at a computational graph that encodes the basic structure: <p>
At the bottom of the figure we have our sequence of three observations. These observations will be consumed by a RNN that reads the observations from right to left and outputs three hidden states $\{ {\bf h}_1, {\bf h}_2,{\bf h}_3\}$. Note that this computation is done _before_ we sample any latent variables. Next, each of the hidden states will be fed into a `Combiner` module whose job is to output the mean and covariance of the conditional distribution $q({\bf z}_t | {\bf z}_{t-1}, {\bf x}_{t:T})$, which we take to be given by a diagonal gaussian distribution. (Just like in the model, the conditional structure of ${\bf z}_{1:T}$ in the guide is such that we sample ${\bf z}_t$ forward in time.) In addition to the RNN hidden state, the `Combiner` also takes the latent random variable from the previous time step as input, except for $t=1$, where it instead takes the trainable (variational) parameter ${\bf z}_0^{\rm{q}}$.
#### Aside: Guide Structure
Why do we setup the RNN to consume the observations from right to left? Why not left to right? With this choice our conditional distribution $q({\bf z}_t |...)$ depends on two things:
- the latent ${\bf z}_{t-1}$ from the previous time step; and
- the observations ${\bf x}_{t:T}$, i.e. the current observation together with all future observations
We are free to make other choices; all that is required is that the guide is a properly normalized distribution that plays nice with autograd. This particular choice is motivated by the dependency structure of the true posterior: see reference [1] for a detailed discussion. In brief, while we could, for example, condition on the entire sequence of observations, because of the markov structure of the model everything that we need to know about the previous observations ${\bf x}_{1:t-1}$ is encapsulated by ${\bf z}_{t-1}$. We could condition on more things, but there's no need; and doing so will probably tend to dilute the learning signal. So running the RNN from right to left is the most natural choice for this particular model.
So much for the high-level structure of the guide. Let's look at the component parts in detail. First, the `Combiner` module:
```
class Combiner(nn.Module):
"""
Parameterizes q(z_t | z_{t-1}, x_{t:T}), which is the basic building block
of the guide (i.e. the variational distribution). The dependence on x_{t:T} is
through the hidden state of the RNN (see the pytorch module `rnn` below)
"""
def __init__(self, z_dim, rnn_dim):
super(Combiner, self).__init__()
# initialize the three linear transformations used in the neural network
self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)
self.lin_hidden_to_mu = nn.Linear(rnn_dim, z_dim)
self.lin_hidden_to_sigma = nn.Linear(rnn_dim, z_dim)
# initialize the two non-linearities used in the neural network
self.tanh = nn.Tanh()
self.softplus = nn.Softplus()
def forward(self, z_t_1, h_rnn):
"""
Given the latent z at at a particular time step t-1 as well as the hidden
state of the RNN h(x_{t:T}) we return the mean and sigma vectors that
parameterize the (diagonal) gaussian distribution q(z_t | z_{t-1}, x_{t:T})
"""
# combine the rnn hidden state with a transformed version of z_t_1
h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)
# use the combined hidden state to compute the mean used to sample z_t
mu = self.lin_hidden_to_mu(h_combined)
# use the combined hidden state to compute the sigma used to sample z_t
sigma = self.softplus(self.lin_hidden_to_sigma(h_combined))
# return mu, sigma which can be fed into Normal
return mu, sigma
```
This module has the same general structure as `Emitter` and `GatedTransition` in the model. The only thing of note is that because the `Combiner` needs to consume two inputs at each time step, it transforms the inputs into a single combined hidden state `h_combined` before it computes the outputs.
Apart from the RNN, we now have all the ingredients we need to construct our guide distribution.
Happily, Pytorch has great built-in RNN modules, so we don't have much work to do here. We'll see where we instantiate the RNN later. Let's instead jump right into the definition of the stochastic function `guide()`.
```
def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask,
          mini_batch_seq_lengths, annealing_factor=1.0):
    """
    Variational distribution q(z_{1:T} | x_{1:T}).

    Takes the same arguments as model() (a Pyro requirement for
    model/guide pairs). The RNN consumes the reversed mini-batch so that
    each hidden state summarizes the current and future observations x_{t:T}.

    NOTE(review): uses the legacy Pyro 0.x API (positional distribution args,
    log_pdf_mask) -- confirm against the installed Pyro version.
    """
    # this is the number of time steps we need to process in the mini-batch
    T_max = mini_batch.size(1)
    # register all pytorch (sub)modules with pyro
    pyro.module("dmm", self)
    # if on gpu we need the fully broadcast view of the rnn initial state
    # to be in contiguous gpu memory
    h_0_contig = self.h_0 if not self.use_cuda \
        else self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous()
    # push the observed x's through the rnn;
    # rnn_output contains the hidden state at each time step
    rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig)
    # reverse the time-ordering in the hidden state and un-pack it
    # (sampling below proceeds in normal temporal order)
    rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths)
    # set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...)
    z_prev = self.z_q_0
    # sample the latents z one time step at a time
    for t in range(1, T_max + 1):
        # get the parameters for the distribution q(z_t | z_{t-1}, x_{t:T})
        z_mu, z_sigma = self.combiner(z_prev, rnn_output[:, t - 1, :])
        # sample z_t from the distribution q(z_t|...); the mask zeroes out
        # contributions from padded (invalid) time slices
        z_t = pyro.sample("z_%d" % t, dist.Normal, z_mu, z_sigma,
                          log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])
        # the latent sampled at this time step will be conditioned upon in the next time step
        # so keep track of it
        z_prev = z_t
```
The high-level structure of `guide()` is very similar to `model()`. First note that the model and guide take the same arguments: this is a general requirement for model/guide pairs in Pyro. As in the model, there's a call to `pyro.module` that registers all the parameters with Pyro. Also, the `for` loop has the same structure as the one in `model()`, with the difference that the guide only needs to sample latents (there are no `observe` statements). Finally, note that the names of the latent variables in the guide exactly match those in the model. This is how Pyro knows to correctly align random variables.
The RNN logic should be familiar to Pytorch users, but let's go through it quickly. First we prepare the initial state of the RNN, `h_0`. Then we invoke the RNN via its forward call; the resulting tensor `rnn_output` contains the hidden states for the entire mini-batch. Note that because we want the RNN to consume the observations from right to left, the input to the RNN is `mini_batch_reversed`, which is a copy of `mini_batch` with all the sequences running in _reverse_ temporal order. Furthermore, `mini_batch_reversed` has been wrapped in a Pytorch `rnn.pack_padded_sequence` so that the RNN can deal with variable-length sequences. Since we do our sampling in latent space in normal temporal order, we use the helper function `pad_and_reverse` to reverse the hidden state sequences in `rnn_output`, so that we can feed the `Combiner` RNN hidden states that are correctly aligned and ordered. This helper function also unpacks the `rnn_output` so that it is no longer in the form of a Pytorch `rnn.pack_padded_sequence`.
## Packaging the Model and Guide as a Pytorch Module
At this juncture, we're ready to proceed to inference. But before we do so let's quickly go over how we packaged the model and guide as a single Pytorch Module. This is generally good practice, especially for larger models.
```
class DMM(nn.Module):
    """
    This pytorch Module encapsulates the model as well as the
    variational distribution (the guide) for the Deep Markov Model
    """
    def __init__(self, input_dim=88, z_dim=100, emission_dim=100,
                 transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0,
                 num_iafs=0, iaf_dim=50, use_cuda=False):
        super(DMM, self).__init__()
        # instantiate pytorch modules used in the model and guide below
        # NOTE(review): num_iafs / iaf_dim are accepted but not used in the
        # code shown here -- presumably consumed in the full example script
        self.emitter = Emitter(input_dim, z_dim, emission_dim)
        self.trans = GatedTransition(z_dim, transition_dim)
        self.combiner = Combiner(z_dim, rnn_dim)
        # NOTE(review): with num_layers=1, pytorch applies no inter-layer
        # dropout and will warn if rnn_dropout_rate > 0 -- confirm intent
        self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu',
                          batch_first=True, bidirectional=False, num_layers=1, dropout=rnn_dropout_rate)
        # define a (trainable) parameters z_0 and z_q_0 that help define the probability
        # distributions p(z_1) and q(z_1)
        # (since for t = 1 there are no previous latents to condition on)
        self.z_0 = nn.Parameter(torch.zeros(z_dim))
        self.z_q_0 = nn.Parameter(torch.zeros(z_dim))
        # define a (trainable) parameter for the initial hidden state of the rnn
        self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))
        self.use_cuda = use_cuda
        # if on gpu cuda-ize all pytorch (sub)modules
        if use_cuda:
            self.cuda()
    # the model p(x_{1:T} | z_{1:T}) p(z_{1:T})
    # (stub -- the full definition is shown earlier in this tutorial)
    def model(...):
        # ... as above ...
    # the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution)
    # (stub -- the full definition is shown earlier in this tutorial)
    def guide(...):
        # ... as above ...
```
Since we've already gone over `model` and `guide`, our focus here is on the constructor. First we instantiate the four Pytorch modules that we use in our model and guide. On the model-side: `Emitter` and `GatedTransition`. On the guide-side: `Combiner` and the RNN.
Next we define Pytorch `Parameter`s for the initial state of the RNN as well as `z_0` and `z_q_0`, which are fed into `self.trans` and `self.combiner`, respectively, in lieu of the non-existent random variable $\bf z_0$.
The important point to make here is that all of these `Module`s and `Parameter`s are attributes of `DMM` (which itself inherits from `nn.Module`). This has the consequence they are all automatically registered as belonging to the module. So, for example, when we call `parameters()` on an instance of `DMM`, Pytorch will know to return all the relevant parameters. It also means that when we invoke `pyro.module("dmm", self)` in `model()` and `guide()`, all the parameters of both the model and guide will be registered with Pyro. Finally, it means that if we're running on a GPU, the call to `cuda()` will move all the parameters into GPU memory.
## Stochastic Variational Inference
With our model and guide at hand, we're finally ready to do inference. Before we look at the full logic that is involved in a complete experimental script, let's first see how to take a single gradient step. First we instantiate an instance of `DMM` and setup an optimizer.
```
# instantiate the dmm
# (input_dim, z_dim, etc. and the `args` namespace are defined elsewhere in
# the full script -- presumably via argparse; confirm against the source)
dmm = DMM(input_dim, z_dim, emission_dim, transition_dim, rnn_dim,
          args.rnn_dropout_rate, args.num_iafs, args.iaf_dim, args.cuda)
# setup optimizer
# ClippedAdam is Adam with gradient clipping (clip_norm) and lr decay (lrd),
# as described in the surrounding text
adam_params = {"lr": args.learning_rate, "betas": (args.beta1, args.beta2),
               "clip_norm": args.clip_norm, "lrd": args.lr_decay,
               "weight_decay": args.weight_decay}
optimizer = ClippedAdam(adam_params)
```
Here we're using an implementation of the Adam optimizer that includes gradient clipping. This mitigates some of the problems that can occur when training recurrent neural networks (e.g. vanishing/exploding gradients). Next we setup the inference algorithm.
```
# setup inference algorithm
# SVI pairs the model and guide and optimizes the ELBO with the given optimizer
# NOTE(review): the string "ELBO" / trace_graph flag follow the legacy Pyro 0.x
# SVI signature -- confirm against the installed Pyro version
svi = SVI(dmm.model, dmm.guide, optimizer, "ELBO", trace_graph=False)
```
The inference algorithm `SVI` uses a stochastic gradient estimator to take gradient steps on an objective function, which in this case is given by the ELBO (the evidence lower bound). As the name indicates, the ELBO is a lower bound to the log evidence: $\log p(\mathcal{D})$. As we take gradient steps that maximize the ELBO, we move our guide $q(\cdot)$ closer to the exact posterior.
The argument `trace_graph=False` indicates that we're using a version of the gradient estimator that doesn't need access to the dependency structure of the model and guide. Since all the latent variables in our model are reparameterizable, this is the appropriate gradient estimator for our use case. (It's also the default option.)
Assuming we've prepared the various arguments of `dmm.model` and `dmm.guide`, taking a gradient step is accomplished by calling
```
# one gradient step on the ELBO; the elided args mirror the model()/guide() signatures
svi.step(mini_batch, ...)
```
That's all there is to it!
Well, not quite. This will be the main step in our inference algorithm, but we still need to implement a complete training loop with preparation of mini-batches, evaluation, and so on. This sort of logic will be familiar to any deep learner but let's see how it looks in PyTorch/Pyro.
## The Black Magic of Optimization
Actually, before we get to the guts of training, let's take a moment and think a bit about the optimization problem we've setup. We've traded Bayesian inference in a non-linear model with a high-dimensional latent space—a hard problem—for a particular optimization problem. Let's not kid ourselves, this optimization problem is pretty hard too. Why? Let's go through some of the reasons:
- the space of parameters we're optimizing over is very high-dimensional (it includes all the weights in all the neural networks we've defined).
- our objective function (the ELBO) cannot be computed analytically. so our parameter updates will be following noisy Monte Carlo gradient estimates
- data-subsampling serves as an additional source of stochasticity: even if we wanted to, we couldn't in general take gradient steps on the ELBO defined over the whole dataset (actually in our particular case the dataset isn't so large, but let's ignore that).
- given all the neural networks and non-linearities we have in the loop, our (stochastic) loss surface is highly non-trivial
The upshot is that if we're going to find reasonable (local) optima of the ELBO, we better take some care in deciding how to do optimization. This isn't the time or place to discuss all the different strategies that one might adopt, but it's important to emphasize how decisive a good or bad choice in learning hyperparameters (the learning rate, the mini-batch size, etc.) can be.
Before we move on, let's discuss one particular optimization strategy that we're making use of in greater detail: KL annealing. In our case the ELBO is the sum of two terms: an expected log likelihood term (which measures model fit) and a sum of KL divergence terms (which serve to regularize the approximate posterior):
$\rm{ELBO} = \mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$
This latter term can be a quite strong regularizer, and in early stages of training it has a tendency to favor regions of the loss surface that contain lots of bad local optima. One strategy to avoid these bad local optima, which was also adopted in reference [1], is to anneal the KL divergence terms by multiplying them by a scalar `annealing_factor` that ranges between zero and one:
$\mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \rm{annealing\_factor} \times \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$
The idea is that during the course of training the `annealing_factor` rises slowly from its initial value at/near zero to its final value at 1.0. The annealing schedule is arbitrary; below we will use a simple linear schedule.
Finally, we should mention that the main difference between the DMM implementation described here and the one used in reference [1] is that they take advantage of the analytic formula for the KL divergence between two gaussian distributions (whereas we rely on Monte Carlo estimates). This leads to lower variance gradient estimates of the ELBO, which makes training a bit easier. We can still train the model without making this analytic substitution, but training probably takes somewhat longer because of the higher variance. Support for analytic KL divergences in Pyro is something we plan to add in the near future.
## Data Loading, Training, and Evaluation
First we load the data. There are 229 sequences in the training dataset, each with an average length of ~60 time steps.
```
# location of the preprocessed polyphonic music data (JSB chorales)
jsb_file_loc = "./data/jsb_processed.pkl"
# use a context manager so the file handle is closed promptly
# (the original `pickle.load(open(...))` leaked the handle)
with open(jsb_file_loc, "rb") as f:
    data = pickle.load(f)
# unpack train / test / validation splits; each split provides the raw
# sequences plus per-sequence lengths (sequences are variable length)
training_seq_lengths = data['train']['sequence_lengths']
training_data_sequences = data['train']['sequences']
test_seq_lengths = data['test']['sequence_lengths']
test_data_sequences = data['test']['sequences']
val_seq_lengths = data['valid']['sequence_lengths']
val_data_sequences = data['valid']['sequences']
N_train_data = len(training_seq_lengths)
N_train_time_slices = np.sum(training_seq_lengths)
# number of mini-batches per epoch: ceil(N_train_data / mini_batch_size)
N_mini_batches = int(N_train_data / args.mini_batch_size +
                     int(N_train_data % args.mini_batch_size > 0))
```
For this dataset we will typically use a `mini_batch_size` of 20, so that there will be 12 mini-batches per epoch. Next we define the function `process_minibatch` which prepares a mini-batch for training and takes a gradient step:
```
def process_minibatch(epoch, which_mini_batch, shuffled_indices):
    """Prepare one training mini-batch and take a single SVI gradient step.

    Args:
        epoch: current epoch number (used for the KL annealing schedule).
        which_mini_batch: index of the mini-batch within this epoch.
        shuffled_indices: permutation of the training-set indices for this epoch.

    Returns:
        A float: the (noisy) loss estimate for this mini-batch.
    """
    # KL annealing: ramp the factor linearly from `minimum_annealing_factor`
    # up to 1.0 over the first `annealing_epochs` epochs, then hold at unity.
    if args.annealing_epochs > 0 and epoch < args.annealing_epochs:
        min_af = args.minimum_annealing_factor
        progress = (float(which_mini_batch + epoch * N_mini_batches + 1) /
                    float(args.annealing_epochs * N_mini_batches))
        annealing_factor = min_af + (1.0 - min_af) * progress
    else:
        annealing_factor = 1.0
    # slice out the training-set indices belonging to this mini-batch
    start = which_mini_batch * args.mini_batch_size
    end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data])
    batch_indices = shuffled_indices[start:end]
    # the data-loader helper sorts, reverses, masks, and (optionally) cuda-izes
    mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \
        = poly.get_mini_batch(batch_indices, training_data_sequences,
                              training_seq_lengths, cuda=args.cuda)
    # take one gradient step on the annealed ELBO and report the loss
    loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask,
                    mini_batch_seq_lengths, annealing_factor)
    return loss
```
We first compute the KL annealing factor appropriate to the mini-batch (according to a linear schedule as described earlier). We then compute the mini-batch indices, which we pass to the helper function `get_mini_batch`. This helper function takes care of a number of different things:
- it sorts each mini-batch by sequence length
- it calls another helper function to get a copy of the mini-batch in reversed temporal order
- it packs each reversed mini-batch in a `rnn.pack_padded_sequence`, which is then ready to be ingested by the RNN
- it cuda-izes all tensors if we're on a GPU
- it calls another helper function to get an appropriate 0/1 mask for the mini-batch
We then pipe all the return values of `get_mini_batch()` into `elbo.step(...)`. Recall that these arguments will be further piped to `model(...)` and `guide(...)` during construction of the gradient estimator in `elbo`. Finally, we return a float which is a noisy estimate of the loss for that mini-batch.
We now have all the ingredients required for the main bit of our training loop:
```
# main training loop: one full pass over the shuffled training set per epoch
times = [time.time()]
for epoch in range(args.num_epochs):
    # accumulator for our estimate of the negative log likelihood
    # (or rather -elbo) for this epoch
    epoch_nll = 0.0
    # prepare mini-batch subsampling indices for this epoch
    shuffled_indices = np.arange(N_train_data)
    np.random.shuffle(shuffled_indices)
    # process each mini-batch; this is where we take gradient steps
    for which_mini_batch in range(N_mini_batches):
        epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices)
    # report training diagnostics; normalize the loss by the total number of
    # time slices so it is comparable across datasets (and to reference [1])
    times.append(time.time())
    epoch_time = times[-1] - times[-2]
    log("[training epoch %04d] %.4f \t\t\t\t(dt = %.3f sec)" %
        (epoch, epoch_nll / N_train_time_slices, epoch_time))
```
At the beginning of each epoch we shuffle the indices pointing to the training data. We then process each mini-batch until we've gone through the entire training set, accumulating the training loss as we go. Finally we report some diagnostic info. Note that we normalize the loss by the total number of time slices in the training set (this allows us to compare to reference [1]).
## Evaluation
This training loop is still missing any kind of evaluation diagnostics. Let's fix that. First we need to prepare the validation and test data for evaluation. Since the validation and test datasets are small enough that we can easily fit them into memory, we're going to process each dataset batchwise (i.e. we will not be breaking up the dataset into mini-batches). [_Aside: at this point the reader may ask why we don't do the same thing for the training set. The reason is that additional stochasticity due to data-subsampling is often advantageous during optimization: in particular it can help us avoid local optima._] And, in fact, in order to get a less noisy estimate of the ELBO, we're going to compute a multi-sample estimate. The simplest way to do this would be as follows:
```
val_loss = svi.evaluate_loss(val_batch, ..., num_particles=5)
```
This, however, would involve an explicit `for` loop with five iterations. For our particular model, we can do better and vectorize the whole computation. The only way to do this currently in Pyro is to explicitly replicate the data `n_eval_samples` many times. This is the strategy we follow:
```
# package repeated copies of val/test data for faster evaluation
# (i.e. set us up for vectorization)
def rep(x):
    """Repeat each row of x `n_eval_samples` times along axis 0 so a
    multi-sample ELBO estimate can be computed in one vectorized pass."""
    return np.repeat(x, n_eval_samples, axis=0)
# get the validation/test data ready for the dmm: pack into sequences, etc.
val_seq_lengths = rep(val_seq_lengths)
test_seq_lengths = rep(test_seq_lengths)
# select every (replicated) sequence in each split; volatile=True marks the
# tensors as not requiring gradients, since we only evaluate on this data
val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch(
    np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences),
    val_seq_lengths, volatile=True, cuda=args.cuda)
test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = poly.get_mini_batch(
    np.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences),
    test_seq_lengths, volatile=True, cuda=args.cuda)
```
Note that we make use of the same helper function `get_mini_batch` as before, except this time we select the entire datasets. Also, we mark the data as `volatile`, which lets Pytorch know that we won't be computing any gradients; this results in further speed-ups. With the test and validation data now fully prepped, we define the helper function that does the evaluation:
```
def do_evaluation():
    """Compute the normalized -ELBO on the full validation and test sets.

    Returns:
        (val_nll, test_nll): losses normalized by the total number of
        time slices in each split.
    """
    # switch the guide RNN to evaluation mode (disables dropout if applicable)
    dmm.rnn.eval()
    nlls = []
    for batch, rev, mask, seq_lens in (
            (val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths),
            (test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths)):
        total = svi.evaluate_loss(batch, rev, mask, seq_lens)
        nlls.append(total / np.sum(seq_lens))
    # restore training mode (re-enables dropout if applicable)
    dmm.rnn.train()
    return nlls[0], nlls[1]
```
We simply call the `evaluate_loss` method of `elbo`, which takes the same arguments as `step()`, namely the arguments that are passed to the model and guide. Note that we have to put the RNN into and out of evaluation mode to account for dropout. We can now stick `do_evaluation()` into the training loop; see `dmm.py` for details.
## Results
Let's make sure that our implementation gives reasonable results. We can use the numbers reported in reference [1] as a sanity check. For the same dataset and a similar model/guide setup (dimension of the latent space, number of hidden units in the RNN, etc.) they report a normalized negative log likelihood (NLL) of `6.93` on the test set (lower is better$)^{\S}$. This is to be compared to our result of `6.87`. These numbers are very much in the same ball park, which is reassuring. It seems that, at least for this dataset, not using analytic expressions for the KL divergences doesn't degrade the quality of the learned model (although, as discussed above, the training probably takes somewhat longer).
In the figure we show how the test NLL progresses during training for a single sample run (one with a rather conservative learning rate). Most of the progress is during the first 3000 epochs or so, with some marginal gains if we let training go on for longer. On a GeForce GTX 1080, 5000 epochs takes about 20 hours.
| `num_iafs` | test NLL |
|---|---|
| `0` | `6.87` |
| `1` | `6.82` |
| `2` | `6.80` |
Finally, we also report results for guides with normalizing flows in the mix (details to be found in the next section).
${ \S\;}$ Actually, they seem to report two numbers—6.93 and 7.03—for the same model/guide and it's not entirely clear how the two reported numbers are different.
## Bells, whistles, and other improvements
### Inverse Autoregressive Flows
One of the great things about a probabilistic programming language is that it encourages modularity. Let's showcase an example in the context of the DMM. We're going to make our variational distribution richer by adding normalizing flows to the mix (see reference [2] for a discussion). **This will only cost us four additional lines of code!**
First, in the `DMM` constructor we add
```
iafs = [InverseAutoregressiveFlow(z_dim, iaf_dim) for _ in range(num_iafs)]
self.iafs = nn.ModuleList(iafs)
```
This instantiates `num_iafs` many normalizing flows of the `InverseAutoregressiveFlow` type (see references [3,4]); each normalizing flow will have `iaf_dim` many hidden units. We then bundle the normalizing flows in a `nn.ModuleList`; this is just the PyTorchy way to package a list of `nn.Module`s. Next, in the guide we add the lines
```
if self.iafs.__len__() > 0:
z_dist = TransformedDistribution(z_dist, self.iafs)
```
Here we're taking the base distribution `z_dist`, which in our case is a conditional gaussian distribution, and using the `TransformedDistribution` construct we transform it into a non-gaussian distribution that is, by construction, richer than the base distribution. Voila!
### Checkpointing
If we want to recover from a catastrophic failure in our training loop, there are two kinds of state we need to keep track of. The first is the various parameters of the model and guide. The second is the state of the optimizers (e.g. in Adam this will include the running average of recent gradient estimates for each parameter).
In Pyro, the parameters can all be found in the `ParamStore`. However, Pytorch also keeps track of them for us via the `parameters()` method of `nn.Module`. So one simple way we can save the parameters of the model and guide is to make use of the `state_dict()` method of `dmm` in conjunction with `torch.save()`; see below. In the case that we have `InverseAutoregressiveFlow`'s in the loop, this is in fact the only option at our disposal. This is because the `InverseAutoregressiveFlow` module contains what are called 'persistent buffers' in PyTorch parlance. These are things that carry state but are not `Parameter`s. The `state_dict()` and `load_state_dict()` methods of `nn.Module` know how to deal with buffers correctly.
To save the state of the optimizers, we have to use functionality inside of `pyro.optim.PyroOptim`. Recall that the typical user never interacts directly with PyTorch `Optimizers` when using Pyro; since parameters can be created dynamically in an arbitrary probabilistic program, Pyro needs to manage `Optimizers` for us. In our case saving the optimizer state will be as easy as calling `optimizer.save()`. The loading logic is entirely analogous. So our entire logic for saving and loading checkpoints only takes a few lines:
```
# saves the model and optimizer states to disk
def save_checkpoint():
    """Persist the DMM parameters and the Pyro optimizer state to disk."""
    log(f"saving model to {args.save_model}...")
    # state_dict() captures parameters *and* persistent buffers (needed for IAFs)
    torch.save(dmm.state_dict(), args.save_model)
    log(f"saving optimizer states to {args.save_opt}...")
    optimizer.save(args.save_opt)
    log("done saving model and optimizer checkpoints to disk.")
# loads the model and optimizer states from disk
def load_checkpoint():
    """Restore the DMM parameters and the Pyro optimizer state from disk."""
    # both checkpoint files must exist before we attempt a restore
    assert exists(args.load_opt) and exists(args.load_model), \
        "--load-model and/or --load-opt misspecified"
    log(f"loading model from {args.load_model}...")
    dmm.load_state_dict(torch.load(args.load_model))
    log(f"loading optimizer states from {args.load_opt}...")
    optimizer.load(args.load_opt)
    log("done loading model and optimizer states.")
```
## Some final comments
A deep markov model is a relatively complex model. Now that we've taken the effort to implement a version of the deep markov model tailored to the polyphonic music dataset, we should ask ourselves what else we can do. What if we're handed a different sequential dataset? Do we have to start all over?
Not at all! The beauty of probabilistic programming is that it enables—and encourages—modular approaches to modeling and inference. Adapting our polyphonic music model to a dataset with continuous observations is as simple as changing the observation likelihood. The vast majority of the code could be taken over unchanged. This means that with a little bit of extra work, the code in this tutorial could be repurposed to enable a huge variety of different models.
## References
[1] `Structured Inference Networks for Nonlinear State Space Models`,<br />
Rahul G. Krishnan, Uri Shalit, David Sontag
[2] `Variational Inference with Normalizing Flows`,
<br />
Danilo Jimenez Rezende, Shakir Mohamed
[3] `Improving Variational Inference with Inverse Autoregressive Flow`,
<br />
Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling
[4] `MADE: Masked Autoencoder for Distribution Estimation Mathieu`,
<br />
Germain, Karol Gregor, Iain Murray, Hugo Larochelle
[5] `Modeling Temporal Dependencies in High-Dimensional Sequences:`
<br />
`Application to Polyphonic Music Generation and Transcription`,
<br />
Boulanger-Lewandowski, N., Bengio, Y. and Vincent, P.
| github_jupyter |
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
$ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
<font style="font-size:28px;" align="left"><b>Two Probabilistic Bits </b></font>
<br>
_prepared by Abuzer Yakaryilmaz_
<br><br>
[<img src="../qworld/images/watch_lecture.jpg" align="left">](https://youtu.be/ulbd-1c71sk)
<br><br><br>
Suppose that we have two probabilistic bits, and our probabilistic states respectively are
$ \myvector{0.2 \\ 0.8} \mbox{ and } \myvector{0.6 \\ 0.4 }. $
If we combine both bits as a single system, then what is the state of the combined system?
In total, we have four different states. We can name them as follows:
<ul>
<li>00: both bits are in state 0</li>
<li>01: the first bit is in state 0 and the second bit is in state 1</li>
<li>10: the first bit is in state 1 and the second bit is in state 0</li>
<li>11: both bits are in state 1</li>
</ul>
<h3> Task 1 </h3>
<b>Discussion and analysis:</b>
What are the probabilities of being in states $ 00 $, $ 01 $, $ 10 $, and $11$?
How can we represent these probabilities as a column vector?
<h3> Representation for states 0 and 1</h3>
The vector representation of state 0 is $ \myvector{1 \\ 0} $. Similarly, the vector representation of state 1 is $ \myvector{0 \\ 1} $.
We use $ \pstate{0} $ to represent $ \myvector{1 \\ 0} $ and $ \pstate{1} $ to represent $ \myvector{0 \\ 1} $.
Then, the probabilistic state $ \myvector{0.2 \\ 0.8} $ is also represented as $ 0.2 \pstate{0} + 0.8 \pstate{1} $.
Similarly, the probabilistic state $ \myvector{0.6 \\ 0.4} $ is also represented as $ 0.6 \pstate{0} + 0.4 \pstate{1} $.
<h3> Composite systems </h3>
When two systems are composed, then their states are tensored to calculate the state of composite system.
The probabilistic state of the first bit is $ \myvector{0.2 \\ 0.8} = 0.2 \pstate{0} + 0.8 \pstate{1} $.
The probabilistic state of the second bit is $ \myvector{0.6 \\ 0.4} = 0.6 \pstate{0} + 0.4 \pstate{1} $.
Then, the probabilistic state of the composite system is $ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) $.
<h3> Task 2 </h3>
Find the probabilistic state of the composite system.
<i>
Rule 1: Tensor product distributes over addition in the same way as the distribution of multiplication over addition.
Rule 2: $ \big( 0.3 \pstate{1} \big) \otimes \big( 0.7 \pstate{0} \big) = (0.3 \cdot 0.7) \big( \pstate{1} \otimes \pstate{0} \big) = 0.21 \pstate{10} $.
</i>
<a href="CS24_Two_Probabilistic_Bits_Solutions.ipynb#task2">click for our solution</a>
<h3> Task 3</h3>
Find the probabilistic state of the composite system by calculating this tensor product $ \myvector{0.2 \\ 0.8} \otimes \myvector{0.6 \\ 0.4 } $.
<a href="CS24_Two_Probabilistic_Bits_Solutions.ipynb#task3">click for our solution</a>
<h3> Task 4</h3>
Find the vector representations of $ \pstate{00} $, $ \pstate{01} $, $\pstate{10}$, and $ \pstate{11} $.
<i>The vector representation of $ \pstate{ab} $ is $ \pstate{a} \otimes \pstate{b} $ for $ a,b \in \{0,1\} $.</i>
<a href="CS24_Two_Probabilistic_Bits_Solutions.ipynb#task4">click for our solution</a>
---
<h3> Extra: Task 5 </h3>
Suppose that we have three bits.
Find the vector representations of $ \pstate{abc} $ for each $ a,b,c \in \{0,1\} $.
<h3> Extra: Task 6 </h3>
<i>This task is challenging.</i>
Suppose that we have four bits.
Number 9 is represented as $ 1001 $ in binary. Verify that the vector representation of $ \pstate{1001} $ is the zero vector except its $10$th entry, which is 1.
Number 7 is represented as $ 0111 $ in binary. Verify that the vector representation of $ \pstate{0111} $ is the zero vector except its $8$th entry, which is 1.
Generalize this idea for any number between 0 and 15.
Generalize this idea for any number of bits.
| github_jupyter |
# Introduction to the Quantum Bit
### Where we'll explore:
* **Quantum Superposition**
* **Quantum Entanglement**
* **Running experiments on a laptop-hosted simulator**
* **Running experiments on a real quantum computer**
### Brandon Warren
### SDE, Zonar Systems
github.com/brandonwarren/intro-to-qubit contains this Jupyter notebook and installation tips.
```
import py_cas_slides as slides
# real 6-qubit quantum computer, incl interface electronics
slides.system()
# import QISkit, define function to set backend that will execute our circuits
HISTO_SIZE = (9,4) # width, height in inches
CIRCUIT_SIZE = 1.0 # scale (e.g. 0.5 is half-size)
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute
from qiskit import BasicAer as Aer
from qiskit.tools.visualization import plot_histogram
from qiskit import __qiskit_version__
# print the installed qiskit component versions for reproducibility
print(__qiskit_version__)
def set_backend(use_simulator: bool, n_qubits: int, preferred_backend: str=''):
    """Select the backend that will execute our circuits.

    Returns the local qasm simulator when `use_simulator` is True; otherwise
    loads the IBMQ account and returns either the caller's preferred device
    or the least-busy real device with at least `n_qubits` qubits.
    """
    if use_simulator:
        return Aer.get_backend('qasm_simulator')
    # real hardware: requires a configured IBMQ account
    from qiskit import IBMQ
    provider = IBMQ.load_account()
    if preferred_backend:
        # use backend specified by caller
        backend = provider.get_backend(preferred_backend)
        print(f"Using {backend.name()}")
        return backend
    # use least-busy backend that has enough qubits
    from qiskit.providers.ibmq import least_busy
    candidates = provider.backends(filters=lambda x: x.configuration().n_qubits >= n_qubits and not x.configuration().simulator)
    backend = least_busy(candidates)
    print(f"The best backend is {backend.name()}")
    return backend
def add_missing_keys(counts, keys=('00', '01', '10', '11')):
    """Insert zero counts for any measurement outcome missing from `counts`.

    Qiskit's get_counts() omits outcomes that were never observed; histograms
    are easier to compare when every outcome appears, so fill the gaps with 0.
    Mutates `counts` in place and returns None.

    Args:
        counts: dict mapping bitstring outcomes to observed counts.
        keys: outcomes that must be present (defaults to all 2-qubit strings,
            matching the original hard-coded behavior).
    """
    for key in keys:
        # setdefault only inserts when the key is absent
        counts.setdefault(key, 0)
# use simulator for now
backend = set_backend(use_simulator=True, n_qubits=2)
# write code to build this quantum circuit
# logic flows left to right
# quantum bits begin in ground state (zero)
# measurement copies result to classical bit
slides.simple_2qubits() # simplest possible 2-qubit circuit
# 1. Build simplest possible 2-qubit quantum circuit and draw it
q_reg = QuantumRegister(2, 'q') # the 2 qubits we'll be using
c_reg = ClassicalRegister(2, 'c') # classical bits to hold results of measurements
circuit = QuantumCircuit(q_reg, c_reg) # begin circuit - just 2 qubits and 2 classical bits
# measure while still in ground state
circuit.measure(q_reg, c_reg) # measure qubits, place results in classical bits
# circuit is now complete
circuit.draw(output='mpl', scale=CIRCUIT_SIZE)
# run it 1000 times on simulator
result = execute(circuit, backend=backend, shots=1000).result()
counts = result.get_counts(circuit)
print(counts)
# second print shows the zero-count outcomes filled in
add_missing_keys(counts)
print(counts)
plot_histogram(counts, figsize=HISTO_SIZE)
# 2. Apply X gate (NOT gate) to high qubit (q1)
q_reg = QuantumRegister(2, 'q')
c_reg = ClassicalRegister(2, 'c')
circuit = QuantumCircuit(q_reg, c_reg)
###### apply X gate to high qubit ######
circuit.x(q_reg[1])
circuit.measure(q_reg, c_reg)
circuit.draw(output='mpl', scale=CIRCUIT_SIZE)
# run it 1000 times on simulator
result = execute(circuit, backend=backend, shots=1000).result()
counts = result.get_counts(circuit)
print(counts)
add_missing_keys(counts)
plot_histogram(counts, figsize=HISTO_SIZE)
# We've seen the two simplest quantum circuits possible.
# Let's take it up a notch and place each qubit into a quantum superposition.
# ?
slides.super_def()
# Like you flip a coin - while it is spinning it is H and T.
# When you catch it, it is H or T.
# BUT: it is as if it was that way all along.
# What's the difference between that, and a coin under a
# piece of paper that is revealed?
slides.feynman_quote()
slides.double_slit()
# (2)
# Like the photon that is in 2 places at once, the qubit can
# be in 2 states at once, and become 0 or 1 when it is measured.
# Let's place our 2 qubits in superposition and measure them.
# The act of measurement collapses the superposition,
# resulting in 1 of the 2 possible values.
# H - Hadamard will turn our 0 into a superposition of 0 and 1.
# It rotates the state of the qubit.
# (coin over table analogy)
# 3. Apply H gate to both qubits
q_reg = QuantumRegister(2, 'q')
c_reg = ClassicalRegister(2, 'c')
circuit = QuantumCircuit(q_reg, c_reg)
###### apply H gate to both qubits ######
circuit.h(q_reg[0])
circuit.h(q_reg[1])
circuit.measure(q_reg, c_reg)
circuit.draw(output='mpl', scale=CIRCUIT_SIZE)
# histo - 2 bits x 2 possibilities = 4 combinations of equal probability
result = execute(circuit, backend=backend, shots=1000).result()
counts = result.get_counts(circuit)
print(counts)
add_missing_keys(counts)
plot_histogram(counts, figsize=HISTO_SIZE)
# TRUE random numbers! (when run on real device)
# Special case of superposition, entanglement, revealed by EPR expmt
slides.mermin_quote()
# Before we get to that, I'd like to set the stage by introducing
# 2 concepts: locality and hidden variables.
# The principle of locality says that for one thing to affect
# another, they have to be in the same location, or need some
# kind of field or signal connecting the two, with
# the fastest possible propagation speed being that of light.
# This even applies to gravity, which propagates at the speed of light.
# [We are 8 light-minutes from the Sun, so if the Sun all of a
# sudden vanished somehow, we would still orbit for another 8 min.]
#
# Even though Einstein helped launch the new field of QM, he never
# really liked it. In particular, he couldn't accept the randomness.
slides.einstein_dice()
slides.bohr_response()
# (3)
slides.epr_nyt()
# (4)
slides.einstein_vs_bohr()
# [Describe entanglement using coins odd,even]
# 4. Entanglement - even-parity
q_reg = QuantumRegister(2, 'q')
c_reg = ClassicalRegister(2, 'c')
circuit = QuantumCircuit(q_reg, c_reg)
###### place q[0] in superposition ######
circuit.h(q_reg[0])
###### CNOT gate - control=q[0] target=q[1] - places into even-parity Bell state
# Target is inverted if control is true
circuit.cx(q_reg[0], q_reg[1])
circuit.measure(q_reg, c_reg)
circuit.draw(output='mpl', scale=CIRCUIT_SIZE)
result = execute(circuit, backend=backend, shots=1000).result()
counts = result.get_counts(circuit)
print(counts)
add_missing_keys(counts)
plot_histogram(counts, figsize=HISTO_SIZE)
# 5. Entanglement - odd-parity
q_reg = QuantumRegister(2, 'q')
c_reg = ClassicalRegister(2, 'c')
circuit = QuantumCircuit(q_reg, c_reg)
###### place q[0] in superposition ######
circuit.h(q_reg[0])
###### CNOT gate - control=q[0] target=q[1] - places into even-parity Bell state
# Target is inverted if control is true
circuit.cx(q_reg[0], q_reg[1])
# a 0/1 superposition is converted to a 1/0 superposition
# i.e. rotates state 180 degrees
# creates odd-parity entanglement
circuit.x(q_reg[0])
circuit.measure(q_reg, c_reg)
circuit.draw(output='mpl', scale=CIRCUIT_SIZE)
result = execute(circuit, backend=backend, shots=1000).result()
counts = result.get_counts(circuit)
print(counts)
add_missing_keys(counts)
plot_histogram(counts, figsize=HISTO_SIZE)
# (5)
slides.Bell_CHSH_inequality()
# Let's run the Bell expmt on a real device.
# This will not be a simulation!
# backend = set_backend(use_simulator=False, n_qubits=2) # 1st avail is RISKY
backend = set_backend(use_simulator=False, n_qubits=2, preferred_backend='ibmq_ourense')
# [quickly: draw circuits, execute, then go over code and circuits]
# 6. Bell experiment
import numpy as np
# Define the Quantum and Classical Registers
q = QuantumRegister(2, 'q')
c = ClassicalRegister(2, 'c')
# create Bell state
bell = QuantumCircuit(q, c)
bell.h(q[0]) # place q[0] in superposition
bell.cx(q[0], q[1]) # CNOT gate - control=q[0] target=q[1] - places into even-parity Bell state
# setup measurement circuits; the gates before each measure rotate the
# measurement axis of the indicated qubit (see the ° labels below)
# ZZ not used for Bell inequality, but interesting for real device (i.e. not perfect)
meas_zz = QuantumCircuit(q, c)
meas_zz.barrier()
meas_zz.measure(q, c)
# ZW: A=Z=0° B=W=45°
meas_zw = QuantumCircuit(q, c)
meas_zw.barrier()
meas_zw.s(q[1])
meas_zw.h(q[1])
meas_zw.t(q[1])
meas_zw.h(q[1])
meas_zw.measure(q, c)
# ZV: A=Z=0° B=V=-45°
meas_zv = QuantumCircuit(q, c)
meas_zv.barrier()
meas_zv.s(q[1])
meas_zv.h(q[1])
meas_zv.tdg(q[1])
meas_zv.h(q[1])
meas_zv.measure(q, c)
# XW: A=X=90° B=W=45°
meas_xw = QuantumCircuit(q, c)
meas_xw.barrier()
meas_xw.h(q[0])
meas_xw.s(q[1])
meas_xw.h(q[1])
meas_xw.t(q[1])
meas_xw.h(q[1])
meas_xw.measure(q, c)
# XV: A=X=90° B=V=-45° - instead of being 45° diff,
# they are 90°+45°=135° = 180°-45°,
# which is why the correlation is negative and we negate it
# before adding the rest of the correlations.
meas_xv = QuantumCircuit(q, c)
meas_xv.barrier()
meas_xv.h(q[0])
meas_xv.s(q[1])
meas_xv.h(q[1])
meas_xv.tdg(q[1])
meas_xv.h(q[1])
meas_xv.measure(q, c)
# build circuits: Bell-state preparation followed by each measurement setting
circuits = []
labels = []
ab_labels = []
circuits.append(bell + meas_zz)
labels.append('ZZ')
ab_labels.append("") # not used
circuits.append(bell + meas_zw)
labels.append('ZW')
ab_labels.append("<AB>")
circuits.append(bell + meas_zv)
labels.append('ZV')
ab_labels.append("<AB'>")
circuits.append(bell + meas_xw)
labels.append('XW')
ab_labels.append("<A'B>")
circuits.append(bell + meas_xv)
labels.append('XV')
ab_labels.append("<A'B'>")
print("Circuit to measure ZZ (A=Z=0° B=Z=0°) - NOT part of Bell expmt")
circuits[0].draw(output='mpl', scale=CIRCUIT_SIZE)
print("Circuit to measure ZW (A=Z=0° B=W=45°)")
print("The gates to the right of the vertical bar rotate the measurement axis.")
circuits[1].draw(output='mpl', scale=CIRCUIT_SIZE)
print("Circuit to measure ZV (A=Z=0° B=V=-45°)")
circuits[2].draw(output='mpl', scale=CIRCUIT_SIZE)
print("Circuit to measure XW (A=X=90° B=W=45°)")
circuits[3].draw(output='mpl', scale=CIRCUIT_SIZE)
print("Circuit to meas XV (A=X=90° B=V=-45°) (negative correlation)")
circuits[4].draw(output='mpl', scale=CIRCUIT_SIZE)
# execute, then review while waiting
from datetime import datetime, timezone
import time
# execute circuits
shots = 1024
job = execute(circuits, backend=backend, shots=shots)
print('after call execute()')
# queue diagnostics only make sense on a real device, not the local simulator
if backend.name() != 'qasm_simulator':
    try:
        info = None
        max_tries = 3
        while max_tries>0 and not info:
            time.sleep(1) # need to wait a little bit before calling queue_info()
            info = job.queue_info()
            print(f'queue_info: {info}')
            max_tries -= 1
        now_utc = datetime.now(timezone.utc)
        print(f'\njob status: {info._status} as of {now_utc.strftime("%H:%M:%S")} UTC')
        print(f'position: {info.position}')
        print(f'estimated start time: {info.estimated_start_time.strftime("%H:%M:%S")}')
        print(f'estimated complete time: {info.estimated_complete_time.strftime("%H:%M:%S")}')
        wait_time = info.estimated_complete_time - now_utc
        wait_min, wait_sec = divmod(wait_time.seconds, 60)
        print(f'estimated wait time is {wait_min} minutes {wait_sec} seconds')
    except Exception as err:
        # diagnostics are best-effort: never let them kill the run
        print(f'error getting job info: {err}')
result = job.result() # blocks until complete
print(f'job complete as of {datetime.now(timezone.utc).strftime("%H:%M:%S")} UTC')
# gather data: one counts dict per measurement setting, in `labels` order
counts = []
for i, label in enumerate(labels):
    circuit = circuits[i]
    data = result.get_counts(circuit)
    counts.append(data)
# show counts of Bell state measured in Z-axis
print('\n', labels[0], counts[0], '\n')
# show histogram of Bell state measured in Z-axis
# real devices are not yet perfect. due to noise.
add_missing_keys(counts[0])
plot_histogram(counts[0], figsize=HISTO_SIZE)
# tabular output
print(' (+) (+) (-) (-)')
print(' P(00) P(11) P(01) P(10) correlation')
# C accumulates the CHSH correlation sum; skip counts[0] (the ZZ sanity check)
C = 0.0
for i in range(1, len(labels)):
    AB = 0.0
    print(f'{labels[i]} ', end ='')
    # N cross-checks that the four outcome counts sum to `shots`
    N = 0
    for out in ('00', '11', '01', '10'):
        P = counts[i][out]/float(shots)
        N += counts[i][out]
        # equal outcomes contribute +P to the correlation, unequal ones -P
        if out in ('00', '11'):
            AB += P
        else:
            AB -= P
        print(f'{P:.3f} ', end='')
    if N != shots:
        print(f'ERROR: N={N} shots={shots}')
    print(f'{AB:6.3f} {ab_labels[i]}')
    if labels[i] == 'XV':
        # the negative correlation - make it positive before summing it
        C -= AB
    else:
        C += AB
print(f"\nC = <AB> + <AB'> + <A'B> - <A'B'>")
print(f' = <ZW> + <ZV> + <XW> - <XV>')
print(f' = {C:.2f}\n')
# CHSH bound: any local hidden variable theory requires C <= 2
if C <= 2.0:
    print("Einstein: 1 Quantum theory: 0")
else:
    print("Einstein: 0 Quantum theory: 1")
```
## Superposition and entanglement main points
* Superposition is demonstrated by the double-slit experiment, which suggests that a photon can be in two positions at once: the interference pattern requires interference between the two paths, yet the pattern still forms even if we send one photon at a time.
* Hidden variable theories seek to provide determinism to quantum physics.
* The principle of locality states that an influence of one particle on another cannot propagate faster than the speed of light.
* Entanglement cannot be explained by local hidden variable theories.
## Summary
* Two of the strangest concepts in quantum physics, superposition and entanglement, are used in quantum computing, and are waiting to be explored by you.
* You can run simple experiments on your laptop, and when you're ready, run them on a real quantum computer, over the cloud, for free.
* IBM's qiskit.org contains software, tutorials, and an active Slack community.
* My Github repo includes this presentation, tips on installing IBM's Qiskit on your laptop, and links for varying levels of explanations of superpositions and entanglements:
github.com/brandonwarren/intro-to-qubit
| github_jupyter |
# Objects and Classes
---
## The basic idea is to capture the attributes of an object (plane, matrix, pet, ...) in an abstract description, along with the methods to interact with such objects.
>> ## This abstract description is what we call a class
---
## Specific instances of a class are captured as objects.
>> convention is that class names are specified with capital letters
```
class Complex:
    """Minimal complex-number container exposing the real part ``r`` and imaginary part ``i``."""

    def __init__(self, realpart, imagpart):
        # Tuple-unpack keeps both component assignments on a single line.
        self.r, self.i = realpart, imagpart

x = Complex(3.0, -4.5)
x.r, x.i
```
Try to write a class that represents a point in three-dimensional space as an object.
```
class Point3D:
    """A point in three-dimensional Euclidean space."""

    def __init__(self, x, y, z):
        """Initialize a point from its three real-valued coordinates."""
        self.x = x
        self.y = y
        self.z = z

    def distance(self, point):
        """Return the Euclidean distance to another Point3D."""
        d = (
            (self.x - point.x) ** 2 + (self.y - point.y) ** 2 + (self.z - point.z) ** 2
        ) ** 0.5
        return d

    def shiftedPoint(self, shx, shy, shz):
        """Return a new point offset by (shx, shy, shz); the original point is unchanged."""
        newx = self.x + shx
        newy = self.y + shy
        # BUG FIX: the z offset was previously added to self.x instead of self.z.
        newz = self.z + shz
        return Point3D(newx, newy, newz)
# Demo: two points one unit apart on the z-axis, then a shifted copy.
p = Point3D(0,0,1)
q = Point3D(0,0,2)
p.distance(q)
q = p.shiftedPoint(42,0,5)
q.x
def Euclidean_GCD(a, b):
    """Return the greatest common divisor of a and b via Euclid's algorithm."""
    # Simultaneous assignment replaces the explicit temporary variable.
    while b:
        a, b = b, a % b
    return a
class Rational:
    """An exact rational number kept in lowest terms.

    Supports +, -, *, / (both the legacy ``__div__`` hook and Python 3's
    ``__truediv__``), ==, <=, str() and repr().
    """

    def __init__(self, n, d):
        """Construct the rational n/d reduced to lowest terms.

        Raises:
            ZeroDivisionError: if the denominator d is zero.
        """
        from math import gcd  # stdlib replacement for the hand-rolled GCD helper
        if d == 0:
            raise ZeroDivisionError("Denominator of rational may not be zero.")
        g = gcd(int(n), int(d))
        # BUG FIX: integer (floor) division keeps the components exact ints;
        # the previous true division silently turned them into floats.
        self.n = n // g
        self.d = d // g

    def __add__(self, other):
        """Add two rational numbers."""
        return Rational(self.n * other.d + other.n * self.d, self.d * other.d)

    def __sub__(self, other):
        """Subtract two rational numbers."""
        return Rational(self.n * other.d - other.n * self.d, self.d * other.d)

    def __mul__(self, other):
        """Multiply two rational numbers."""
        return Rational(self.n * other.n, self.d * other.d)

    def __div__(self, other):
        """Divide two rational numbers (legacy Python 2 operator hook)."""
        return Rational(self.n * other.d, self.d * other.n)

    # Python 3 dispatches the / operator to __truediv__; reuse the same logic.
    __truediv__ = __div__

    def __eq__(self, other):
        """Check if two rational numbers are equivalent (by cross-multiplication)."""
        return self.n * other.d == other.n * self.d

    def __str__(self):
        """Convert the fraction to a human-readable "n/d" string."""
        return str(self.n) + "/" + str(self.d)

    def __repr__(self):
        """Return a valid python expression reconstructing this fraction."""
        return "Rational(" + str(int(self.n)) + "," + str(int(self.d)) + ")"

    def __le__(self, other):
        """<= for fractions, compared by cross-multiplication.

        BUG FIX: the original signature omitted ``other`` (NameError on any
        use) and computed two unused float conversions.
        """
        return self.n * other.d <= other.n * self.d
# Demo: equal fractions reduce to the same lowest terms, so == treats them alike.
peter=Rational(1,2)
print(peter)
petra = Rational(1,2)
peter = Rational(2,4)
alice = Rational(3,5)
petra == peter
petra == alice
alice + petra == alice + peter
petra - alice == alice - peter
```
# Iterators in Python
---
## To iterate over an object in Python with a for-loop, the following steps are performed:
>>**1. Derive an associated iterator by applying iter() to the object**
>> **2. The next function is applied to the iterator until a stop iteration exception occurs**
```
# Demo: strings are iterable; iter() returns an iterator yielding one character per next().
a = 'Hey there'
aa = iter(a)
aa
type(a)
next(aa)
next(aa)
next(aa)
next(aa)
next(aa)
next(aa)
next(aa)
next(aa)
next(aa)
# NOTE(review): 'Hey there' has only 9 characters, so this 10th next() raises StopIteration.
next(aa)
next(aa)
class SmallMatrix:
    """A 2x2 matrix that can be printed and iterated in row-major order."""

    def __init__(self, m11, m12, m21, m22):
        self.row1 = (
            m11,
            m12,
        )
        self.row2 = (
            m21,
            m22,
        )

    def __str__(self):
        """Render the matrix as two space-separated rows.

        BUG FIX: the second row previously reused row1's second element.
        """
        row1_string = str(self.row1[0]) + " " + str(self.row1[1])
        row2_string = str(self.row2[0]) + " " + str(self.row2[1])
        return row1_string + "\n" + row2_string

    def __iter__(self):
        # Leading underscore marks the counter as private by convention.
        self._counter = 0
        return self

    def __next__(self):
        """Yield m11, m12, m21, m22 in order, then raise StopIteration.

        BUG FIX: the counter was previously incremented on a local name
        ``self_counter`` (a NameError) instead of ``self._counter``, so
        iteration could never advance.
        """
        if self._counter == 0:
            self._counter += 1
            return self.row1[0]
        if self._counter == 1:
            self._counter += 1
            return self.row1[1]
        if self._counter == 2:
            self._counter += 1
            return self.row2[0]
        if self._counter == 3:
            self._counter += 1
            return self.row2[1]
        raise StopIteration
# Demo: iterate just the first row's tuple (not the matrix's own iterator protocol).
a = SmallMatrix(42, 0, 9, 18)
for i in a.row1:
    print(i)
```
# Generators in Python
---
## Often, we can work with a generator, which saves us from implementing __next__ and __iter__. Generators look just like functions, but instead of "return" they use yield. When a generator is called repeatedly, it continues after the yield statement, maintaining all values from the prior call.
```
def squares():
    """Infinite generator of perfect squares: 0, 1, 4, 9, ..."""
    n = 0
    while True:
        yield n ** 2
        n = n + 1
# Demo: each next() advances the generator; the comprehension pulls 50 more squares.
g = squares()
next(g)
next(g)
next(g)
next(g)
next(g)
[next(g) for i in range(50)]
def is_prime(m):
    """Return True if and only if abs(m) is a prime number."""
    n = abs(m)
    # 0 and 1 are not prime; even numbers above 2 are composite.
    if n in (0, 1):
        return False
    if n % 2 == 0 and n > 2:
        return False
    # Only odd divisors up to sqrt(n) need checking.
    return all(n % div for div in range(3, int(n ** 0.5 + 1), 2))
def Endless_Primes():
    """Infinite generator of primes: 2, 3, 5, 7, 11, ...

    BUG FIXES: ``n`` was used before assignment, the helper was misspelled
    ``isprime``, and the step of 12 skipped most candidates; odd numbers are
    now scanned with a step of 2, relying on ``is_prime`` defined above.
    """
    yield 2
    n = 3
    while True:
        if is_prime(n):
            yield n
        n += 2
def twinprimes(b):
    # NOTE(review): despite the name, this does not enumerate twin primes:
    # ``b`` is fixed for the generator's lifetime, so when ``b`` is prime it
    # yields a + b for every prime ``a``, and when ``b`` is composite it loops
    # forever without yielding anything. Presumably the intent was to test
    # ``a`` and ``a + 2`` — confirm before relying on this.
    a = 3
    while True:
        if is_prime(a) == True and is_prime(b) == True:
            yield a + b
        a += 2
# NOTE(review): ``g`` here is still the squares generator from above, not primes.
[next(g) for i in range (20)]
k = twinprimes(3)
x=3
[next(k) for i in range (x)] ### this runs super long for x>3
```
| github_jupyter |
```
import sys
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
from tqdm import trange
sys.path.append('/home/raffaele/Documents/ml-project/src/')
```
## Import Dataset
```
data = np.genfromtxt('/home/raffaele/Documents/ml-project/cup/ML-CUP20-TR.csv', delimiter=',', dtype=np.float32)
X = data[:, 1:-2]
y = data[:, -2:]
print(X.shape)
print(y.shape)
```
### Split train set and Validation Set
```
Xtrain, Xval, ytrain, yval = train_test_split(X, y, test_size=0.10, random_state=42)
print(Xtrain.shape)
print(ytrain.shape)
print(Xval.shape)
print(yval.shape)
BATCH_SIZE = len(Xtrain)
train_dataset = TensorDataset(torch.Tensor(Xtrain), torch.Tensor(ytrain))
train_loader = DataLoader(train_dataset, batch_size = BATCH_SIZE, shuffle=True)
# train_loader = DataLoader(train_dataset, shuffle=True)
```
## Define Models
```
class Net(nn.Module):
    """Fully connected regressor: 10 inputs -> 100 -> 50 -> 2 outputs, ReLU hidden activations."""

    def __init__(self):
        super(Net, self).__init__()
        self.input_layer = nn.Linear(10, 100)
        self.hidden1 = nn.Linear(100, 50)
        self.output = nn.Linear(50, 2)

    def forward(self, x):
        """Map a (batch, 10) tensor to a (batch, 2) prediction."""
        hidden = torch.relu(self.hidden1(torch.relu(self.input_layer(x))))
        return self.output(hidden)
net = Net()
print(net)
def train(net, optimizer, epochs=100, val_split=None):
    """Train ``net`` with ``optimizer`` on the notebook-global ``train_loader``.

    Args:
        net: the model to optimize.
        optimizer: a torch optimizer built over ``net.parameters()``.
        epochs: number of passes over the training data.
        val_split: required ``(Xval, yval)`` pair used to build a validation
            loader; validation metrics are tracked every epoch.

    Returns:
        dict with per-epoch lists: ``loss``, ``acc``, ``val_loss``, ``val_acc``.
    """
    loss_list = []
    acc_list = []
    val_loss_list = []
    val_acc_list = []
    history = {"loss" : loss_list, "acc" : acc_list,
               "val_loss": val_loss_list, "val_acc" : val_acc_list}
    # BUG FIX: the original called len() on the default None (TypeError) and,
    # when the length check failed, later hit a NameError on an undefined
    # test_loader; fail fast with a clear message instead.
    if val_split is None or len(val_split) != 2:
        raise ValueError("val_split must be a (Xval, yval) pair")
    test_dataset = TensorDataset(torch.Tensor(val_split[0]), torch.Tensor(val_split[1]))
    test_loader = DataLoader(test_dataset, batch_size = BATCH_SIZE, shuffle=True)
    for epoch in (t := trange(epochs)):
        for inputs, targets in train_loader:
            optimizer.zero_grad()
            out = net(inputs)
            loss = MEE(out, targets)
            loss.backward()
            optimizer.step()
        acc,_ = evaluate(net, train_loader, verbose=False)
        val_acc, val_loss = evaluate(net, test_loader, verbose=False, criterion=True)
        # BUG FIX: store plain floats; appending the loss tensors kept the
        # whole autograd graph alive for every epoch (a memory leak).
        val_loss_list.append(float(val_loss))
        loss_list.append(loss.item())
        acc_list.append(acc)
        val_acc_list.append(val_acc)
        t.set_description('epoch %d/%d loss=%.5f acc=%.2f val_loss=%.5f val_acc=%.2f'
                          %(epoch+1, epochs, loss.item(), acc, val_loss, val_acc))
    return history
def evaluate(net, test_loader, verbose=True, criterion=False):
    """Score ``net`` on ``test_loader``.

    Returns (accuracy, loss) where accuracy is the fraction of samples whose
    prediction equals the target EXACTLY, and loss is the MEE of the LAST
    batch only (0 unless ``criterion`` is True).
    """
    correct = 0
    total = 0
    loss = 0
    # No gradients are needed for evaluation.
    with torch.no_grad():
        for data in test_loader:
            X,y = data
            output = net(X)
            if (criterion):
                # NOTE(review): overwritten on every batch — only the final
                # batch's loss is returned, not an average over the loader.
                loss = MEE(output, y)
            for idx, i in enumerate(output):
                pred = output[idx]
                # NOTE(review): exact float equality is almost never true for
                # regression outputs, so this "accuracy" is usually 0 —
                # confirm whether a tolerance was intended.
                if ((pred == y[idx]).all()):
                    correct+=1
                total+=1
    if verbose:
        print("Accuracy: ", round(correct/total, 2))
    return round(correct/total, 2), loss
```
### Initialize the weights
```
def init_weights(m):
    """Xavier-initialize the weights and zero the bias of Linear layers (for ``net.apply``)."""
    # Guard clause: leave any non-Linear module untouched.
    if type(m) != nn.Linear:
        return
    torch.nn.init.xavier_uniform_(m.weight)
    m.bias.data.fill_(0.0)
net.apply(init_weights)
def MEE(y_real, y_pred):
    """Mean Euclidean error: average row-wise L2 distance between targets and predictions."""
    diff = y_real - y_pred
    # sqrt of the per-row sum of squares is the row-wise 2-norm.
    return diff.pow(2).sum(dim=1).sqrt().mean()
optimizer = optim.SGD(net.parameters(),lr = 0.006, momentum = 0.8, weight_decay=0.0001)
history = train(net, epochs=1000, optimizer=optimizer, val_split=(Xval, yval))
import sys
sys.path.append('/home/raffaele/Documents/ml-project/src/')
from torch_utility import *
plot_loss(history)
data = [
history['loss'][-1],
history['acc'][-1],
history['val_loss'][-1],
history['val_acc'][-1],
]
table_info(data)
to_predict = torch.tensor(list(Xval), dtype=torch.float, requires_grad=False)
out = net(to_predict)
out = out.detach().numpy()
x = out[:,0]
y = out[:,1]
plt.scatter(x,y)
x_real = yval[:,0]
y_real = yval[:,1]
plt.scatter(x_real, y_real)
from sklearn.metrics import euclidean_distances
def mean_euclidean_error(y_true, y_pred):
    """Average row-wise L2 distance between two equally-shaped 2-D arrays."""
    assert y_true.shape == y_pred.shape
    per_sample = np.sqrt(((y_true - y_pred) ** 2).sum(axis=1))
    return per_sample.mean()
mean_euclidean_error(out, yval)
from sklearn.metrics import euclidean_distances
def mean_euclidean_error(y_true, y_pred):
    """Average row-wise L2 distance between two equally-shaped arrays.

    NOTE(review): duplicate of the definition a few cells above (this is
    notebook cell residue); both compute the same quantity.
    """
    assert y_true.shape == y_pred.shape
    return np.mean(np.linalg.norm(y_true - y_pred, axis=1)) # use this loss for the next grid search
class MEE(torch.nn.Module):
    """Mean Euclidean error criterion (a torch loss module usable with skorch)."""

    def __init__(self):
        super(MEE, self).__init__()

    def forward(self, y_true, y_pred):
        """Average pairwise L2 distance between matching rows of the two batches."""
        total = torch.pairwise_distance(y_true, y_pred).sum()
        return total / len(y_true)
class Net(nn.Module):
    """One-hidden-layer regressor: 10 inputs -> num_units (sigmoid) -> 2 outputs."""

    def __init__(self, num_units):
        super(Net, self).__init__()
        self.input_layer = nn.Linear(10, num_units)
        self.output = nn.Linear(num_units, 2)

    def forward(self, x):
        """Map a (batch, 10) tensor to a (batch, 2) prediction."""
        return self.output(torch.sigmoid(self.input_layer(x)))
from skorch import NeuralNetRegressor
from skorch.callbacks import EarlyStopping
test_net = Net(100,)
nett = NeuralNetRegressor(test_net, max_epochs=1000,
lr=0.01,
batch_size=64,
optimizer=optim.SGD,
optimizer__momentum=0.8,
optimizer__weight_decay=0.0001,
optimizer__nesterov = True,
criterion=MEE,
callbacks=[EarlyStopping(patience=100)]
)
# Training
nett.fit(Xtrain, ytrain)
train_loss = nett.history[:, 'train_loss']
valid_loss = nett.history[:, 'valid_loss']
plt.plot(train_loss, '-', label='training')
plt.plot(valid_loss, '--', label='validation')
plt.ylim(2,4)
plt.xlim(50,1000)
plt.legend()
plt.show()
prova = nett.predict(Xval)
x1 = prova[:,0]
y1 = prova[:,1]
plt.scatter(x1, y1)
x_real = yval[:,0]
y_real = yval[:,1]
plt.scatter(x_real, y_real)
mean_euclidean_error(prova, yval)
```
| github_jupyter |
1. Split into train and test data
2. Train model on train data normally
3. Take test data and duplicate into test prime
4. Drop first visit from test prime data
5. Get predicted delta from test prime data. Compare to delta from test data. We know the difference (epsilon) because we dropped actual visits. What percent of time is test delta < test prime delta?
6. Restrict it only to patients with lot of visits. Is this better?
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pickle
def clean_plot():
    """Activate a single subplot with all four spines hidden, bottom/left ticks only, and a grid."""
    ax = plt.subplot(111)
    # Hide every frame edge in one pass instead of four separate statements.
    for side in ("top", "bottom", "right", "left"):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.grid()
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
# 'figure.figsize': (10,6),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
import sys
import torch
sys.path.append('../data')
from load import chf
from data_utils import parse_data
from synthetic_data import load_piecewise_synthetic_data
sys.path.append('../model')
from models import Sublign
from run_experiments import get_hyperparameters
def make_test_prime(test_data_dict_raw, drop_first_T=1., gap=1):
    """Destroy the first ``gap`` visits of every patient, shifting later visits forward.

    Times are re-zeroed on the first surviving visit, and vacated trailing
    slots are filled with the -1000 sentinel (mask 0).

    BUG FIX: the body referenced an undefined ``gap``; it is now a keyword
    parameter (default 1, matching the "drop first visit" intent and the
    ``gap=1`` call site below). ``drop_first_T`` is kept for call
    compatibility but is not used.

    Returns:
        (modified deep copy of the dict, list of per-patient spacings between
        the first two original visit times).
    """
    import copy  # local import: this cell runs before the notebook imports copy
    test_data_dict = copy.deepcopy(test_data_dict_raw)
    eps_lst = list()
    X = test_data_dict['obs_t_collect']
    Y = test_data_dict['Y_collect']
    M = test_data_dict['mask_collect']
    N_patients = X.shape[0]
    N_visits = X.shape[1]
    for i in range(N_patients):
        # Time between the first two visits: the information lost by the drop.
        eps_i = X[i,1,0] - X[i,0,0]
        first_visit = X[i,1,0]
        # move all visits down (essentially destroying the first visit)
        for j in range(N_visits-gap):
            X[i,j,0] = X[i,j+gap,0] - first_visit
            Y[i,j,:] = Y[i,j+gap,:]
            M[i,j,:] = M[i,j+gap,:]
        # Sentinel-fill the now-empty trailing slots.
        for g in range(1,gap+1):
            X[i,N_visits-g,0] = int(-1000)
            Y[i,N_visits-g,:] = int(-1000)
            M[i,N_visits-g,:] = 0.
        eps_lst.append(eps_i)
    return test_data_dict, eps_lst
data = chf()
max_visits = 38
shuffle = True
num_output_dims = data.shape[1] - 4
data_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits)
train_data_loader, train_data_dict, test_data_loader, test_data_dict, test_pid, unique_pid = parse_data(data.values,
max_visits=max_visits, test_per=0.2,
shuffle=shuffle)
# model = Sublign(10, 20, 50, dim_biomarkers=num_output_dims, sigmoid=True, reg_type='l1', auto_delta=True,
# max_delta=5, learn_time=True, device=torch.device('cuda'))
# # model.fit(data_loader, data_loader, args.epochs, 0.01, verbose=args.verbose,fname='runs/chf.pt',eval_freq=25)
# fname='../model/chf_good.pt'
# model.load_state_dict(torch.load(fname,map_location=torch.device('cuda')))
test_p_data_dict, eps_lst = make_test_prime(test_data_dict, gap=1)
# test_deltas = model.get_deltas(test_data_dict).detach().numpy()
# test_p_deltas = model.get_deltas(test_p_data_dict).detach().numpy()
print(num_output_dims)
# def make_test_prime(test_data_dict_raw, drop_first_T=1.):
drop_first_T = 0.5
# drop first year
test_data_dict_new = copy.deepcopy(test_data_dict)
eps_lst = list()
X = test_data_dict_new['obs_t_collect']
Y = test_data_dict_new['Y_collect']
M = test_data_dict_new['mask_collect']
N_patients = X.shape[0]
N_visits = X.shape[1]
remove_idx = list()
X[X == -1000] = np.nan
for i in range(N_patients):
N_visits_under_thresh = (X[i] < 0.5).sum()
gap = N_visits_under_thresh
first_valid_visit = X[i,N_visits_under_thresh,0]
eps_i = X[i,N_visits_under_thresh,0]
for j in range(N_visits-N_visits_under_thresh):
X[i,j,0] = X[i,j+gap,0] - first_valid_visit
Y[i,j,:] = Y[i,j+gap,:]
M[i,j,:] = M[i,j+gap,:]
for g in range(1,N_visits_under_thresh+1):
X[i,N_visits-g,0] = np.nan
Y[i,N_visits-g,:] = np.nan
M[i,N_visits-g,:] = 0.
if np.isnan(X[i]).all():
remove_idx.append(i)
else:
eps_lst.append(eps_i)
keep_idx = [i for i in range(N_patients) if i not in remove_idx]
X = X[keep_idx]
Y = Y[keep_idx]
M = M[keep_idx]
print('Removed %d entries' % len(remove_idx))
X[np.isnan(X)] = -1000
# eps_lst.append(eps_i)
# return test_data_dict_new, eps_lst
eps_lst
X[0]
first_valid_visit
test_data_dict_new = copy.deepcopy(test_data_dict)
X = test_data_dict_new['obs_t_collect']
Y = test_data_dict_new['Y_collect']
M = test_data_dict_new['mask_collect']
X[X == -1000] = np.nan
i = 1
N_visits_under_thresh = (X[i] < 0.5).sum()
# for j in range(N_visits-N_visits_under_thresh):
# X[i,j,0] = X[i,j+gap,0] - first_visit
# Y[i,j,:] = Y[i,j+gap,:]
# M[i,j,:] = M[i,j+gap,:]
# for g in range(1,N_visits_under_thresh+1):
# X[i,N_visits-g,0] = np.nan
# Y[i,N_visits-g,:] = np.nan
# M[i,N_visits-g,:] = 0.
# if np.isnan(X[i]).all():
# print('yes')
# remove_idx.append(i)
(X[1] < 0.5).sum()
N_visits_under_thresh
N_visits_under_thresh
len(remove_idx)
X[X == -1000] = np.nan
for i in range(10):
print(X[i].flatten())
remove_idx
X[0][:10]
plt.hist(X.flatten())
X.max()
Y[1][:10]
test_data_dict_new['']
f = open('chf_experiment_results.pk', 'rb')
results = pickle.load(f)
test_deltas = results['test_deltas']
test_p_deltas = results['test_p_deltas']
eps_lst = results['eps_lst']
test_data_dict = results['test_data_dict']
f.close()
test_data_dict['obs_t_collect'][0].shape
# get num of visits per patient
num_visits_patient_lst = list()
for i in test_data_dict['obs_t_collect']:
num_visits = (i!=-1000).sum()
num_visits_patient_lst.append(num_visits)
num_visits_patient_lst = np.array(num_visits_patient_lst)
freq_visit_idx = np.where(num_visits_patient_lst > 10)[0]
test_p_deltas[freq_visit_idx]
test_deltas[freq_visit_idx]
np.mean(np.array(test_p_deltas - test_deltas) > 0)
test_p_deltas[:20]
clean_plot()
plt.plot(eps_lst, test_p_deltas - test_deltas, '.')
plt.xlabel('Actual eps')
plt.ylabel('Estimated eps')
# plt.savefig('')
import copy
def make_test_prime(test_data_dict_raw, gap=1):
    """Drop the first ``gap`` visits per patient and shift the remainder toward slot 0.

    Shifted times are re-zeroed on the first kept visit; vacated trailing
    slots receive the -1000 sentinel and a zero mask. Returns (modified deep
    copy, list of per-patient spacings between the first two original visits).
    """
    shifted = copy.deepcopy(test_data_dict_raw)
    eps_lst = list()
    X = shifted['obs_t_collect']
    Y = shifted['Y_collect']
    M = shifted['mask_collect']
    n_patients, n_visits = X.shape[0], X.shape[1]
    for i in range(n_patients):
        # Spacing between the first two visits: the information being destroyed.
        eps_lst.append(X[i, 1, 0] - X[i, 0, 0])
        origin = X[i, 1, 0]
        # Slide every surviving visit ``gap`` slots toward the front.
        for j in range(n_visits - gap):
            X[i, j, 0] = X[i, j + gap, 0] - origin
            Y[i, j, :] = Y[i, j + gap, :]
            M[i, j, :] = M[i, j + gap, :]
        # Sentinel-fill the now-empty trailing slots.
        for g in range(1, gap + 1):
            X[i, n_visits - g, 0] = int(-1000)
            Y[i, n_visits - g, :] = int(-1000)
            M[i, n_visits - g, :] = 0.
    return shifted, eps_lst
t_prime_dict, eps_lst = make_test_prime(test_data_dict)
t_prime_dict['Y_collect'][1,:,0]
test_data_dict['Y_collect'][1,:,0]
```
## Plot successful model
```
import argparse
import numpy as np
import pickle
import sys
import torch
import copy
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
from run_experiments import get_hyperparameters
from models import Sublign
sys.path.append('../data')
from data_utils import parse_data
from load import load_data_format
sys.path.append('../evaluation')
from eval_utils import swap_metrics
train_data_dict['Y_collect'].shape
train_data_dict['t_collect'].shape
new_Y = np.zeros((600,101,3))
val_idx_dict = {'%.1f' % j: i for i,j in enumerate(np.linspace(0,10,101))}
train_data_dict['obs_t_collect'].max()
rounded_t = np.round(train_data_dict['t_collect'],1)
N, M, _ = rounded_t.shape
for i in range(N):
for j in range(M):
val = rounded_t[i,j,0]
# try:
idx = val_idx_dict['%.1f' % val]
for k in range(3):
new_Y[i,idx,k] = train_data_dict['Y_collect'][i,j,k]
# except:
# print(val)
new_Y.shape
(new_Y == 0).sum() / (600*101*3)
# save the files for comparing against SPARTan baseline
for i in range(3):
a = new_Y[:,:,i]
np.savetxt("data1_dim%d.csv" % i, a, deliREDACTEDer=",")
true_labels = train_data_dict['s_collect'][:,0]
guess_labels = np.ones(600)
adjusted_rand_score(true_labels,guess_labels)
from sklearn.metrics import adjusted_rand_score
# a.shape
data_format_num = 1
# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
C
data = load_data_format(data_format_num, 0, cache=True)
train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=4, test_per=0.2, valid_per=0.2, shuffle=False)
model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=False, max_delta=0, learn_time=False, beta=0.00)
model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d_chf_experiment.pt' % (data_format_num), eval_freq=25)
z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# model.load_state_dict(torch.load(fname))
nolign_results = model.score(train_data_dict, test_data_dict)
print('ARI: %.3f' % nolign_results['ari'])
print(anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr)
data_format_num = 1
# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=5, learn_time=True, beta=0.01)
model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d.pt' % (data_format_num), eval_freq=25)
z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# model.load_state_dict(torch.load(fname))
results = model.score(train_data_dict, test_data_dict)
print('ARI: %.3f' % results['ari'])
# model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=5, learn_time=True, b_vae=0.)
# model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d_chf_experiment.pt' % (data_format_num), eval_freq=25)
# z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# # fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# # model.load_state_dict(torch.load(fname))
# results = model.score(train_data_dict, test_data_dict)
# print('ARI: %.3f' % results['ari'])
# Visualize latent space (change configs above)
X = test_data_dict['obs_t_collect']
Y = test_data_dict['Y_collect']
M = test_data_dict['mask_collect']
test_z, _ = model.get_mu(X,Y)
test_z = test_z.detach().numpy()
test_subtypes = test_data_dict['s_collect']
from sklearn.manifold import TSNE
z_tSNE = TSNE(n_components=2).fit_transform(test_z)
test_s0_idx = np.where(test_subtypes==0)[0]
test_s1_idx = np.where(test_subtypes==1)[0]
clean_plot()
plt.plot(z_tSNE[test_s0_idx,0],z_tSNE[test_s0_idx,1],'.')
plt.plot(z_tSNE[test_s1_idx,0],z_tSNE[test_s1_idx,1],'.')
# plt.title('\nNELBO (down): %.3f, ARI (up): %.3f\n Config: %s\nColors = true subtypes' %
# (nelbo, ari, configs))
plt.show()
def sigmoid_f(x, beta0, beta1):
    """Logistic function with intercept beta0 and slope beta1: 1 / (1 + exp(-(beta0 + beta1*x)))."""
    z = beta0 + beta1 * x
    return 1. / (1. + np.exp(-z))
true_betas = [[[-4, 1],
[-1,1.],
[-8,8]
],
[
[-1,1.],
[-8,8],
[-25, 3.5]
]]
# xs = np.linspace(0,10,100)
for dim_i in range(3):
xs = np.linspace(0,10,100)
plt.figure()
clean_plot()
plt.grid(True)
ys = [sigmoid_f(xs_i, true_betas[0][dim_i][0], true_betas[0][dim_i][1]) for xs_i in xs]
plt.plot(xs,ys, ':', color='gray', linewidth=5, label='True function')
ys = [sigmoid_f(xs_i, true_betas[1][dim_i][0], true_betas[1][dim_i][1]) for xs_i in xs]
plt.plot(xs,ys, ':', color='gray', linewidth=5)
for subtype_j in range(2):
xs = np.linspace(0,10,100)
ys = [sigmoid_f(xs_i, nolign_results['cent_lst'][subtype_j,dim_i,0],
nolign_results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]
if subtype_j == 0:
plt.plot(xs,ys,linewidth=4, label='SubNoLign subtype', linestyle='-.', color='tab:green')
else:
plt.plot(xs,ys,linewidth=4, linestyle='--', color='tab:green')
ys = [sigmoid_f(xs_i, results['cent_lst'][subtype_j,dim_i,0],
results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]
if subtype_j == 0:
plt.plot(xs,ys,linewidth=4, label='SubLign subtype', linestyle='-', color='tab:purple')
else:
plt.plot(xs,ys,linewidth=4, linestyle='-', color='tab:purple')
plt.xlabel('Disease stage')
plt.ylabel('Biomarker')
plt.legend()
plt.savefig('subnolign_data1_subtypes_dim%d.pdf' % dim_i, bbox_inches='tight')
# # number dimensions
# fig, axs = plt.subplots(1,3, figsize=(8,4))
# for dim_i in range(3):
# ax = axs[dim_i]
# # number subtypes
# for subtype_j in range(2):
# xs = np.linspace(0,10,100)
# ys = [sigmoid_f(xs_i, model1_results['cent_lst'][subtype_j,dim_i,0],
# model1_results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]
# ax.plot(xs,ys)
# ys = [sigmoid_f(xs_i, true_betas[0][dim_i][0], true_betas[0][dim_i][1]) for xs_i in xs]
# ax.plot(xs,ys, color='gray')
# ys = [sigmoid_f(xs_i, true_betas[1][dim_i][0], true_betas[1][dim_i][1]) for xs_i in xs]
# ax.plot(xs,ys, color='gray')
# fig.suptitle('True data generating function (gray), learned models (orange, blue)')
# plt.savefig('learned_models.pdf',bbox_inches='tight')
```
## Plot CHF Delta distributions
```
data = pickle.load(open('../clinical_runs/chf_v3_1000.pk', 'rb'))
clean_plot()
plt.hist(data['deltas'], bins=20)
plt.xlabel('Inferred Alignment $\delta_i$ Value')
plt.ylabel('Number Heart Failure Patients')
plt.savefig('Delta_dist_chf.pdf', bbox_inches='tight')
```
## Make piecewise data to measure model misspecification
```
from scipy import interpolate
x = np.arange(0, 2*np.pi+np.pi/4, 2*np.pi/8)
y = np.sin(x)
tck = interpolate.splrep(x, y, s=0)
xnew = np.arange(0, 2*np.pi, np.pi/50)
ynew = interpolate.splev(xnew, tck, der=0)
xvals = np.array([9.3578453 , 4.9814664 , 7.86530539, 8.91318433, 2.00779188])[sort_idx]
yvals = np.array([0.35722491, 0.12512101, 0.20054626, 0.38183604, 0.58836923])[sort_idx]
tck = interpolate.splrep(xvals, yvals, s=0)
y
N_subtypes,D,N_pts,_ = subtype_points.shape
fig, axes = plt.subplots(ncols=3,nrows=1)
for d, ax in enumerate(axes.flat):
# ax.set_xlim(0,10)
# ax.set_ylim(0,1)
for k in range(N_subtypes):
xs = subtype_points[k,d,:,0]
ys = subtype_points[k,d,:,1]
sort_idx = np.argsort(xs)
ax.plot(xs[sort_idx],ys[sort_idx])
plt.show()
# for d in range(D):
%%time
N_epochs = 800
N_trials = 5
use_sigmoid = True
sublign_results = {
'ari':[],
'pear': [],
'swaps': []
}
subnolign_results = {'ari': []}
for trial in range(N_trials):
data_format_num = 1
# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
# C
# data = load_data_format(data_format_num, 0, cache=True)
use_sigmoid = False
data, subtype_points = load_piecewise_synthetic_data(subtypes=2, increasing=use_sigmoid,
D=3, N=2000,M=4, noise=0.25, N_pts=5)
train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=4, test_per=0.2, valid_per=0.2, shuffle=False)
model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=use_sigmoid, reg_type='l1',
auto_delta=False, max_delta=5, learn_time=True, beta=1.)
model.fit(train_data_loader, test_data_loader, N_epochs, lr, fname='runs/data%d_spline.pt' % (data_format_num), eval_freq=25)
# z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# model.load_state_dict(torch.load(fname))
results = model.score(train_data_dict, test_data_dict)
print('Sublign results: ARI: %.3f; Pear: %.3f; Swaps: %.3f' % (results['ari'],results['pear'],results['swaps']))
sublign_results['ari'].append(results['ari'])
sublign_results['pear'].append(results['pear'])
sublign_results['swaps'].append(results['swaps'])
model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=use_sigmoid, reg_type='l1',
auto_delta=False, max_delta=0, learn_time=False, beta=1.)
model.fit(train_data_loader, test_data_loader, N_epochs, lr, fname='runs/data%d_spline.pt' % (data_format_num), eval_freq=25)
nolign_results = model.score(train_data_dict, test_data_dict)
print('SubNoLign results: ARI: %.3f' % (nolign_results['ari']))
subnolign_results['ari'].append(nolign_results['ari'])
data_str = 'Increasing' if use_sigmoid else 'Any'
print('SubLign-%s & %.2f $\\pm$ %.2f & %.2f $\\pm$ %.2f & %.2f $\\pm$ %.2f \\\\' % (
data_str,
np.mean(sublign_results['ari']), np.std(sublign_results['ari']),
np.mean(sublign_results['pear']), np.std(sublign_results['pear']),
np.mean(sublign_results['swaps']), np.std(sublign_results['swaps'])
))
print('SubNoLign-%s & %.2f $\\pm$ %.2f & -- & -- \\\\' % (
data_str,
np.mean(sublign_results['ari']), np.std(sublign_results['ari']),
))
results = model.score(train_data_dict, test_data_dict)
print('Sublign results: ARI: %.3f; Pear: %.3f; Swaps: %.3f' % (results['ari'],results['pear'],results['swaps']))
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import xarray as xr
import zarr
import math
import glob
import pickle
import statistics
import scipy.stats as stats
from sklearn.neighbors import KernelDensity
import dask
import seaborn as sns
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def get_files():
    """Map CMIP5 model names to their daily 2-degree zg file lists.

    Scans /terra/data/cmip5/global/historical/* and keeps only models that
    have at least one r1i1p1 daily 2deg zg file.

    Returns:
        dict: model directory name -> list of matching zg paths. Empty when
        the data tree is absent.
    """
    models = glob.glob("/terra/data/cmip5/global/historical/*")
    avail = {}
    for model in models:
        zg = glob.glob(str(model)+"/r1i1p1/day/2deg/zg*")
        # BUG FIX: emptiness was previously detected by indexing zg[0] under a
        # bare except, which would also have silently hidden unrelated errors.
        if zg:
            avail[model.split('/')[-1]] = zg
    return avail
files = get_files()
files['NOAA'] = glob.glob("/home/pmarsh/NOAA_2deg/NOAA_zg/*.nc")
files['ERA5'] = glob.glob("/home/pmarsh/NOAA_2deg/ERA5_zg/*.nc")
files.pop('MIROC-ESM')
def contourise(x, threshold=None):
    """Binarize a field: 1.0 where x >= threshold, NaN elsewhere.

    NaNs in the input are first treated as 0. ``threshold`` defaults to the
    notebook-global ``limit`` (the 90th percentile of the current model's zg),
    keeping existing one-argument call sites working; passing it explicitly
    removes the hidden global dependency.
    """
    thr = limit if threshold is None else threshold
    x = x.fillna(0)
    x = x.where((x >= thr))
    # Dividing the kept values by themselves collapses them all to 1.0.
    x = x/x
    return x
results={}
for model in files.keys():
print(model)
x = xr.open_mfdataset(files[model])
if model == 'NOAA':
x = x.rename({'hgt':'zg'})
x = x.rename({'level':'plev'})
x = x.sel(plev=850)
x = x.sel(time=slice('1950','2005'))
elif model == 'ERA5':
x = x.rename({'level':'plev'})
x = x.sel(plev=850)
x = x.sel(time=slice('1979','2005'))
else:
x = x.sel(plev=85000)
x = x.sel(time=slice('1950','2005'))
x = x.load()
x = x.sel(lat=slice(-60,0))
x = x[['zg']]
x = x.assign_coords(lon=(((x.lon + 180) % 360) - 180))
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
x = x.sortby(x.lon)
x = x.sel(lon=slice(-50,20))
x = x.resample(time="QS-DEC").mean(dim="time",skipna=True)
x = x.load()
limit = np.nanquantile(x.zg.values,0.9)
results[model]={}
for seas in ['DJF','MAM','JJA','SON']:
mean_seas = x.where(x.time.dt.season==str(seas)).dropna(dim='time')
mean_seas = contourise(mean_seas).zg.fillna(0).mean(dim='time')
results[model][seas] = mean_seas.fillna(0)
x.close()
pickle.dump(results, open( "../HIGH_OUT/SASH_track_2D.p", "wb" ) )
weights = np.cos(np.deg2rad(results['NOAA']['DJF'].lat)) #area weighted
#mean absolute error calc
scores=[]
for index in results:
MAE={}
for season in ['DJF','MAM','JJA','SON']:
ref = results['NOAA'][season]
x = results[index][season]
MAE[season] = (np.abs(ref - x)).weighted(weights).sum(('lat','lon'))
scores.append([index,np.mean(MAE['DJF'].values + MAE['MAM'].values + MAE['JJA'].values + MAE['SON'].values)])
resultsdf = pd.DataFrame(np.array(scores),columns=['model','score'])
resultsdf = resultsdf.sort_values('score').set_index('model')['score']
pickle.dump( resultsdf, open( "../HIGH_OUT/scores_2D.p", "wb" ) )
resultsdf.to_csv("../HIGH_OUT/scores_2D.csv")
```
| github_jupyter |
```
kos = '../out/rev_sequencing_kos.tsv'
%matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('white')
plt.rc('font', size=12)
import numpy as np
import pandas as pd
d = {'S288C': 'S288C',
'Y55': 'Y55',
'UWOPS87': 'UWOP',
'YPS606': 'YPS'}
inter = pd.read_csv(kos, sep='\t')
inter['size'] = inter['stop'] - inter['start']
inter = inter[inter['size'] > 100]
ci = inter.groupby(['target', 'strain'])['set'].nunique()
ci = ci[ci == 2]
inter = inter.set_index(['target', 'strain']).loc[ci.index].reset_index()
niter = inter.groupby(['set', 'strain', 'gene'])['sample'].count().loc['new']
oiter = inter.groupby(['set', 'strain', 'gene'])['sample'].count().loc['original']
m = niter.to_frame().join(oiter.to_frame(),
how='outer',
lsuffix='_new',
rsuffix='_original')
m[np.isnan(m)] = 0.0
plt.figure(figsize=(4, 4))
plt.plot(m['sample_original'],
m['sample_new'],
'k.',
alpha=0.3)
plt.plot([-0.5, 13],
[-0.5, 13],
'--',
color='grey',
alpha=0.5)
plt.xlabel('Number of genes with no coverage\n(Original mutants)')
plt.ylabel('Number of genes with no coverage\n(New mutants)')
plt.title('All strains')
plt.xlim(-0.5, 13)
plt.ylim(-0.5, 13)
plt.savefig('ko_sequencing.png',
dpi=300, bbox_inches='tight',
transparent=True)
plt.savefig('ko_sequencing.svg',
dpi=300, bbox_inches='tight',
transparent=True);
plt.figure(figsize=(8, 8))
for i, strain in enumerate(['S288C',
'Y55',
'YPS606',
'UWOPS87']):
plt.subplot(2, 2, i+1)
plt.plot(m.loc[strain]['sample_original'],
m.loc[strain]['sample_new'],
'k.',
alpha=0.3,
label='_')
plt.xlabel('Number of genes with no coverage\n(Original mutants)')
plt.ylabel('Number of genes with no coverage\n(New mutants)')
plt.title(d[strain])
plt.xlim(-0.5, 13)
plt.ylim(-0.5, 13)
plt.plot([-0.5, 13],
[-0.5, 13],
'--',
color='grey',
alpha=0.5)
plt.tight_layout()
plt.savefig('ko_sequencing_all.png',
dpi=300, bbox_inches='tight',
transparent=True)
plt.savefig('ko_sequencing_all.svg',
dpi=300, bbox_inches='tight',
transparent=True);
g = None
for gene in ['URA3', 'CAN1', 'LYP1', 'LEU2', 'MET17']:
x = inter[inter['name'].isin([gene])
].groupby(['strain', 'set'])['sample'
].nunique() / inter.groupby(['strain', 'set'])[
'sample'].nunique()
x[np.isnan(x)] = 0.0
x.name = gene
if g is None:
g = x.to_frame()
else:
g = g.join(x.to_frame(), how='outer')
g
```
| github_jupyter |
# Not completed.
```
import json
import requests
import csv
import pandas as pd
import os
import matplotlib.pylab as plt
import numpy as np
%matplotlib inline
pd.options.mode.chained_assignment = None
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
import operator
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from pandas.tools.plotting import autocorrelation_plot
dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')
indicator_data = pd.read_csv('P:\\ADS\\Final\\Indicators_Cleaned.csv',header=0,parse_dates=True,index_col='Year',date_parser=dateparse, low_memory=False)
indicator_data.head()
indicator_data.reset_index()
indicator_data.head()
# Rows for Argentina ('AR') and indicator NY.GDP.MKTP.KD.ZG
# (presumably annual GDP growth %; confirm against the indicator catalog).
argentina_df_ind = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) & \
                                  (indicator_data['CountryCode'] == 'AR')]
argentina_df_ind.index
argentina_df_ind
ts = argentina_df_ind['Value']
ts1 = argentina_df_ind[['Value']].copy()
# Shift the series by +20 so all values are positive; the log transform
# applied later requires this, and normal() subtracts the 20 back out.
ts1['Value']=ts1['Value']+20
ts1.head()
plt.plot(ts1)
from statsmodels.tsa.stattools import adfuller


def test_stationarity(timeseries):
    """Plot rolling statistics of *timeseries* and print an ADF test report.

    Parameters
    ----------
    timeseries : pandas.Series
        Series to check for stationarity.
    """
    # Determine rolling statistics.
    # BUGFIX: pd.rolling_mean / pd.rolling_std were removed from pandas
    # (deprecated in 0.18, gone by 0.23); use the .rolling() accessor.
    rolmean = timeseries.rolling(window=12).mean()
    rolstd = timeseries.rolling(window=12).std()

    # Plot rolling statistics:
    orig = plt.plot(timeseries, color='blue', label='Original')
    mean = plt.plot(rolmean, color='red', label='Rolling Mean')
    std = plt.plot(rolstd, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    # Perform the augmented Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4],
                         index=['Test Statistic', 'p-value', '#Lags Used',
                                'Number of Observations Used'])
    for key, value in dftest[4].items():
        dfoutput['Critical Value (%s)' % key] = value
    print(dfoutput)


test_stationarity(ts1.Value)
# Decompose the shifted series into trend / seasonal / residual components.
decomposition = sm.tsa.seasonal_decompose(ts1, model='additive')
fig = decomposition.plot()
plt.show()
def logTransform(df):
    """Return the element-wise natural log of *df*, plotting it as a side effect."""
    transformed = np.log(df)
    plt.plot(transformed)
    return transformed


ts1_log = logTransform(ts1)
# test_stationarity(ts1_log.Value)
def logFirstDifference(ts1_log):
    """Return the first difference of a (log-transformed) series.

    The leading NaN created by the shift is dropped, so the result is one
    element shorter than the input.
    """
    shifted = ts1_log.shift()
    delta = ts1_log - shifted
    return delta.dropna()
# First difference of the log series, and its stationarity check.
ts1_log_diff = logFirstDifference(ts1_log)
test_stationarity(ts1_log_diff.Value)
def firstDifference(df):
    """Return the first difference of *df* with the leading NaN removed."""
    delta = df - df.shift()
    return delta.dropna()
# Plain (non-log) first difference and its stationarity check.
ts1_first_diff = firstDifference(ts1)
test_stationarity(ts1_first_diff.Value)
# ACF / PACF of the differenced log series, used to choose the AR (p) and
# MA (q) orders for the ARIMA models below.
lag_acf = acf(ts1_log_diff, nlags=10)
lag_pacf = pacf(ts1_log_diff, nlags=10, method='ols')
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(ts1_log_diff, lags=10, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(ts1_log_diff, lags=10, ax=ax2)
```
- As seen from the graph above both ACF and PACF are geometric hence this is an ARMA model
```
autocorrelation_plot(ts1_log_diff)
plt.show()
# PACF with approximate 95% confidence bounds at +/- 1.96/sqrt(N).
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts1_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts1_log_diff)),linestyle='--',color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()
# Running AIC comparison table / lookup, shared by the model builders below.
aic_metric = pd.DataFrame({'Modelname': [], 'AIC': []})
aic_dict = {}


def cal_aic_metric(modelname, model):
    """Record *model*'s AIC under *modelname* and return the updated table.

    Appends a row to the module-level `aic_metric` frame and mirrors the
    value into `aic_dict` for fast best-model lookup.
    """
    global aic_metric
    aic_value = model.aic
    aic_dict[modelname] = aic_value
    new_row = pd.DataFrame({'Modelname': [modelname], 'AIC': [aic_value]})
    aic_metric = pd.concat([aic_metric, new_row])
    return aic_metric
def AR_Model(ts):
    """Fit an AR model (ARIMA order (1, 1, 0)) to *ts* and plot its fit.

    Records the model's AIC in the shared `aic_metric` table via
    `cal_aic_metric` and returns the fitted results object.
    """
    model = ARIMA(ts, order=(1, 1, 0))
    results_AR = model.fit(disp=0)
    # BUGFIX: the recorded label previously said order=(1, 0, 0), which did
    # not match the model fitted above and mislabelled the AIC table.
    cal_aic_metric('ARIMA(ts, order=(1, 1, 0))', results_AR)
    print('Lag: %s' % results_AR.k_ar)
    print('Coefficients: %s' % results_AR.params)
    # print(results_AR.summary())
    # Dynamic prediction at step 10, mapped back from the log scale.
    predict_MA_HPI = np.exp(results_AR.predict(10, 10, dynamic=True))
    print(predict_MA_HPI)
    plt.plot(ts1_log)
    plt.plot(results_AR.fittedvalues, color='red')
    # print(np.exp(results_AR.fittedvalues))
    print(results_AR.aic)
    return results_AR


model_AR = AR_Model(ts1_log_diff)
def MA_Model(ts):
    """Fit an MA model (ARIMA order (0, 1, 1)) to *ts* and plot its fit.

    Records the model's AIC via `cal_aic_metric` and returns the fitted
    results object.
    """
    model = ARIMA(ts, order=(0, 1, 1))
    results_MA = model.fit(disp=0)
    # BUGFIX: the recorded label previously said order=(2, 1, 2); record the
    # order actually fitted so the AIC comparison table is truthful.
    cal_aic_metric('ARIMA(ts, order=(0, 1, 1))', results_MA)
    print('Lag: %s' % results_MA.k_ar)
    print('Coefficients: %s' % results_MA.params)
    print(results_MA.summary())
    plt.plot(ts)
    plt.plot(results_MA.fittedvalues, color='red')
    return results_MA


model_MA = MA_Model(ts1_log_diff)
def Combined_Model(ts):
    """Fit a combined model (ARIMA order (2, 1, 1)) to *ts* and plot its fit.

    Records the model's AIC via `cal_aic_metric` and returns the fitted
    results object.
    """
    model = ARIMA(ts, order=(2, 1, 1))
    results_ARIMA = model.fit(disp=0)
    # BUGFIX: the recorded label previously said order=(2,1, 3); record the
    # order actually fitted so the AIC comparison table is truthful.
    cal_aic_metric('ARIMA(ts, order=(2, 1, 1))', results_ARIMA)
    print('Lag: %s' % results_ARIMA.k_ar)
    print('Coefficients: %s' % results_ARIMA.params)
    print(results_ARIMA.summary())
    plt.plot(ts)
    plt.plot(results_ARIMA.fittedvalues, color='red')
    return results_ARIMA


model_Combined = Combined_Model(ts1_log_diff)
# Pick the model with the lowest recorded AIC.
best_model = min(aic_dict.items(),key=operator.itemgetter(1))[0]
print('Best Model is ', best_model)
aic_metric
# Forecast using the best model.
def forecast(model, numSteps):
    """Forecast *numSteps* steps ahead and map back toward the original scale.

    The model was fit on log-differenced data, so the raw forecast is
    exponentiated and then re-integrated / de-shifted by `normal`.
    """
    output = model.forecast(steps=numSteps)[0]
    # BUGFIX: removed a stray `output.tolist()` whose return value was
    # discarded — it had no effect.
    output = np.exp(output)
    return normal(output)


def forC(n):
    """Return the first *n* values of a fixed 57-step forecast.

    NOTE(review): the forecast horizon is hard-coded to 57 regardless of n;
    n only truncates the result.
    """
    output_forecast = forecast(model_Combined, 57)
    return output_forecast[:n]


forC(57)
def FittedValues(model):
    """Exponentiate the model's fitted values, save them to CSV, return them."""
    predicted = np.exp(model.fittedvalues)
    np.savetxt('PredictedValues.csv', predicted, delimiter=",")
    print('Predicted existing values are:')
    return predicted
```
# Taking it to normal scale
```
def normal(predictions_ARIMA_diff):
    """Re-integrate differenced log-scale predictions and undo the +20 shift.

    Cumulatively sums the differences, adds them onto a base series seeded
    with the first log value of `ts1_log`, then removes the +20 offset that
    was applied to keep the raw series positive.  The result stays on the
    log scale; callers (see `forecast`) apply np.exp themselves.
    """
    predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
    # BUGFIX: the .ix indexer was removed in pandas 1.0; use positional
    # .iloc[0] to seed with the first value.
    predictions_ARIMA_log = pd.Series(ts1_log.iloc[0], index=ts1_log.index)
    predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0)
    predictions_ARIMA_log = predictions_ARIMA_log - 20
    return predictions_ARIMA_log
```
| github_jupyter |
If Statements
===
By allowing you to respond selectively to different situations and conditions, if statements open up whole new possibilities for your programs. In this section, you will learn how to test for certain conditions, and then respond in appropriate ways to those conditions.
[Previous: Introducing Functions](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/introducing_functions.ipynb) |
[Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) |
[Next: While Loops and Input](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/while_input.ipynb)
Contents
===
- [What is an *if* statement?](#What-is-an-*if*-statement?)
- [Example](#Example)
- [Logical tests](#Logical-tests)
- [Equality](#Equality)
- [Inequality](#Inequality)
- [Other inequalities](#Other-inequalities)
- [Checking if an item is in a list](#Checking-if-an-item-is-in-a-list)
- [Exercises](#Exercises-logical)
- [The if-elif...else chain](#The-if-elif...else-chain)
- [Simple if statements](#Simple-if-statements)
- [if-else statements](#if-else-statements)
- [if-elif...else chains](#if-elif...else-chains)
- [Exercises](#Exercises-elif)
- [More than one passing test](#More-than-one-passing-test)
- [True and False values](#True-and-False-values)
- [Overall Challenges](#Overall-Challenges)
What is an *if* statement?
===
An *if* statement tests for a condition, and then responds to that condition. If the condition is true, then whatever action is listed next gets carried out. You can test for multiple conditions at the same time, and respond appropriately to each condition.
Example
---
Here is an example that shows a number of the desserts I like. It lists those desserts, but lets you know which one is my favorite.
```
# A list of desserts I like.
desserts = ['ice cream', 'chocolate', 'rhubarb crisp', 'cookies']
favorite_dessert = 'apple crisp'
# Print the desserts out, but let everyone know my favorite dessert.
for dessert in desserts:
if dessert == favorite_dessert:
# This dessert is my favorite, let's let everyone know!
print("%s is my favorite dessert!" % dessert.title())
else:
# I like these desserts, but they are not my favorite.
print("I like %s." % dessert)
```
#### What happens in this program?
- The program starts out with a list of desserts, and one dessert is identified as a favorite.
- The for loop runs through all the desserts.
- Inside the for loop, each item in the list is tested.
- If the current value of *dessert* is equal to the value of *favorite_dessert*, a message is printed that this is my favorite.
- If the current value of *dessert* is not equal to the value of *favorite_dessert*, a message is printed that I just like the dessert.
You can test as many conditions as you want in an if statement, as you will see in a little bit.
Logical Tests
===
Every if statement evaluates to *True* or *False*. *True* and *False* are Python keywords, which have special meanings attached to them. You can test for the following conditions in your if statements:
- [equality](#equality) (==)
- [inequality](#inequality) (!=)
- [other inequalities](#other_inequalities)
- greater than (>)
- greater than or equal to (>=)
- less than (<)
- less than or equal to (<=)
- [You can test if an item is **in** a list.](#in_list)
### Whitespace
Remember [learning about](http://introtopython.org/lists_tuples.html#pep8) PEP 8? There is a [section of PEP 8](http://www.python.org/dev/peps/pep-0008/#other-recommendations) that tells us it's a good idea to put a single space on either side of all of these comparison operators. If you're not sure what this means, just follow the style of the examples you see below.
Equality
---
Two items are *equal* if they have the same value. You can test for equality between numbers, strings, and a number of other objects which you will learn about later. Some of these results may be surprising, so take a careful look at the examples below.
In Python, as in many programming languages, two equals signs tests for equality.
**Watch out!** Be careful of accidentally using one equals sign, which can really throw things off because that one equals sign actually sets your item to the value you are testing for!
```
5 == 5
3 == 5
5 == 5.0
'eric' == 'eric'
'Eric' == 'eric'
'Eric'.lower() == 'eric'.lower()
'5' == 5
'5' == str(5)
```
Inequality
---
Two items are *unequal* if they do not have the same value. In Python, we test for inequality using an exclamation point followed by an equals sign.
Sometimes you want to test for equality and if that fails, assume inequality. Sometimes it makes more sense to test for inequality directly.
```
3 != 5
5 != 5
'Eric' != 'eric'
```
Other Inequalities
---
### greater than
```
5 > 3
```
### greater than or equal to
```
5 >= 3
3 >= 3
```
### less than
```
3 < 5
```
### less than or equal to
```
3 <= 5
3 <= 3
```
Checking if an item is **in** a list
---
You can check if an item is in a list using the **in** keyword.
```
vowels = ['a', 'e', 'i', 'o', 'u']
'a' in vowels
vowels = ['a', 'e', 'i', 'o', 'u']
'b' in vowels
```
<a id="Exercises-logical"></a>
Exercises
---
#### True and False
- Write a program that consists of at least ten lines, each of which has a logical statement on it. The output of your program should be 5 **True**s and 5 **False**s.
- Note: You will probably need to write `print(5 > 3)`, not just `5 > 3`.
The if-elif...else chain
===
You can test whatever series of conditions you want to, and you can test your conditions in any combination you want.
Simple if statements
---
The simplest test has a single **if** statement, and a single statement to execute if the condition is **True**.
```
dogs = ['willie', 'hootz', 'peso', 'juno']
if len(dogs) > 3:
print("Wow, we have a lot of dogs here!")
```
In this situation, nothing happens if the test does not pass.
```
###highlight=[2]
dogs = ['willie', 'hootz']
if len(dogs) > 3:
print("Wow, we have a lot of dogs here!")
```
Notice that there are no errors. The condition `len(dogs) > 3` evaluates to False, and the program moves on to any lines after the **if** block.
if-else statements
---
Many times you will want to respond in two possible ways to a test. If the test evaluates to **True**, you will want to do one thing. If the test evaluates to **False**, you will want to do something else. The **if-else** structure lets you do that easily. Here's what it looks like:
```
dogs = ['willie', 'hootz', 'peso', 'juno']
if len(dogs) > 3:
print("Wow, we have a lot of dogs here!")
else:
print("Okay, this is a reasonable number of dogs.")
```
Our results have not changed in this case, because if the test evaluates to **True** only the statements under the **if** statement are executed. The statements under **else** are only executed if the test fails:
```
###highlight=[2]
dogs = ['willie', 'hootz']
if len(dogs) > 3:
print("Wow, we have a lot of dogs here!")
else:
print("Okay, this is a reasonable number of dogs.")
```
The test evaluated to **False**, so only the statement under `else` is run.
if-elif...else chains
---
Many times, you will want to test a series of conditions, rather than just an either-or situation. You can do this with a series of if-elif-else statements
There is no limit to how many conditions you can test. You always need one if statement to start the chain, and you can never have more than one else statement. But you can have as many elif statements as you want.
```
dogs = ['willie', 'hootz', 'peso', 'monty', 'juno', 'turkey']
if len(dogs) >= 5:
print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
print("Wow, we have a lot of dogs here!")
else:
print("Okay, this is a reasonable number of dogs.")
```
It is important to note that in situations like this, only the first test is evaluated. In an if-elif-else chain, once a test passes the rest of the conditions are ignored.
```
###highlight=[2]
dogs = ['willie', 'hootz', 'peso', 'monty']
if len(dogs) >= 5:
print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
print("Wow, we have a lot of dogs here!")
else:
print("Okay, this is a reasonable number of dogs.")
```
The first test failed, so Python evaluated the second test. That test passed, so the statement corresponding to `len(dogs) >= 3` is executed.
```
###highlight=[2]
dogs = ['willie', 'hootz']
if len(dogs) >= 5:
print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
print("Wow, we have a lot of dogs here!")
else:
print("Okay, this is a reasonable number of dogs.")
```
In this situation, the first two tests fail, so the statement in the else clause is executed. Note that this statement would be executed even if there are no dogs at all:
```
###highlight=[2]
dogs = []
if len(dogs) >= 5:
print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
print("Wow, we have a lot of dogs here!")
else:
print("Okay, this is a reasonable number of dogs.")
```
Note that you don't have to take any action at all when you start a series of if statements. You could simply do nothing in the situation that there are no dogs by replacing the `else` clause with another `elif` clause:
```
###highlight=[8]
dogs = []
if len(dogs) >= 5:
print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
print("Wow, we have a lot of dogs here!")
elif len(dogs) >= 1:
print("Okay, this is a reasonable number of dogs.")
```
In this case, we only print a message if there is at least one dog present. Of course, you could add a new `else` clause to respond to the situation in which there are no dogs at all:
```
###highlight=[10,11]
dogs = []
if len(dogs) >= 5:
print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
print("Wow, we have a lot of dogs here!")
elif len(dogs) >= 1:
print("Okay, this is a reasonable number of dogs.")
else:
print("I wish we had a dog here.")
```
As you can see, the if-elif-else chain lets you respond in very specific ways to any given situation.
<a id="Exercises-elif"></a>
Exercises
---
#### Three is a Crowd
- Make a list of names that includes at least four people.
- Write an if test that prints a message about the room being crowded, if there are more than three people in your list.
- Modify your list so that there are only two people in it. Use one of the methods for removing people from the list, don't just redefine the list.
- Run your if test again. There should be no output this time, because there are fewer than three people in the list.
- **Bonus:** Store your if test in a function called something like `crowd_test`.
#### Three is a Crowd - Part 2
- Save your program from *Three is a Crowd* under a new name.
- Add an `else` statement to your if tests. If the `else` statement is run, have it print a message that the room is not very crowded.
#### Six is a Mob
- Save your program from *Three is a Crowd - Part 2* under a new name.
- Add some names to your list, so that there are at least six people in the list.
- Modify your tests so that
- If there are more than 5 people, a message is printed about there being a mob in the room.
- If there are 3-5 people, a message is printed about the room being crowded.
- If there are 1 or 2 people, a message is printed about the room not being crowded.
    - If there are no people in the room, a message is printed about the room being empty.
More than one passing test
===
In all of the examples we have seen so far, only one test can pass. As soon as the first test passes, the rest of the tests are ignored. This is really good, because it allows our code to run more efficiently. Many times only one condition can be true, so testing every condition after one passes would be meaningless.
There are situations in which you want to run a series of tests, where every single test runs. These are situations where any or all of the tests could pass, and you want to respond to each passing test. Consider the following example, where we want to greet each dog that is present:
```
dogs = ['willie', 'hootz']
if 'willie' in dogs:
print("Hello, Willie!")
if 'hootz' in dogs:
print("Hello, Hootz!")
if 'peso' in dogs:
print("Hello, Peso!")
if 'monty' in dogs:
print("Hello, Monty!")
```
If we had done this using an if-elif-else chain, only the first dog that is present would be greeted:
```
###highlight=[6,7,8,9,10,11]
dogs = ['willie', 'hootz']
if 'willie' in dogs:
print("Hello, Willie!")
elif 'hootz' in dogs:
print("Hello, Hootz!")
elif 'peso' in dogs:
print("Hello, Peso!")
elif 'monty' in dogs:
print("Hello, Monty!")
```
Of course, this could be written much more cleanly using lists and for loops. See if you can follow this code.
```
dogs_we_know = ['willie', 'hootz', 'peso', 'monty', 'juno', 'turkey']
dogs_present = ['willie', 'hootz']
# Go through all the dogs that are present, and greet the dogs we know.
for dog in dogs_present:
if dog in dogs_we_know:
print("Hello, %s!" % dog.title())
```
This is the kind of code you should be aiming to write. It is fine to come up with code that is less efficient at first. When you notice yourself writing the same kind of code repeatedly in one program, look to see if you can use a loop or a function to make your code more efficient.
True and False values
===
Every value can be evaluated as True or False. The general rule is that any non-zero or non-empty value will evaluate to True. If you are ever unsure, you can open a Python terminal and write two lines to find out if the value you are considering is True or False. Take a look at the following examples, keep them in mind, and test any value you are curious about. I am using a slightly longer test just to make sure something gets printed each time.
```
if 0:
print("This evaluates to True.")
else:
print("This evaluates to False.")
if 1:
print("This evaluates to True.")
else:
print("This evaluates to False.")
# Arbitrary non-zero numbers evaluate to True.
if 1253756:
print("This evaluates to True.")
else:
print("This evaluates to False.")
# Negative numbers are not zero, so they evaluate to True.
if -1:
print("This evaluates to True.")
else:
print("This evaluates to False.")
# An empty string evaluates to False.
if '':
print("This evaluates to True.")
else:
print("This evaluates to False.")
# Any other string, including a space, evaluates to True.
if ' ':
print("This evaluates to True.")
else:
print("This evaluates to False.")
# Any other string, including a space, evaluates to True.
if 'hello':
print("This evaluates to True.")
else:
print("This evaluates to False.")
# None is a special object in Python. It evaluates to False.
if None:
print("This evaluates to True.")
else:
print("This evaluates to False.")
```
Overall Challenges
===
#### Alien Points
- Make a list of ten aliens, each of which is one color: 'red', 'green', or 'blue'.
- You can shorten this to 'r', 'g', and 'b' if you want, but if you choose this option you have to include a comment explaining what r, g, and b stand for.
- Red aliens are worth 5 points, green aliens are worth 10 points, and blue aliens are worth 20 points.
- Use a for loop to determine the number of points a player would earn for destroying all of the aliens in your list.
- [hint](#hint_alien_points)
- - -
[Previous: Introducing Functions](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/introducing_functions.ipynb) |
[Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) |
[Next: While Loops and Input](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/while_input.ipynb)
Hints
===
These are placed at the bottom, so you can have a chance to solve exercises without seeing any hints.
#### Alien Invaders
- After you define your list of aliens, set a variable called `current_score` or `current_points` equal to 0.
- Inside your for loop, write a series of if tests to determine how many points to add to the current score.
- To keep a running total, use the syntax `current_score = current_score + points`, where *points* is the number of points for the current alien.
| github_jupyter |
# Classification
$$
\renewcommand{\like}{{\cal L}}
\renewcommand{\loglike}{{\ell}}
\renewcommand{\err}{{\cal E}}
\renewcommand{\dat}{{\cal D}}
\renewcommand{\hyp}{{\cal H}}
\renewcommand{\Ex}[2]{E_{#1}[#2]}
\renewcommand{\x}{{\mathbf x}}
\renewcommand{\v}[1]{{\mathbf #1}}
$$
**Note:** We've adapted this Mini Project from [Lab 5 in the CS109](https://github.com/cs109/2015lab5) course. Please feel free to check out the original lab, both for more exercises, as well as solutions.
We turn our attention to **classification**. Classification tries to predict, which of a small set of classes, an observation belongs to. Mathematically, the aim is to find $y$, a **label** based on knowing a feature vector $\x$. For instance, consider predicting gender from seeing a person's face, something we do fairly well as humans. To have a machine do this well, we would typically feed the machine a bunch of images of people which have been labelled "male" or "female" (the training set), and have it learn the gender of the person in the image from the labels and the *features* used to determine gender. Then, given a new photo, the trained algorithm returns us the gender of the person in the photo.
There are different ways of making classifications. One idea is shown schematically in the image below, where we find a line that divides "things" of two different types in a 2-dimensional feature space. The classification shown in the figure below is an example of a maximum-margin classifier, where we construct a decision boundary that is as far as possible away from both classes of points. The fact that a line can be drawn to separate the two classes makes the problem *linearly separable*. Support Vector Machines (SVM) are an example of a maximum-margin classifier.

```
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
import sklearn.model_selection
# First three seaborn palette colors, kept for later plots.
c0=sns.color_palette()[0]
c1=sns.color_palette()[1]
c2=sns.color_palette()[2]
# Light/bold colormaps for decision regions vs scatter points.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# NOTE(review): this rebinding shadows the `matplotlib.cm as cm` import
# above — from here on, `cm` is the RdBu colormap, not the module.
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
def points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=True, colorscale=cmap_light,
                cdiscrete=cmap_bold, alpha=0.1, psize=10, zfunc=False, predicted=False):
    # Scatter training/testing points over a 100x100 evaluation grid and,
    # when mesh=True, shade the classifier's predicted regions.
    # Returns (ax, xx, yy) so callers can draw further layers on the grid.
    h = .02
    X=np.concatenate((Xtr, Xte))
    # Grid bounds padded by 0.5 around the combined data.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))
    #plt.figure(figsize=(10,6))
    if zfunc:
        # Combine both class probabilities with the caller-supplied zfunc.
        p0 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0]
        p1 = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        Z=zfunc(p0, p1)
    else:
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    ZZ = Z.reshape(xx.shape)
    if mesh:
        plt.pcolormesh(xx, yy, ZZ, cmap=cmap_light, alpha=alpha, axes=ax)
    # Color points by predicted labels (predicted=True) or true labels.
    if predicted:
        showtr = clf.predict(Xtr)
        showte = clf.predict(Xte)
    else:
        showtr = ytr
        showte = yte
    # Training points as circles...
    ax.scatter(Xtr[:, 0], Xtr[:, 1], c=showtr-1, cmap=cmap_bold,
               s=psize, alpha=alpha,edgecolor="k")
    # and testing points
    ax.scatter(Xte[:, 0], Xte[:, 1], c=showte-1, cmap=cmap_bold,
               alpha=alpha, marker="s", s=psize+10)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    return ax,xx,yy
def points_plot_prob(ax, Xtr, Xte, ytr, yte, clf, colorscale=cmap_light,
                     cdiscrete=cmap_bold, ccolor=cm, psize=10, alpha=0.1):
    # Like points_plot, but overlays filled + labelled probability contours
    # for the positive class instead of a hard decision mesh.
    ax,xx,yy = points_plot(ax, Xtr, Xte, ytr, yte, clf, mesh=False,
                           colorscale=colorscale, cdiscrete=cdiscrete,
                           psize=psize, alpha=alpha, predicted=True)
    # P(class 1) over the evaluation grid.
    Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=ccolor, alpha=.2, axes=ax)
    cs2 = plt.contour(xx, yy, Z, cmap=ccolor, alpha=.6, axes=ax)
    plt.clabel(cs2, fmt = '%2.1f', colors = 'k', fontsize=14, axes=ax)
    return ax
```
## A Motivating Example Using `sklearn`: Heights and Weights
We'll use a dataset of heights and weights of males and females to hone our understanding of classifiers. We load the data into a dataframe and plot it.
```
# Load the heights/weights/gender dataset used throughout this section.
dflog = pd.read_csv("data/01_heights_weights_genders.csv")
dflog.head()
```
Remember that the form of data we will use always is

with the "response" or "label" $y$ as a plain array of 0s and 1s for binary classification. Sometimes we will also see -1 and +1 instead. There are also *multiclass* classifiers that can assign an observation to one of $K > 2$ classes and the label may then be an integer, but we will not be discussing those here.
`y = [1,1,0,0,0,1,0,1,0....]`.
<div class="span5 alert alert-info">
<h3>Checkup Exercise Set I</h3>
<ul>
<li> <b>Exercise:</b> Create a scatter plot of Weight vs. Height
<li> <b>Exercise:</b> Color the points differently by Gender
</ul>
</div>
```
# your turn
# Scatter of Weight vs. Height, colored by Gender.
_ = sns.scatterplot('Height', 'Weight', data=dflog, hue='Gender', alpha=0.3, legend='brief')
_ = plt.legend(loc='lower right', fontsize=14)
plt.show()
```
### Training and Test Datasets
When fitting models, we would like to ensure two things:
* We have found the best model (in terms of model parameters).
* The model is highly likely to generalize i.e. perform well on unseen data.
<br/>
<div class="span5 alert alert-success">
<h4>Purpose of splitting data into Training/testing sets</h4>
<ul>
<li> We built our model with the requirement that the model fit the data well. </li>
<li> As a side-effect, the model will fit <b>THIS</b> dataset well. What about new data? </li>
<ul>
<li> We wanted the model for predictions, right?</li>
</ul>
<li> One simple solution, leave out some data (for <b>testing</b>) and <b>train</b> the model on the rest </li>
<li> This also leads directly to the idea of cross-validation, next section. </li>
</ul>
</div>
First, we try a basic Logistic Regression:
* Split the data into a training and test (hold-out) set
* Train on the training set, and test for accuracy on the testing set
```
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Split the data into a training and test set.
# Features are (Height, Weight); the label is True for "Male".
Xlr, Xtestlr, ylr, ytestlr = train_test_split(dflog[['Height','Weight']].values,
                                              (dflog.Gender == "Male").values,random_state=5)
clf = LogisticRegression(solver='lbfgs')
# Fit the model on the training data.
clf.fit(Xlr, ylr)
# Print the accuracy from the testing data.
print(accuracy_score(clf.predict(Xtestlr), ytestlr))
```
### Tuning the Model
The model has some hyperparameters we can tune for hopefully better performance. For tuning the parameters of your model, you will use a mix of *cross-validation* and *grid search*. In Logistic Regression, the most important parameter to tune is the *regularization parameter* `C`. Note that the regularization parameter is not always part of the logistic regression model.
The regularization parameter is used to control for unlikely high regression coefficients, and in other cases can be used when data is sparse, as a method of feature selection.
You will now implement some code to perform model tuning and selecting the regularization parameter $C$.
We use the following `cv_score` function to perform K-fold cross-validation and apply a scoring function to each test fold. In this incarnation we use accuracy score as the default scoring function.
```
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score


def cv_score(clf, x, y, score_func=accuracy_score):
    """Mean *score_func* over a 5-fold cross-validation of *clf* on (x, y)."""
    n_folds = 5
    fold_scores = []
    # Split the data into 5 train/test groups; fit on each training part
    # and evaluate score_func on the held-out part.
    for train_idx, test_idx in KFold(n_folds).split(x):
        clf.fit(x[train_idx], y[train_idx])
        fold_scores.append(score_func(clf.predict(x[test_idx]), y[test_idx]))
    # Average across folds.
    return sum(fold_scores) / n_folds
```
Below is an example of using the `cv_score` function for a basic logistic regression model without regularization.
```
clf = LogisticRegression(solver='lbfgs')
score = cv_score(clf, Xlr, ylr)
print(score)
```
<div class="span5 alert alert-info">
<h3>Checkup Exercise Set II</h3>
<b>Exercise:</b> Implement the following search procedure to find a good model
<ul>
<li> You are given a list of possible values of `C` below
<li> For each C:
<ol>
<li> Create a logistic regression model with that value of C
<li> Find the average score for this model using the `cv_score` function **only on the training set** `(Xlr, ylr)`
</ol>
<li> Pick the C with the highest average score
</ul>
Your goal is to find the best model parameters based *only* on the training set, without showing the model test set at all (which is why the test set is also called a *hold-out* set).
</div>
```
#the grid of parameters to search over
Cs = [0.001, 0.1, 1, 10, 100]
# your turn
scores = []
for c in Cs:
cv_clf = LogisticRegression(C=c, solver='lbfgs', random_state=8)
scores.append(cv_score(cv_clf, Xlr, ylr))
#compile respective scores into a data frame
d = {'Cs': Cs, 'Scores': scores}
score_grid = pd.DataFrame.from_dict(d)
score_grid
```
<div class="span5 alert alert-info">
<h3>Checkup Exercise Set III</h3>
**Exercise:** Now you want to estimate how this model will predict on unseen data in the following way:
<ol>
<li> Use the C you obtained from the procedure earlier and train a Logistic Regression on the training data
<li> Calculate the accuracy on the test data
</ol>
<p>You may notice that this particular value of `C` may or may not do as well as simply running the default model on a random train-test split. </p>
<ul>
<li> Do you think that's a problem?
<li> Why do we need to do this whole cross-validation and grid search stuff anyway?
</ul>
</div>
```
# your turn
```
According to the cross-validation exercise above, the scores hardly varied based on different values of *C*. For the current exercise, in order to try something other than the default, a c-value of 0.1 is used.
```
# Refit on the full training set with the chosen C, then evaluate once on the
# held-out test set
clf = LogisticRegression(C=0.1, solver='lbfgs')
# Fit the model on the training data.
clf.fit(Xlr, ylr)
# Print the accuracy from the testing data.
print(accuracy_score(clf.predict(Xtestlr), ytestlr))
```
As the cross-validation indicated, the accuracy score for this iteration is the same as running the default from before. That's not necessarily a problem, it just shows that this particular dataset is not overly affected by values of *C*. That doesn't mean that cross-validation is not useful.
### Black Box Grid Search in `sklearn`
Scikit-learn, as with many other Python packages, provides utilities to perform common operations so you do not have to do it manually. It is important to understand the mechanics of each operation, but at a certain point, you will want to use the utility instead to save time...
<div class="span5 alert alert-info">
<h3>Checkup Exercise Set IV</h3>
<b>Exercise:</b> Use scikit-learn's [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.GridSearchCV.html) tool to perform cross validation and grid search.
* Instead of writing your own loops above to iterate over the model parameters, can you use GridSearchCV to find the best model over the training set?
* Does it give you the same best value of `C`?
* How does this model you've obtained perform on the test set?</div>
```
# your turn
from sklearn.model_selection import GridSearchCV
# Repeat the search over the same grid of C values using sklearn's built-in
# 5-fold cross-validated grid search instead of the manual loop above
param_grid = {'C': Cs}
grid_clf = LogisticRegression(solver='lbfgs')
log_cv = GridSearchCV(grid_clf, param_grid, cv=5, return_train_score=True)
log_cv.fit(Xlr, ylr)
res = pd.DataFrame(log_cv.cv_results_)
# NOTE(review): positional column indices into cv_results_ are fragile across
# sklearn versions -- selecting columns by name would be safer; confirm.
res = res.iloc[:, [4,6,7,8,9,10,11,13,14,15,16,17,18,19]]
res
print('The best value of C is {}'.format(log_cv.best_params_))
print('The best test score is {}'.format(log_cv.best_score_))
```
## A Walkthrough of the Math Behind Logistic Regression
### Setting up Some Demo Code
Let's first set some code up for classification that we will need for further discussion on the math. We first set up a function `cv_optimize` which takes a classifier `clf`, a grid of hyperparameters (such as a complexity parameter or regularization parameter) implemented as a dictionary `parameters`, a training set (as a samples x features array) `Xtrain`, and a set of labels `ytrain`. The code takes the training set, splits it into `n_folds` parts, sets up `n_folds` folds, and carries out a cross-validation by splitting the training set into a training and validation section for each fold for us. It prints the best value of the parameters, and returns the best classifier to us.
```
def cv_optimize(clf, parameters, Xtrain, ytrain, n_folds=5):
    """Grid-search `parameters` for `clf` with n_folds-fold cross-validation.

    Prints the best parameter combination found on (Xtrain, ytrain) and
    returns the corresponding best estimator.
    """
    searcher = sklearn.model_selection.GridSearchCV(clf, param_grid=parameters, cv=n_folds)
    searcher.fit(Xtrain, ytrain)
    print("BEST PARAMS", searcher.best_params_)
    return searcher.best_estimator_
```
We then use this best classifier to fit the entire training set. This is done inside the `do_classify` function which takes a dataframe `indf` as input. It takes the columns in the list `featurenames` as the features used to train the classifier. The column `targetname` sets the target. The classification is done by setting those samples for which `targetname` has value `target1val` to the value 1, and all others to 0. We split the dataframe into 80% training and 20% testing by default, standardizing the dataset if desired. (Standardizing a data set involves scaling the data so that it has 0 mean and is described in units of its standard deviation.) We then train the model on the training set using cross-validation. Having obtained the best classifier using `cv_optimize`, we retrain on the entire training set and calculate the training and testing accuracy, which we print. We return the split data and the trained classifier.
```
from sklearn.model_selection import train_test_split
def do_classify(clf, parameters, indf, featurenames, targetname, target1val, standardize=False, train_size=0.8):
    """Train and evaluate a binary classifier on columns of a dataframe.

    Uses `featurenames` columns of `indf` as features (optionally standardized
    to zero mean / unit standard deviation). The binary target is 1 where
    `targetname` equals `target1val`, else 0. Splits into train/test, tunes
    `clf` over `parameters` via cv_optimize, refits on the full training set,
    prints train/test accuracy, and returns (clf, Xtrain, ytrain, Xtest, ytest).
    """
    feats = indf[featurenames]
    if standardize:
        feats = (feats - feats.mean()) / feats.std()
    X = feats.values
    y = (indf[targetname].values == target1val) * 1
    # hold out 20% by default for final evaluation
    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=train_size)
    clf = cv_optimize(clf, parameters, Xtrain, ytrain)
    clf = clf.fit(Xtrain, ytrain)
    training_accuracy = clf.score(Xtrain, ytrain)
    test_accuracy = clf.score(Xtest, ytest)
    print("Accuracy on training data: {:0.2f}".format(training_accuracy))
    print("Accuracy on test data: {:0.2f}".format(test_accuracy))
    return clf, Xtrain, ytrain, Xtest, ytest
```
## Logistic Regression: The Math
We could approach classification as linear regression, where the class, 0 or 1, is the target variable $y$. But this ignores the fact that our output $y$ is discrete valued, and furthermore, the $y$ predicted by linear regression will in general take on values less than 0 and greater than 1. Additionally, the residuals from the linear regression model will *not* be normally distributed. This violation means we should not use linear regression.
But what if we could change the form of our hypotheses $h(x)$ instead?
The idea behind logistic regression is very simple. We want to draw a line in feature space that divides the '1' samples from the '0' samples, just like in the diagram above. In other words, we wish to find the "regression" line which divides the samples. Now, a line has the form $w_1 x_1 + w_2 x_2 + w_0 = 0$ in 2-dimensions. On one side of this line we have
$$w_1 x_1 + w_2 x_2 + w_0 \ge 0,$$
and on the other side we have
$$w_1 x_1 + w_2 x_2 + w_0 < 0.$$
Our classification rule then becomes:
\begin{eqnarray*}
y = 1 &\mbox{if}& \v{w}\cdot\v{x} \ge 0\\
y = 0 &\mbox{if}& \v{w}\cdot\v{x} < 0
\end{eqnarray*}
where $\v{x}$ is the vector $\{1,x_1, x_2,...,x_n\}$ where we have also generalized to more than 2 features.
What hypotheses $h$ can we use to achieve this? One way to do so is to use the **sigmoid** function:
$$h(z) = \frac{1}{1 + e^{-z}}.$$
Notice that at $z=0$ this function has the value 0.5. If $z > 0$, $h > 0.5$ and as $z \to \infty$, $h \to 1$. If $z < 0$, $h < 0.5$ and as $z \to -\infty$, $h \to 0$. As long as we identify any value of $y > 0.5$ as 1, and any $y < 0.5$ as 0, we can achieve what we wished above.
This function is plotted below:
```
# Sigmoid h(z) = 1 / (1 + e^{-z}), plotted over [-5, 5)
h = lambda z: 1. / (1 + np.exp(-z))
zs=np.arange(-5, 5, 0.1)
plt.plot(zs, h(zs), alpha=0.5);
```
So we then come up with our rule by identifying:
$$z = \v{w}\cdot\v{x}.$$
Then $h(\v{w}\cdot\v{x}) \ge 0.5$ if $\v{w}\cdot\v{x} \ge 0$ and $h(\v{w}\cdot\v{x}) \lt 0.5$ if $\v{w}\cdot\v{x} \lt 0$, and:
\begin{eqnarray*}
y = 1 &if& h(\v{w}\cdot\v{x}) \ge 0.5\\
y = 0 &if& h(\v{w}\cdot\v{x}) \lt 0.5.
\end{eqnarray*}
We will show soon that this identification can be achieved by minimizing a loss in the ERM framework called the **log loss** :
$$ R_{\cal{D}}(\v{w}) = - \sum_{y_i \in \cal{D}} \left ( y_i \log(h(\v{w}\cdot\v{x})) + ( 1 - y_i) \log(1 - h(\v{w}\cdot\v{x})) \right )$$
We will also add a regularization term:
$$ R_{\cal{D}}(\v{w}) = - \sum_{y_i \in \cal{D}} \left ( y_i \log(h(\v{w}\cdot\v{x})) + ( 1 - y_i) \log(1 - h(\v{w}\cdot\v{x})) \right ) + \frac{1}{C} \v{w}\cdot\v{w},$$
where $C$ is the regularization strength (equivalent to $1/\alpha$ from the Ridge case), and smaller values of $C$ mean stronger regularization. As before, the regularization tries to prevent features from having terribly high weights, thus implementing a form of feature selection.
How did we come up with this loss? We'll come back to that, but let us see how logistic regression works out.
```
dflog.head()
# Fit a logistic regression (grid-searching C) to predict Gender from Weight
# and Height, then plot training/test points with the fitted decision boundary
clf_l, Xtrain_l, ytrain_l, Xtest_l, ytest_l = do_classify(LogisticRegression(solver='lbfgs'),
{"C": [0.01, 0.1, 1, 10, 100]},
dflog, ['Weight', 'Height'], 'Gender','Male')
plt.figure()
ax=plt.gca()
points_plot(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, alpha=0.2);
```
In the figure here showing the results of the logistic regression, we plot the actual labels of both the training (circles) and test (squares) samples. The 0's (females) are plotted in red, the 1's (males) in blue. We also show the classification boundary, a line (to the resolution of a grid square). Every sample on the red background side of the line will be classified female, and every sample on the blue side, male. Notice that most of the samples are classified well, but there are misclassified people on both sides, as evidenced by leakage of dots or squares of one color onto the side of the other color. Both test and training accuracy are about 92%.
### The Probabilistic Interpretation
Remember we said earlier that if $h > 0.5$ we ought to identify the sample with $y=1$? One way of thinking about this is to identify $h(\v{w}\cdot\v{x})$ with the probability that the sample is a '1' ($y=1$). Then we have the intuitive notion that we should identify a sample as 1 if we find that the probability of its being a '1' is $\ge 0.5$.
So suppose we say then that the probability of $y=1$ for a given $\v{x}$ is given by $h(\v{w}\cdot\v{x})$?
Then, the conditional probabilities of $y=1$ or $y=0$ given a particular sample's features $\v{x}$ are:
\begin{eqnarray*}
P(y=1 | \v{x}) &=& h(\v{w}\cdot\v{x}) \\
P(y=0 | \v{x}) &=& 1 - h(\v{w}\cdot\v{x}).
\end{eqnarray*}
These two can be written together as
$$P(y|\v{x}, \v{w}) = h(\v{w}\cdot\v{x})^y \left(1 - h(\v{w}\cdot\v{x}) \right)^{(1-y)} $$
Then multiplying over the samples we get the probability of the training $y$ given $\v{w}$ and the $\v{x}$:
$$P(y|\v{x},\v{w}) = P(\{y_i\} | \{\v{x}_i\}, \v{w}) = \prod_{y_i \in \cal{D}} P(y_i|\v{x_i}, \v{w}) = \prod_{y_i \in \cal{D}} h(\v{w}\cdot\v{x_i})^{y_i} \left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}$$
Why use probabilities? Earlier, we talked about how the regression function $f(x)$ never gives us the $y$ exactly, because of noise. This holds for classification too. Even with identical features, a different sample may be classified differently.
We said that another way to think about a noisy $y$ is to imagine that our data $\dat$ was generated from a joint probability distribution $P(x,y)$. Thus we need to model $y$ at a given $x$, written as $P(y|x)$, and since $P(x)$ is also a probability distribution, we have:
$$P(x,y) = P(y | x) P(x)$$
and can obtain our joint probability $P(x, y)$.
Indeed its important to realize that a particular training set can be thought of as a draw from some "true" probability distribution (just as we did when showing the hairy variance diagram). If for example the probability of classifying a test sample as a '0' was 0.1, and it turns out that the test sample was a '0', it does not mean that this model was necessarily wrong. After all, in roughly a 10th of the draws, this new sample would be classified as a '0'! But, of-course its more unlikely than its likely, and having good probabilities means that we'll be likely right most of the time, which is what we want to achieve in classification. And furthermore, we can quantify this accuracy.
Thus its desirable to have probabilistic, or at the very least, ranked models of classification where you can tell which sample is more likely to be classified as a '1'. There are business reasons for this too. Consider the example of customer "churn": you are a cell-phone company and want to know, based on some of my purchasing habit and characteristic "features" if I am a likely defector. If so, you'll offer me an incentive not to defect. In this scenario, you might want to know which customers are most likely to defect, or even more precisely, which are most likely to respond to incentives. Based on these probabilities, you could then spend a finite marketing budget wisely.
### Maximizing the Probability of the Training Set
Now if we maximize $P(y|\v{x},\v{w})$, we will maximize the chance that each point is classified correctly, which is what we want to do. While this is not exactly the same thing as maximizing the 1-0 training risk, it is a principled way of obtaining the highest probability classification. This process is called **maximum likelihood** estimation since we are maximising the **likelihood of the training data y**,
$$\like = P(y|\v{x},\v{w}).$$
Maximum likelihood is one of the cornerstone methods in statistics, and is used to estimate probabilities of data.
We can equivalently maximize
$$\loglike = \log{P(y|\v{x},\v{w})}$$
since the natural logarithm $\log$ is a monotonic function. This is known as maximizing the **log-likelihood**. Thus we can equivalently *minimize* a risk that is the negative of $\log(P(y|\v{x},\v{w}))$:
$$R_{\cal{D}}(h(x)) = -\loglike = -\log \like = -\log{P(y|\v{x},\v{w})}.$$
Thus
\begin{eqnarray*}
R_{\cal{D}}(h(x)) &=& -\log\left(\prod_{y_i \in \cal{D}} h(\v{w}\cdot\v{x_i})^{y_i} \left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}\right)\\
&=& -\sum_{y_i \in \cal{D}} \log\left(h(\v{w}\cdot\v{x_i})^{y_i} \left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}\right)\\
&=& -\sum_{y_i \in \cal{D}} \log\,h(\v{w}\cdot\v{x_i})^{y_i} + \log\,\left(1 - h(\v{w}\cdot\v{x_i}) \right)^{(1-y_i)}\\
&=& - \sum_{y_i \in \cal{D}} \left ( y_i \log(h(\v{w}\cdot\v{x})) + ( 1 - y_i) \log(1 - h(\v{w}\cdot\v{x})) \right )
\end{eqnarray*}
This is exactly the risk we had above, leaving out the regularization term (which we shall return to later) and was the reason we chose it over the 1-0 risk.
Notice that this little process we carried out above tells us something very interesting: **Probabilistic estimation using maximum likelihood is equivalent to Empiricial Risk Minimization using the negative log-likelihood**, since all we did was to minimize the negative log-likelihood over the training samples.
`sklearn` will return the probabilities for our samples, or for that matter, for any input vector set $\{\v{x}_i\}$, i.e. $P(y_i | \v{x}_i, \v{w})$:
```
# Per-sample class probabilities P(y | x, w) for the test set
clf_l.predict_proba(Xtest_l)
```
### Discriminative vs Generative Classifier
Logistic regression is what is known as a **discriminative classifier** as we learn a soft boundary between/among classes. Another paradigm is the **generative classifier** where we learn the distribution of each class. For more examples of generative classifiers, look [here](https://en.wikipedia.org/wiki/Generative_model).
Let us plot the probabilities obtained from `predict_proba`, overlayed on the samples with their true labels:
```
plt.figure()
ax = plt.gca()
# Overlay the model's predicted probabilities (background) on the true-label scatter
points_plot_prob(ax, Xtrain_l, Xtest_l, ytrain_l, ytest_l, clf_l, psize=20, alpha=0.1);
```
Notice that lines of equal probability, as might be expected, are straight lines. What the classifier does is very intuitive: if the probability is greater than 0.5, it classifies the sample as type '1' (male), otherwise it classifies the sample to be class '0'. Thus in the diagram above, where we have plotted predicted values rather than actual labels of samples, there is a clear demarcation at the 0.5 probability line.
Again, this notion of trying to obtain the line or boundary of demarcation is what is called a **discriminative** classifier. The algorithm tries to find a decision boundary that separates the males from the females. To classify a new sample as male or female, it checks on which side of the decision boundary the sample falls, and makes a prediction. In other words we are asking, given $\v{x}$, what is the probability of a given $y$, or, what is the likelihood $P(y|\v{x},\v{w})$?
| github_jupyter |
```
import numpy as np
```
# Univariate Probability
In the example above, we demonstrated some code that generates fake data $X$ and $Y$. On the other hand, real data comes from the real world, not from some python code. For every dataset, there is an immensely complex network of causal interactions that ultimately "produces" the data.
For example, in our blood pressure example, a patient's pre-treatment vital signs are caused by their physiological state: their genetics, life history, what they ate for breakfast that morning, whether or not they just ran up a flight of stairs, and so on and so forth. Taking a drug influences the levels of certain chemicals in the blood, which are taken up at particular rates in certain organs by certain enzymes, the levels of which are impacted by the patient's genetics and prior physiological state, which was influenced by their life history, etc. Thus the impact of the drug on cellular processes is mediated by these factors. The cells respond by increasing or decreasing their production of some proteins or metabolites, which, in combination with the immediate condition of the patient when the measurement is taken, determines the post-treatment blood pressure.
Or, let's say we're trying to determine whether or not there is a cat in a photograph. The cat being in front of the camera when the photo was taken ($y_i$) could be caused by a huge number of factors, and the values of the pixels in the photograph ($x_i$) are caused by the reflection of photons emitted from sources of light off the cat (and other objects) and the mechanics of the detection of light inside the camera.
In a nutshell, the world is complicated. There is no way that mere mortals could ever write code accurate enough to perfectly simulate the exact processes that produce data about complex real-world phenomena.
But, despite the complexity, you should start thinking about that complex web of causality as "code" that's being run in some cosmic simulation. Maybe you can imagine that there are "data gods" that write and are running this code. We'll never see their code, and we'll never be able to understand it, but somewhere, out there, that metaphysical code is running, and it's generating the observations that we see in our data.
You can think of that code as a little "factory" that pumps out observations of $x_i$ and $y_i$, one at a time. The factory is behind a curtain that we can't ever look behind, but we can see the pile of $x_i$s and $y_i$s that come out of it, which are our $X$ and $Y$.

If we had that code, we'd be able to reverse engineer it to find the most likely value of $y_i$ given $x_i$ as accurately as would be possible with those predictors. In practice, however, we can only build a *model* of that code. Our model will never capture the complexities of reality, the same way that a model plane doesn't even begin to approach the complexity of a real aircraft. But, ideally, it will be similar enough in ways that are important for the task at hand: if we're using a model plane just to demonstrate what an aircraft might look like, we don't need the model to have functioning jet engines. And if all we need to do is estimate $y_i$ for a new $x_i$, we don't exactly need to understand the complex web of causality linking the two together.
We do, however, need a way to talk about the relationship that $x_i$ and $y_i$ might have. And to do that, we need a way to talk abstractly about the "code" or "data factory" that's behind the curtain, the same way we developed abstract terms to describe our data. Thankfully, the language of probability works perfectly for that.
## Random variables are factories that generate data
The data factories we're interested in are the kind that output $x_i$s and $y_i$s, but to understand how these factories work it's better to consider a simpler factory that produces one number at a time, instead of one vector $x_i$ and one number $y_i$.
We'll call our factory $\mathbf Z$. This factory pushes out one value $z_i$ at a time. Furthermore, let's say that half the time you get a $1$ and half the time you get a $0$; those are the only values that the $\mathbf Z$ factory can produce. And the factory is built to reset itself between producing each value, so whatever $z_i$ is has no impact on $z_{i+1}$.
In the language of probability theory, $z_i$ are **realizations** from $\mathbf Z$, which has a **distribution**:
$$
\begin{array}{rcl}
P(\mathbf Z = 0) &=& 1/2 \\
P(\mathbf Z = 1) &=& 1/2
\end{array}
\quad \quad \text{or} \quad \quad
P(\mathbf Z=z) =
\begin{cases}
1/2 & \text{for }z=0 \\
1/2 & \text{for }z=1
\end{cases}
$$
What we've been loosely calling a "factory" is a **random variable** in the language of probability theory. But that's just a name. You can keep thinking of them as factories, or code, that generate data.
<div class="alert alert-block alert-warning">
<b>Note:</b>
Random variables are often written in uppercase, (e.g. Z) and their realizations in lowercase (z). We're going to be using uppercase for matrices (and sets), so I'm going to use boldface in conjunction with uppercase ($\mathbf Z$) to denote random variables.
</div>
Ok, so if the random variable is a factory, and the realizations of the random variable are the output of that factory (the data we get to see), then how do we read a statement like $P(\mathbf Z = 0) = 1/2$? Well, that just means that the value $z$ that $\mathbf Z$ produces is $0$ half of the time. But what exactly do we mean by "half the time"? While we usually don't have to think deeper than this, you'll see later that it is sometimes necessary to have a more rigorous definition of probability.
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Remember that the entire purpose of talking about these factories is so that we can imagine what's behind the curtain, producing the data that we observe. Think of a real-world scenario where we could pretend that the data we observe was generated by $\mathbf Z$. In other words, what's something we could measure in the real world that we might model using $\bf Z$?
</div>
Let's build that definition. We'll start with some raw materials. All factories have raw materials that go into them, which end up being turned into the finished product. In a similar way, random variables have inputs which get mapped to realized values. We'll call them "data ore": the unrefined precursor that gets transformed by our factory (random variable $\mathbf Z$) into the data product $z$. The data ore exists in units (data ore nuggets). The factory takes one nugget at a time and transforms it into a realization.
The nuggets are kept in a big silo called $\Omega$ before they go to $\mathbf Z$. This silo is filled to the brim with *all* of the possible nuggets that could be fed into the factory, one of each of them. It's also a magic silo, so when you take out a nugget, another one exactly like it is mined out of the depths of the cosmos to take its place in the silo.

Each nugget gets transformed into a value of $z$, but the process isn't random. For instance, if a nugget named "Karl" turned into a 1 when fed through $\mathbf Z$, then we would *always* get a 1 when Karl goes into $\mathbf Z$. But we know that sometimes $\mathbf Z$ produces 0s, so there must be other nuggets whose destiny is to become 0s, just like Karl's destiny is to be a 1. The "randomness" in $\mathbf Z$ isn't caused by what's in the factory, it's caused by randomly picking a nugget to throw into it.
We can even code up our little example, imagining that we have 10 nuggets, boringly named "0", "1", "2"... "9":
```
def Z(ω):
    """Fair 0/1 factory (random variable): maps each of ten outcomes to a value.

    Half of the outcomes are destined to become 1 and half to become 0, so
    P(Z=0) = P(Z=1) = 1/2 when outcomes are drawn uniformly.
    """
    becomes_one = {1, 4, 5, 8, 9}    # outcomes (nuggets) that map to the value 1
    becomes_zero = {0, 2, 3, 6, 7}   # outcomes (nuggets) that map to the value 0
    if ω in becomes_one:
        return 1
    if ω in becomes_zero:
        return 0

# Sample space (silo) of outcomes (ore nuggets) attached to Z
Z.Ω = set(range(10))
import random

def realize(rand_var): # run the assembly line!
    """Draw one outcome uniformly at random from rand_var.Ω and transform it.

    Returns rand_var(ω) for a randomly chosen outcome ω from the sample space.
    """
    # random.sample() no longer accepts sets (removed in Python 3.11), so
    # materialize the sample space as a list before picking a nugget.
    ω = random.choice(list(rand_var.Ω))  # grab a single nugget out of the silo at random
    return rand_var(ω) # push it through the factory
```
<div class="alert alert-block alert-warning">
<b>Python Tip:</b>
`random.sample(x,n)` grabs `n` values at random out of the set `x` and returns them as a list.
</div>
Here are 20 observations $z=[z_1, z_2, \dots z_{20}]$, fresh off the assembly line of the $\mathbf Z$ factory:
```
# Twenty realizations z_1..z_20 fresh off the Z assembly line
z = [realize(Z) for i in range(20)]
z
```
Now we're ready to define probability: the probability of a realization (a particular value $z$) is just the proportion of the silo that's taken up by nuggets that are destined to become that value $z$ when fed through $\mathbf Z$. That's it. We denote that proportion with the notation $P(\mathbf Z = z)$. In our example above, saying $P(\mathbf Z = 1) = 1/2$ means that half of all the possible nuggets that could go into $\mathbf Z$ would produce a 1, assuming each nugget takes up the same amount of space.
That's a definition we can code up:
```
def P(rand_var, realization):
    """Probability that `rand_var` produces a value in `realization`.

    Computes the measure of the preimage: the fraction of the sample space Ω
    whose outcomes map into `realization`, with every outcome weighted equally.
    """
    preimage = {ω for ω in rand_var.Ω if rand_var(ω) in realization}
    return len(preimage) / len(rand_var.Ω)
# Half the nuggets map to 0 and half to 1, so both probabilities are 0.5
P(Z,[0]), P(Z,[1]) # P(z=0), P(z=1)
```
So to build a factory that makes 0s and 1s in even proportions, all I had to do was evenly split up the number of nuggets that are destined to produce each value. It also doesn't matter what I call the nuggets. For example, here is equally good code to implement $\mathbf Z$:
```
def Z(ω):
    """Relabelled fair 0/1 factory: same distribution, differently named nuggets.

    Demonstrates that outcome names are irrelevant -- only the proportion of
    outcomes mapping to each value matters.
    """
    if ω in {-1234}:     # the single outcome (nugget) that maps to the value 1
        return 1
    if ω in {980123}:    # the single outcome (nugget) that maps to the value 0
        return 0

# Sample space (silo) of outcomes (ore nuggets) attached to Z
Z.Ω = {980123, -1234}
# Twenty draws from the relabelled factory -- the output distribution is unchanged
[realize(Z) for i in range(20)]
```
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Write code for a new random variable $\mathbf W$ that behaves like this:
$$
P(\mathbf W=w) =
\begin{cases}
0.1 \dots & \text{for }w=-1 \\
0.4 \dots & \text{for }w=0 \\
0.2 & \text{for }w=1 \\
0.3 & \text{for }w=2
\end{cases}
$$
You'll need to make your own nugget silo `Ω` and define the function `W(ω)`. Test it out using the `realize()` and `P()` functions we wrote. Use `P()` to calculate $P(\mathbf W =0)$.
</div>
### A mathematical description of random variables

If you're looking at this and thinking that I can't possibly be serious, that the foundations of statistics and machine learning can't possibly be built up from imagining data factories and magical silos... well, you're wrong. Sure, I've concocted a somewhat elaborate metaphor, but it's a metaphor that accurately describes how these otherwise very abstract concepts relate to each other. If you can look at something like $P(\mathbf Z = z) := \mathbb P(\{\omega \in \Omega \vert \mathbf Z(\omega)=z\})$ and immediately come away with an understanding of what that means, all the more power to you. But I don't. At least not without first building up an intuition for each of the components.
In probability theory, the silo $\Omega$ is called a **sample space** and the data ore nuggets $\omega$ are called **outcomes** (not to be confused with what we call the variable we want to predict in machine learning). A random variable $\mathbf Z$ is defined as a function that maps an element $\omega$ of $\Omega$ to a realization $z$. The probability of a realization $z$ is the **measure** (volume, or proportion of total volume) of the set of outcomes (data ore nuggets) that map to $z$ (are destined to be transformed to $z$ by $\mathbf Z$).

When I talk about these things outside of the context of explaining them, I do call them by their real names (e.g. random variable, sample space, etc.) because that's what people have called them for nearly a century. But when I close my eyes and *reason* about these concepts, I'm thinking about something tangible, like a factory. As we go on I'm going to introduce more mathematical notation as we need it, and I'm going to wean off the factory metaphor, but I encourage you to keep building your intuition about these concepts instead of thinking about them as abstract symbols on a page. The symbols are just a convenient shorthand for the ideas. The only reason to know the standard names and symbols is to be able to read and understand what others have written. If you find yourself skimming over an equation- stop. Read it slowly and think about what each part means.
So now that we're here, let's demystify the notation in that equation I dropped up above! Here it is again:
$$P(\mathbf Z = z) := \mathbb P(\{\omega \in \Omega \vert \mathbf Z(\omega)=z\})$$
To start, the $:=$ means "the thing on the left is defined as the thing on the right". So we're saying that when we write "$P(\mathbf Z = z)$", we really mean whatever "$\mathbb P(\{\omega \in \Omega \vert \mathbf Z(\omega)=z\})$" is. Ok, next up is [set-builder notation](https://www.mathsisfun.com/sets/set-builder-notation.html): you can read $\{a\in A | f(a) = 1\}$ as "the collection of all the elements $a$ in the set $A$ *such that* $f(a)=1$". So $\{\omega \in \Omega \vert \mathbf Z(\omega)=z\}$ is the set of outcomes $\omega$ that become $z$ when passed through the random variable $\mathbf Z$. There may be many such outcomes, or just one, or none, so the set can be big, small, or nonexistent. We will write the name of that set a little more compactly using the notation $\mathbf Z^{-1}(z) = \{\omega \in \Omega \vert \mathbf Z(\omega)=z\}$ since usually $f^{-1}(y)$ denotes the element $x$ such that $f(x)=y$. We call this the **preimage** of $z$ under $\mathbf Z$.
<div class="alert alert-block alert-warning">
<b>Note:</b>
Preimages aren't just for random variables- you can define preimages for any function. If the function is $y=f(x)$, the preimage of a set $A$ (denoted $f^{-1}(A)$) is a set of all of the values $x$ that become one of the $y$ values in $A$ when shoved through $f$. The set $A$ is called the image of $f^{-1}(A)$ under $f$.
For example, if $f(x) = x^2$ and $A$ is the set of numbers between 0 and 4, then $f^{-1}(A)$ is the set of numbers between -2 and 2, since every number between -2 and 2, when squared, is between 0 and 4, and these are the only numbers for which that is the case. Another example: if $f(x) = \cos(x)$ and $A=\{1\}$, then $f^{-1}(A) = \{\dots, -4\pi, -2\pi, 0, 2\pi, 4\pi, 6\pi, \dots\}$. Plot or draw a picture of $\cos(x)$ and mark the points where $\cos(x) = 1$ to see why.
</div>
Finally, we have $\mathbb P()$, which is the [**probability measure**](https://en.wikipedia.org/wiki/Probability_measure). Think of it as a function that measures the proportion of all of the outcomes in $\Omega$ that are contained in the subset $\mathbf Z^{-1}(z)$. This is basically the volume of space that the nuggets in $\mathbf Z^{-1}(z)$ take up in the silo $\Omega$. By convention, we say that $\Omega$ has volume 1 so that the volume of $\mathbf Z^{-1}(z)$ is also the proportion of volume that $\mathbf Z^{-1}(z)$ takes up in $\Omega$. In the figure above, that's represented by the area of the shaded gray region.
If you put all of that together, you'll see that it's exactly the same as the definition we put together using our factory analogy.
We can also talk about the probability of sets of realizations instead of just single realization $z$. For instance, what's the probability that $z$ is 0 *or* 1? We write that like $P(\mathbf Z \in A)$, where $A$ is the set of possible realizations, like $\{0,1\}$. That's more general than the probability of a single realization $z$: $P(\mathbf Z = z)$. The definition is the same though: $P(\mathbf Z \in A) := \mathbb P(\mathbf Z^{-1}(A))$. All we need to do is count up the volume of all the nugets that produce any of the values that are in $A$, instead of just the nuggets that produce $z$.
<div class="alert alert-block alert-warning">
<b>REMEMBER:</b>
If your eyes glaze over every time you see mathematical notation, don't worry. Remember, that's normal. Just slow down and read it again. Try and think about what it <i>means</i>.
</div>
If you look at the code we wrote before, you'll notice it can already calculate probabilities for sets of realizations:
```
def P(rand_var, realization):
    """Probability that rand_var produces a value in `realization`, under the counting measure."""
    # gather every outcome (nugget) in the silo whose destined value lies in the set of interest
    matching = {ω for ω in rand_var.Ω if rand_var(ω) in realization}
    # relative "volume" of those nuggets, assuming each takes up an equal share of the silo Ω
    return len(matching) / len(rand_var.Ω)
P(Z, [0])
P(Z, [0,1])
```
### Properties of probability
Ok- I promised that it would be useful to define probability in a more rigorous way than "$z$ happens $x$% of the time". Now we're going to see why.
To start with, let's "derive" a relatively simple fact: for any subset of possible realizations $A$,
$$P(\mathbf Z \in A) \in [0,1]$$
This is a compact way of writing that for any subset of realizations, the volume of the subset of outcomes $\mathbf Z^{-1}(A)$ that map to those realizations is a number between 0 and 1. Why? Well, if the volume of our silo $\Omega$ is 1, the volume of any subset of that has to be less than or equal to 1. And there is no subset that can occupy negative space, so the volume has to be greater than or equal to 0.
Here's a trickier one: if two sets of realizations $A$ and $B$ have no realizations in common, then the probability of a realization from either of them is the sum of the probabilities of a realization from each of them. Mathematically:
$$A \cap B = \emptyset \rightarrow P(\mathbf Z \in A \cup B) = P(\mathbf Z \in A) + P(\mathbf Z \in B)$$
$A \cap B$ is read as "the intersection of the sets $A$ and $B$", which is the set of elements that are in both sets. It's the middle part of a Venn diagram. $A \cup B$ is read as "the union of $A$ and $B$", which is all of the elements in either set- that's the entirety of the Venn diagram.
That also seems cryptic until you think about it in terms of quantities of ore nuggets that produce certain values when fed through the factory. If you take all the ore nuggets that end up becoming any of the values in $A$ (call that set of nuggets $\mathbf Z^{-1}(A)$), and all the nuggets that end up becoming values in $B$ (call that $\mathbf Z^{-1}(B)$), then the total volume that end up becoming values in either $A$ or $B$ is the sum of the volumes that become $A$ and those that become $B$. This is true as long as there are no nuggets that become both a realization in $A$ and a realization in $B$ because we would double-count these. But we've also ensured that these do not exist since each nugget is destined to become only a single value, and we made sure that there is no overlap between $A$ and $B$.
If there is overlap, the proposition doesn't hold. For instance, if $A= \{0,1\}$ and $B = \{0\}$, every element of $B$ is also an element of $A$, so the volume of $Z^{-1}(A \cup B)$ is the volume of $Z^{-1}(A)$, which is not the volume of $Z^{-1}(A)$ plus the volume of $Z^{-1}(B)$.
We can even use our code from before to demonstrate this:
```
A = set([0])
B = set([1])
P(Z,A) + P(Z,B) == P(Z,A|B) # in python, set union ∪ is written | because an element is in A∪B if it is in A OR B (A|B)
A = set([0,1])
B = set([0])
P(Z,A) + P(Z,B) == P(Z,A|B)
```
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Draw a picture based on the figure above that helps explain why $A \cap B = \emptyset \rightarrow P(\mathbf Z \in A \cup B) = P(\mathbf Z \in A) + P(\mathbf Z \in B)$
</div>
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Let's say the sets $A$ and $B$ have some overlap. Can you come up with a formula to calculate $P(\mathbf Z \in A \cup B)$ given $P(\mathbf Z \in A)$, $P(\mathbf Z \in B)$, and $P(\mathbf Z \in A \cap B)$?
</div>
The upshot of this is that the probability of a set of outcomes is the same as the sum of their probabilities:
$$
P(\mathbf Z \in A)
=
\mathbb P (\mathbf Z^{-1}(A))
=
\sum_{\omega \in Z^{-1}(A)} \mathbb P(\omega)
=
\sum_{Z^{-1}(A)} \mathbb P(\omega)
$$
<div class="alert alert-block alert-info">
<b>Exercise:</b>
In all our code so far we've been using a finite sample set with $n$ outcomes and we've chosen to use $\mathbb P(B)= |B|/n$ where $|B|$ denotes the number of elements in $B$. That's called the <b>counting measure</b>. It helps your understanding, however, to know that it isn't the only probability measure we could use. We could instead say that some outcomes take up twice as much space in the silo, or that they all have totally different volumes. As long as whatever $\mathbb P$ we come up with satisfies $\mathbb P(\Omega)=1$ and $\mathbb P(\bigcup B_i)= \sum \mathbb P(B_i)$ for non-overlapping sets $B_i$ (of outcomes), it's a legitimate choice.
Let's go back to this version of $\mathbf Z$:
```
def Z(ω):  # factory (random variable)
    """Map each outcome (ore nugget) to the realization it is destined to become."""
    if ω in {1, 4, 5, 8, 9}:  # these nuggets become the value 1
        return 1
    if ω in {0, 2, 3, 6, 7}:  # these nuggets become the value 0
        return 0

Z.Ω = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}  # sample space (silo) of outcomes attached to Z
```
Change the code for `P(rand_var, realization)` so that $\mathbb P(\omega) = 0.25$ if $\omega \in \{0,1,2,3\}$ and 0 otherwise. The idea is that now nuggets 0, 1, 2, and 3 each take up a quarter of the space in the silo, while the other nuggets take up none. What is $P(Z=1)$ now?
</div>
### Continuous sample spaces
So far, all the random variables we've talked about have produced outputs from a finite, discrete set (e.g. $\{0,1\}$ or $\{-1,0,1,2\}$). If we're imagining a factory that might produce the data we observe when flipping a coin, a binary output is all we need. Similarly, if we want to imagine the factory that assigns an "apple", "orange", or "banana" label to a photograph of a fruit, it just needs to output a discrete set of three values. But if we want to imagine the kind of factory that could produce the prices of different apartments in New York, we need something that can output a continuous range of values.
Let's think up a random variable (call it $\bf Z$ again) that can take any value between 0 and 10. How many numbers are there between 0 and 10? Well, an infinite number: for any two numbers in that interval, you can find a number that's right between them. Since one nugget from the silo always produces the same realization when pushed through the factory, there need to be an infinite number of nuggets in the silo to be able to produce an infinite number of realizations. That means that our old code, where we manually enumerated all of the elements in $\Omega$, is not going to work anymore. What we can do instead is imagine that $\Omega$ is itself an interval, like all the numbers between 0 and 1. So, to pick a random nugget to throw into the factory, we just pick a random number between 0 and 1. Here's an example:
```
def Z(ω):
    """Factory: square the nugget ω and scale by 10, producing a value in [0, 10)."""
    return 10*(ω**2) # when ω goes into the factory, the factory makes 10ω^2

Z.Ω = random.random # silo: "drawing a nugget" = sampling a single number from [0, 1)

def realize_cont(rand_var): # run the assembly line!
    # BUG FIX: draw the nugget from *this* variable's own silo — the original
    # `ω = Z.Ω()` hard-coded Z and ignored the rand_var argument entirely.
    ω = rand_var.Ω() # returns a single number between 0 and 1
    return rand_var(ω) # push it through the factory

[realize_cont(Z) for i in range(5)]
```
So $\mathbf Z$ is defined by $\mathbf Z(\omega) = 10\omega^2$ with $\omega \in [0,1]$. Great. But now what does $P(\mathbf Z = z)$ mean? We just apply the same old definition of probability: it's the proportion of nuggets in the silo that are destined to become the value $z$. In notation: $\mathbb P(\mathbf Z^{-1}(z))$. Same as before.
<div class="alert alert-block alert-warning">
<b>Note:</b>
Notation like $[a,b]$ is often used to concisely write intervals- this just means "all the numbers between $a$ and $b$, including those endpoints". We use parentheses like $(a,b)$ to indicate that the endpoints should not be included. $(a,b]$ and $[a,b)$ have one of the two endpoints included, with the bracket indicating the endpoint that's included and the parenthesis indicating which isn't.
</div>
The issue is that now we need a probability measure that works with continuous sets. For example, let's say we're looking for $P(\mathbf Z = 2.5)$. As $\mathbf Z(\omega) = 10\omega^2$ is defined in the code above, the only value of $\omega$ that makes $z=2.5$ is $\omega = \sqrt{2.5/10} = 0.5$. Any other value of $\omega$ would produce a different value of $z$. So $\mathbf Z^{-1}(z) = \{0.5\}$. What "volume" does the single number $0.5$ take up in the interval $[0,1]$? In other words, how are we going to define a probability measure to use here?
The most commonly used measure in this case is based on the "length" of the set relative to the length of $\Omega$. In our case, the length of $\Omega$ is 1, so the probability measure of any interval $(a,b)$ or $[a,b]$ is $b-a$. For sets more complicated than an interval, we have to find the smallest collection of intervals, in terms of total length, that contains the set in question. We say the length of that set is the total length of the collection of intervals that covers it. Using length as a notion of measure makes good sense because if two sets don't overlap, then the length of their union is the sum of their lengths. This measure is called the **Lebesgue measure**, but I only mention the name so you can recognize it elsewhere.
<div class="alert alert-block alert-warning">
<b>Note:</b>
When you get down to the nitty gritty math, it turns out there actually are <a href=https://en.wikipedia.org/wiki/Vitali_set>some really messed up sets</a> where this notion of "length" breaks down, in that the "length" of the union of two disjoint sets might not be the sum of their lengths. These are not sets you would ever come across in any real-world context. The technical solution is to only allow random variables where the preimage of any interval is not one of these messed up sets. This really isn't something you should think or worry about. This note is only here to satisfy nosy probabilists or measure theorists who were offended by the above paragraph.
</div>
Ok, back to our problem: what's $\mathbb P(0.5)$? Well, $\{0.5\} = [0.5, 0.5]$, so its length is $0.5-0.5=0$! In fact, for any single element $\omega$, $\mathbb P(\omega)= 0$ for the same reason. That's a problem if we want to use discrete sums to calculate probabilities over sets:
$$
P(\mathbf Z \in A)
\overset{?}{=}
\sum_{Z^{-1}(A)} \mathbb P(\omega)
=
\sum_{Z^{-1}(A)} 0
=
0
$$
But if $Z^{-1}(A)$ is an interval with finite length, then the probability has to be the length of that interval, not 0!
The reason this doesn't make any sense is that we're trying to use a discrete sum to add up a continuous infinity of 0s. Basically, we're trying to break down $\sum_{Z^{-1}(A)}$ into each of its component $\omega$s and measuring each of those. Instead of doing that, though, we can *integrate* over infinitesimal units of "$d \omega$":
$$
P(\mathbf Z \in A)
=
\int_{Z^{-1}(A)} \mathbb P(d\omega)
$$
This thing is called a **Lebesgue integral**. What we're doing here is adding up all of the infinitesimal lengths $\mathbb P(d\omega)$ for all $\omega$s in the set $\mathbf Z^{-1}(A)$. We'll write this as $\int d \mathbb P$ for short. It has all the same rules as a standard integral (just write $d \mathbb P$ instead of $dx$), so the integral of a sum is the sum of integrals, etc. And it always agrees with the integrals you're used to from calculus:
$$
\int_{[a,b]} f(\omega) d\mathbb P = \int_a^b f(x) dx
$$
The neat thing is that it actually works no matter what $\mathbb P$ is, as long as it satisfies all the properties of a measure. In fact, if $\mathbb P$ is the discrete counting measure that we were using before, then
$$
\int_{Z^{-1}(A)} d \mathbb P
=
\sum_{Z^{-1}(A)} \mathbb P(\omega)
$$
If you have no idea why any of this matters, don't worry, just keep going. We're not going to get into the theory of Lebesgue integration. I really went back and forth on whether to include this at all, but I did because having this unifying formalism in your back pocket makes it really easy to prove a lot of things later, even if you don't really understand the theoretical details. You'll be fine if you just think of a Lebesgue integral as a tool to find the volume of outcomes in arbitrary sets that happens to follow all the rules of a normal integral. In other words: no matter how you're measuring stuff, you can use the Lebesgue integral to figure out how much space different sets of outcomes take up.
## Probability distributions
The formal definition of a random variable as a function from a sample space to some set of numbers is really useful for proving useful relationships, but ultimately the sample space is totally imaginary: all we get to see are the realizations. So we're going to build some tools that will let us avoid talking about the sample space so much if we don't need to.
As perhaps you've noticed, neither the exact nature of what is in the sample space nor which of its elements map to which realizations change the observable behavior of a random variable as long as the total measure of all the outcomes mapping to each realization are the same. For example, we looked at two equivalent ways to implement our random variable $\mathbf Z$:
```
def Z(ω):
    """First implementation: the silo holds the ten nuggets 0 through 9."""
    if ω in {1, 4, 5, 8, 9}:
        return 1
    if ω in {0, 2, 3, 6, 7}:
        return 0

Z.Ω = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}

def Z(ω):
    """Second implementation: identical behavior, but a totally different two-nugget silo."""
    if ω == -1234:
        return 1
    if ω == 980123:
        return 0

Z.Ω = {980123, -1234}
```
These are technically two different random variables because they have different sample spaces and different mappings to the realizations, but they behave exactly the same. When this is the case, we say they have the same **probability distribution**. The probability distribution describes how the factory should *behave* from the perspective of someone who can only see its products $z_i$: half the time you get a 0, half the time you get a 1. There is no need to mention the silo of ore nuggets, give them names, and specify which nuggets are destined to be 0s and which are destined to be 1s. We know they're back there, and we know what total *measure* are destined to be 0s and 1s (since that's what the probability means), but we don't need the details of who is who and what goes where. In fact, unless you're a probability theorist, you will never need to think about the sample space to solve a problem. The only reason you need to know about it is so that you can understand useful identities, which we will continue to derive as we go along.
The discrete probability distribution is function of the factory product $z$. For each unique value of $z$, it tells us the total volume of the nuggets in the silo that map to that outcome. We can visualize that by sorting all the nuggets in the silo into piles according to which value they are destined to become. The relative heights of each pile are proportional to the volume of space (measure) that each group of nuggets take up in the silo. Let's demonstrate with a new random variable $\bf V$:
```
def V(ω):
    """Sort each nugget into its destined value; pile sizes are 1:2:3:4 for the values 2, 0, -1, 1."""
    if ω == 1:
        return 2
    if ω in {2, 3}:
        return 0
    if ω in {4, 5, 6}:
        return -1
    if ω in {7, 8, 9, 0}:
        return 1

V.Ω = set(range(10))  # the silo: nuggets 0 through 9
vs = [-1,0,1,2] # all the values v can take
ps = [P(V,[v]) for v in vs] # calculate the probability of each, assuming the counting measure
import altair as alt # for plotting
import pandas as pd # to make dataframes
distribution = pd.DataFrame({'v':vs, 'p':ps})
alt.Chart(distribution, height=100, width=400).mark_bar().encode(x='v:O', y='p')
```
<div class="alert alert-block alert-warning">
<b>Python Tip:</b>
`altair` is a useful python package for visualization. It's optimized to work with dataframes from the `pandas` package. Feel free to browse the documentation for these packages, but you don't need to be an expert to continue on in this book.
</div>
This is the graphical representation of the probability distribution
$$
\phi(v)
=
P(V=v)
=
\mathbb P(\mathbf V^{-1}(v))
=
\begin{cases}
0.3 & \text{for }v=-1 \\
0.2 & \text{for }v=0 \\
0.4 & \text{for }v=1 \\
0.1 & \text{for }v=2
\end{cases}
$$
$\phi(v)$ is called a **probability mass function**. If we have multiple random variables floating around and we want to distinguish their mass functions, we'll sometimes write $\phi_{\mathbf V}(v)$.
If we want to know the probability of a particular set of realizations, say, $P(\mathbf V \in \{0,1\})$, it's easy to get using the mass function:
$$P(\mathbf V \in A) = \sum_{v \in A} \phi(v)$$
We simply sum up the probabilities that $\mathbf V$ is any of the realizations within the set $A$ of interest. Compare this to what we had before:
$$P(\mathbf V \in A) = \sum_{\omega \in \mathbf V^{-1}(A)} \mathbb P(\omega)$$
The advantage is that we don't have to talk about outcomes or sample spaces anymore. All of the information we need to calculate any probabilities of $\mathbf V$ is baked into the mass function $\phi(v)$.
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Let's say $\mathbf V$ is a random variable that maps outcomes from the interval $[0,1]$ to either 0, 1, or 2 in the following way:
$$
\mathbf V(\omega)
=
\begin{cases}
0 & \text{if } \omega \in [0, 0.2) \cup (0.8,1] \\
1 & \text{if } \omega \in [0.2, 0.3) \cup (0.7, 0.8] \\
2 & \text{if } \omega \in [0.3, 0.7] \\
\end{cases}
$$
Note that $\mathbf V$ is discrete, but with a continuous sample space.
Assuming the Lebesgue measure, what is the mass function of $\mathbf V$? In other words, for each value that $\mathbf V$ can take, what's the total length of the set that produces each value?
Use the mass function to calculate $P(\mathbf V \in \{1,0\})$. You should get 0.6.
</div>
### Continuous random variables and densities
Let's say $\mathbf Z$ is defined by $\mathbf Z(\omega) = 10\omega^2$ with $\omega \in [0,1]$. How can we find some kind of function that we can manipulate to calculate probabilities without reference to the sample space or measure?
For starters, we do know how to calculate probabilities. For instance, if we want to know $P(\mathbf Z \in [0.625, 2.5])$, what we need to do is find $\mathbb P(\mathbf Z^{-1}([0.625, 2.5]))$, which is the "length" of the set $\mathbf Z^{-1}([0.625, 2.5])$ if we're using the Lebesgue measure. So what is $\mathbf Z^{-1}([0.625, 2.5])$? Well, $\mathbf Z(\omega) = 10\omega^2 \in [0.625, 2.5]$ is the same as saying $0.625 \le 10\omega^2 \le 2.5$. Dividing by 10 and taking square roots, we're left with $0.25 \le \omega \le 0.5$. So $\mathbf Z^{-1}([0.625, 2.5]) = [0.25, 0.5]$. The length of that set is clearly 0.25, so that's the probability we're looking for.
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Let $\mathbf Z$ be as it is above. Find a formula for $P(\mathbf Z \in [a,b])$ for any values $a \le b$ and $a,b \in [0,10]$.
</div>
Mission accomplished? Not quite. We managed to calculate a probability given the sample space and random variable, but what we want is some kind of function that we can manipulate to calculate these probabilities without reference to the sample space at all. We don't want to have to think about what $\Omega$ is or exactly how the different outcomes map to the different realizations.
So here's an idea: let's bin $z$ into 10 non-overlapping buckets, like $[0,1)$, $[1,2)$ ... $[9,10]$ and calculate the probability within each of those buckets. This is just like what we did in the discrete case. We're sorting all of the nuggets in the silo into different piles depending on which set of values they are destined to become, and then measuring the volume of each pile. Here's what we get when we do that for the example random variable $\bf Z$ defined in the code above:
```
from math import sqrt
def Pz(a, b):
    """P(Z ∈ [a, b]) for Z(ω) = 10ω²: the length of the preimage interval inside Ω = [0, 1]."""
    lo = sqrt(a / 10)  # the ω that maps to a
    hi = sqrt(b / 10)  # the ω that maps to b
    return hi - lo
zs = range(10)
ps = [Pz(z,z+1) for z in zs]
zs_labels = [f'[{z},{z+1})' for z in zs]
distribution = pd.DataFrame({'z':zs_labels, 'p':ps})
alt.Chart(distribution, height=100, width=400).mark_bar().encode(x='z:O', y='p')
```
And why stop at 10 buckets? Let's split it up into 100.
```
zs = np.arange(0,10,0.1)
ps = [Pz(z,z+0.1) for z in zs]
zs_labels = [f'[{z},{z+0.1})' for z in zs]
distribution = pd.DataFrame({'z':zs_labels, 'p':ps})
alt.Chart(distribution, height=100, width=400).mark_bar().encode(alt.X('z:O',axis=None), y='p')
```
More buckets gives us more information. If we want to know $P(\mathbf Z \in [0,0.5))$, for instance, we can sum up the probabilities for the buckets $[0,0.1)$, $[0.1,0.2)$, ... $[0.4,0.5)$. But we can't get *any* probability. The graph doesn't have enough information to let us calculate probabilities over intervals whose ends are between two cutpoints of the buckets. It only has resolution up to increments of $0.1$ in terms of $z$. It would be nice to have a graph that lets us read off arbitrary probabilities like $P(\mathbf Z \in [a,b])$ just by looking at how much "stuff" there is between $a$ and $b$. Something like this:
```
z = np.arange(0.1,10,0.1)
p = 1/(2*np.sqrt(10*z)) # magic, for now...
distribution = pd.DataFrame({'z':z, 'p':p})
alt.Chart(distribution, height=100, width=400).mark_area().encode(x='z', y='p')
```
Before I explain how I managed to make this graph, which is called a **density plot**, I want to establish an intuition for what it means. We've gone from 10 buckets, to 100 buckets, to "infinite" buckets. I like to think of these pictures literally: all the outcomes $\omega$ neatly piled up on top of the labels $z$ for the values they will become. So to get $P(\mathbf Z \in [a,b])$ from this picture, which is just the volume of outcomes that map to values between $a$ and $b$, all we need to do is see how much stuff there is piled up between $a$ and $b$ in the picture.

To do this, we turn to a useful tool from calculus: the integral. To make the picture above, we need a curve $\phi(z)$ such that the area under $\phi$ between $a$ and $b$ is $P(\mathbf Z \in [a,b])$ for all values $a$ and $b$. In the previous exercise you should have figured out that $P(\mathbf Z \in [a,b]) = \sqrt{\frac{b}{10}} - \sqrt{\frac{a}{10}}$. So what we need is the curve $\phi(z)$ that satisfies this equation:
$$\int_a^b \phi(z) dz = P(\mathbf Z \in [a,b]) = \sqrt{\frac{b}{10}} - \sqrt{\frac{a}{10}}$$
Looking at the integral equation, it's clear that $\Phi(z) = \sqrt{\frac{z}{10}}$ is the antiderivative of $\phi(z)$, so all we need to do to get $\phi$ is differentiate $\Phi$:
$$\phi(z) = \frac{d\Phi(z)}{dz} = \frac{d}{dz} \sqrt{\frac{z}{10}} = \frac{1}{2\sqrt{10z}}$$
That's why we have `p = 1/(2*np.sqrt(10*z))` in the code above.
The function $\phi(z)$ is called a **probability density function** (PDF), which is the continuous equivalent of the probability mass function. Its integral $\Phi(z) = \int_{-\infty}^z \phi(t)dt = P(\mathbf Z \le z)$ is called a **cumulative distribution function** (CDF). Either of these functions tell you everything you need to know about probabilities of the random variable $\mathbf Z$. The probability that $\mathbf Z$ takes any of the values in an arbitrary set $A$ is
$$P(\mathbf Z \in A) = \int_{A} \phi(z) dz$$
This works the same way as the probability mass function for a discrete random variable $\mathbf V$:
$$P(\mathbf V \in A) = \sum_{v \in A} \phi(v)$$
```
A = (1<=z) & (z<=4)
distribution = pd.DataFrame({'z':z, 'p':p, 'A':A})
alt.Chart(distribution, height=100, width=400).mark_area().encode(
x='z',
y='p'
) + alt.Chart(distribution.query('A')).mark_area(color='orange').encode(
x='z',
y='p'
)
```
For example, the probability that $\mathbf Z$ is in the set $[1,4]$ is the area shaded in orange above.
<div class="alert alert-block alert-warning">
<b>Note:</b>
The notation $\int_{A} \phi(z) dz$ just means $\int_{-\infty}^\infty I_A(z)\phi(z) dz$ where the <b>indicator function</b> $I_A(z)$ is 1 if $z\in A$ and 0 else. In other words, all we're doing is summing up the $\phi(z)dz$s where $z \in A$. That's analogous to summing up the $\phi(v)$s where $v \in A$ in the discrete case.
</div>
<div class="alert alert-block alert-info">
<b>Exercise:</b>
For our random variable $\mathbf Z$ with density $\phi(z) =\frac{1}{2\sqrt{10z}}$, what is $P(\mathbf Z \in [0, 10])$? Calculate the probability by integrating the density function. Does your answer line up with what you expect based on our original definition of $\mathbf Z$?
Here is another random variable that, like $\mathbf Z$, maps outcomes in $\Omega = [0,1]$ to values in $[0,10]$: $\mathbf W(\omega) = 10\omega$. Calculate $P(\mathbf W \in [a,b])$ for some interval $[a,b]$. What is the probability density function for $\mathbf W$? What is $P(\mathbf W \in [0, 10])$?
For <i>any</i> continuous random variable $\mathbf X$, what is $\int_{-\infty}^{\infty} \phi(x) dx$ (<i>hint</i>: what probability does this represent)? What is $\Phi(-\infty)$? $\Phi(\infty)$?
Is it possible to have a random variable $\mathbf Q$ with $\phi_{\mathbf Q}(q) < 0$ for some $q$ that is a possible realization of $\mathbf Q$? Why does this not make sense?
For two values $a < b$, is it possible that $\Phi(a) > \Phi(b)$? Why or why not?
</div>
At this point, talking about the outcomes $\omega$ is kind of silly. If two random variables have the same probability mass function or the same probability density function, then, for all intents and purposes, they are the same random variable. It doesn't matter exactly which outcomes map to which values, as long as the proportions are the same. We already demonstrated this in the discrete case.
To show the same concept for continuous random variables, here is a new random variable $\mathbf Z'$ whose sample space $\Omega$ is $[-100, 100]$ instead of $[0,1]$, but which has the same probability density function as our other random variable $\mathbf Z$:
$$
\mathbf Z'(\omega) = 10\left(\frac{\omega+100}{200}\right)^2
$$
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Prove to yourself that $\mathbf Z'$ has the same probability density function as $\mathbf Z$.
</div>
For this we use the notation $\mathbf Z \sim \mathbf Z'$. Technically they are not the same since the sample spaces are different, so we shouldn't write $\mathbf Z = \mathbf Z'$. But as far as an observer who is outside the curtain is concerned, there is no way to tell them apart. The *distribution* of the random variable is what really matters.
I think about mass or density functions as convenient abstraction layers between me and the random variable. If I want to know a probability, I don't have to go to the random variable and count up the volume of something in the sample space, I just "query" the mass or density. The "query engine" happens to be an integral or sum, and the query itself is the region of space that I want to integrate over. In a nutshell:
$$
\mathbb P(\mathbf Z^{-1}(A)) = \int_A \phi_{\mathbf Z}(z) dz
\quad
\text{or}
\quad
\mathbb P(\mathbf Z^{-1}(A)) = \sum_{z \in A} \phi_{\mathbf Z}(z)
$$
So if we have $\phi_Z$, we don't need to worry about figuring out what $\mathbf Z^{-1}(A)$ is or how to do the measurement of that set using $\mathbb P$. Finding preimages and measuring them is hard. Integrating or summing distribution functions is easier.
### Histograms vs. mass and density functions
Many of you are probably already familiar with histograms. Histograms are a way of visualizing observed data. Each observed value is stacked up on top of its approximate label (e.g. any $z$ between 0.5 and 1.5 is labeled "1") and the counts are plotted:
```
def Z(ω):
    """Factory: ω ↦ 10ω², producing values in [0, 10)."""
    return 10*(ω**2) # when ω goes into the factory, the factory makes 10ω^2

Z.Ω = random.random # silo: "drawing a nugget" = sampling a single number from [0, 1)

def realize_cont(rand_var): # run the assembly line!
    # BUG FIX: sample from rand_var's own silo; the original hard-coded `Z.Ω()`
    # and so ignored which random variable was actually passed in.
    ω = rand_var.Ω() # returns a single number between 0 and 1
    return rand_var(ω) # push it through the factory

z = [realize_cont(Z) for i in range(1000)] # 1000 draws from Z
plot_df = pd.DataFrame({'z':z})
alt.Chart(plot_df, height=100, width=400).mark_bar().encode(
alt.X('z', bin=alt.Bin(maxbins=100)),
y='count()'
)
```
That looks suspiciously like our bucketed density plot:
```
zs = np.arange(0,10,0.1)
ps = [Pz(z,z+0.1) for z in zs]
zs_labels = [f'[{z},{z+0.1})' for z in zs]
distribution = pd.DataFrame({'z':zs_labels, 'p':ps})
alt.Chart(distribution, height=100, width=400).mark_bar().encode(alt.X('z:O',axis=None), y='p')
```
So what's the difference? Think about what it is we're "stacking up" in the bars. In the histogram, we're sorting and stacking up a *finite number* $n$ of *observed values* $z_i$ according to what they are. In the density plot, we're sorting and stacking up *all* of the *outcomes* $\omega$ in the silo according to the values they are destined to become, and we're measuring their relative volume, not absolute counts.
In a nutshell, the histogram is what we can actually observe, given outputs from the factory. But the density describes the inner workings of the factory itself, which we can never actually observe.
<div class="alert alert-block alert-info">
<b>Exercise:</b>
What do you expect to happen to the shape of the histogram above as the number of observations is increased from $1000$ to larger and larger numbers? Can you provide an intuitive explanation for why this happens?
</div>
### Common Distributions
We've seen that, for all practical purposes, a random variable is determined by its probability distribution (mass or density function). In reality, the distribution of any particular measurement (e.g. blood pressure) is unknown- it depends on a complex web of causal factors. The true density function is almost certainly so complex it's not even something that we could write down. But, for the purposes of *modeling* that measurement, we *pretend* that the density is something we can write down.
Over the centuries, people have come up with a lot of distributions that are useful as models across various scenarios. Here are a few of them:
#### Bernoulli distribution
Let's say we're interested in modeling the result of a coin flip. The actual value (heads/tails, which we code as 0/1) of the coin flip is determined by some insanely complicated physics, but we're going to pretend that the value comes out of a little factory called $\mathbf Z$ that has the following probability mass function:
$$
P(\mathbf Z=z) =
\begin{cases}
1/2 & \text{for }z=0 \\
1/2 & \text{for }z=1
\end{cases}
$$
If we want to model a biased coin that comes up heads $(p\times100)$% of the time, we can use a mass function like:
$$
P(\mathbf Z=z) =
\begin{cases}
p & \text{for }z=0 \\
1-p & \text{for }z=1
\end{cases}
$$
This is often written as $\mathbf Z \sim \text{Bernoulli}(p)$ (read: "$\mathbf Z$ is Bernoulli-distributed"). The number $p$ is said to be a **parameter** of the Bernoulli distribution. It would be more accurate to say that a random variable is distributed as a **member** of the Bernoulli **family** of distributions, since, technically, every different value of $p$ encodes a different distribution, or factory, for making data.
Another way to think about it is that there's one data factory, but it has a control panel with a knob labeled "$p$". If $p$ is set to 0.7, we expect about 70% of the outputs to be 1. If $p$ is set to $0.1$, 10%, and so on. It's a matter of semantics whether or not you want to say that factory is representing two different factories, or merely one factory under two different operating conditions. Both perspectives are useful.
#### Normal Distribution
Let's say we want to model the heights of everyone on Earth. We have an intuition that people are typically a bit shorter than two meters, and taller and shorter people are more and more rare the taller and shorter they get. We can pretend that height measurements come from a **normal** distribution (also called **Gaussian** distribution):
$$
\phi(z) =
\frac{1}{\sigma\sqrt{2\pi}}
e^{-\frac{(z-\mu)^2}{2\sigma^2}}
$$
Most often you'll see this written as $\mathbf Z \sim \mathcal N(\mu, \sigma)$ (read: "$\mathbf Z$ is normally distributed"). The numbers $\mu$ and $\sigma$ are the parameters (control knobs) of the normal distribution.

As you can see in the picture, $\mu$ controls where the "bell curve" is centered and $\sigma$ controls how wide or narrow it is.
<div class="alert alert-block alert-warning">
<b>Note:</b>
Every distribution is defined by its mass or density function $\phi$. The mass or density is often a complicated function, so instead of saying something like "$\phi(z) = \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{(z-\mu)^2}{2\sigma^2}}$" every time we want a normally-distributed variable, we'll abbreviate that to "$\mathbf Z \sim \mathcal N (\mu, \sigma)$". But they mean the same thing.
Every time you see something like $\mathbf Z \sim \mathcal D(\theta_1, \theta_2, \dots)$, just know there is some mass or density function that is associated with the name $\mathcal D$ and which has parameters $\theta_1, \theta_2, \dots$. You can always look it up if you need to know exactly what it is.
</div>
#### Others
There are [hundreds](https://upload.wikimedia.org/wikipedia/commons/7/74/Normal_Distribution_PDF.svg) of well-studied distributions available to choose from when modeling. The most important thing to know about a distribution is what values it can generate. This is sometimes called the **support** of the distribution, since if you were to make a density or mass plot, the support would be the region of the x-axis that has positive density or mass, so it's the region that appears to be "supporting" the curve or mass.
For example, variables that are normally-, Cauchy-, or Laplace-distributed are supported on $(-\infty, \infty)$. The $\chi^2$ distribution has support on $[0,\infty)$. The beta and standard uniform distributions have support on $[0,1]$. The Poisson distribution has support on the counting numbers 0, 1, 2..., and the K-categorical distribution has support on a finite number of integers 0, 1, 2, ... K.
It's also totally possible to invent your own distribution by defining your own support set $S$ and mass/density function $\phi$, as long as $\phi(s) \ge 0$ for all $s \in S$ and $\int_S \phi(s) ds = 1$ or $\sum_{s \in S} \phi(s) = 1$. These properties have to be satisfied to have a valid density or mass (see exercise in previous section).
The point of this diversity is that it is possible to model different kinds of data. Apartment rents are always positive numbers, but theoretically unbounded above (a scary thought), so perhaps $\chi^2$ is a good choice. The number of cars that pass through an intersection in a given day is always an integer, so Poisson is a reasonable choice for that. You don't have to remember any of these specific distributions or examples- just know there are many preconstructed pretend data factories out there to play with. Also know that the real data-generating process is pretty much *never* actually one of these distributions, although, sometimes, it might be well-approximated by one.
<div class="alert alert-block alert-info">
<b>Exercise:</b>
Define your own density function that has support on $[0,1]$. Make it so that the probability of getting a bigger number is bigger than that of getting a smaller number. Be sure that your function integrates to $1$ and is nonnegative over its support, otherwise it's not a valid density.
</div>
## Chapter summary
Data in the real world is generated by complex processes that we can't ever hope to replicate. But if we want to uncover relationships between measurements, we at least need a framework for imagining what kinds of processes might be generating our data. Random variables and probability theory do that for us.
Random variables are like factories that generate data. We don't observe them directly, but we see the data they output and we can imagine different kinds of random variables that make different kinds of data. We defined a notion of probability that posits that the probability of observing a particular realization is actually just the volume of material in the factory's silo (sample space) that is destined to become that realization. This is a pure abstraction, but it turns out to capture relationships between probabilities that we would intuitively expect to hold.
It's easier to work with the probability distribution of a random variable than it is to constantly talk about the sample space and the mapping between that space and realizations. The probability distribution is a function that, when integrated over a region of the space of realizations, gives us the volume of outcomes in the sample space that map to realizations in that region. In other words: the probability that the random variable gives a realization in that region. Random variables can be continuous or discrete, but all have a distribution function that can be integrated or summed to yield probabilities.
Random variables are most often talked about in terms of their probability distributions. Defining a new variable is as easy as choosing a support and a mass or density function over that support. Some distributions are so commonly used that they have their own names and notations so that we don't have to write out their mass or density functions over and over again to refer to them.
| github_jupyter |
# The basics of awkward arrays
At the forefront of coffea is a completely new syntax for expressing analysis computations: `awkward arrays` and their index-based notation. For people coming from a
more traditional loop-based programming background, the syntax will take some getting used
to, but this tutorial can hopefully help you understand the syntax
and the various methods.
Let us begin by first understanding what you need to explore the contents of a typical
ntuple file using coffea related tools. First you can download the dummy ntuples file and
the corresponding schema files from the main repository to your working directory:
```sh
cd <WORKINGDIRECTORY>
wget https://raw.githubusercontent.com/UMDCMS/CoffeaTutorial/main/samples/dummy_nanoevents.root
wget https://raw.githubusercontent.com/UMDCMS/CoffeaTutorial/main/samples/dummyschema.py
```
We can use the usual ROOT tools to look at the contents of the `dummy_nanoevent.root`
file. But let us focus on using coffea tools alone.
First import the relevent coffea objects:
```
from coffea.nanoevents import NanoEventsFactory
from dummyschema import DummySchema
import numpy as np
import awkward1 as ak
```
Now we can create the event list as an awkward array using coffea tools like:
```
events = NanoEventsFactory.from_root( 'file:dummy_nanoevents.root', # The file, notice the prefix `file:` for local file operation
'Events', # Name of the tree object to open
entry_stop=50, # Limit the number of events to process, nice for small scale debugging
schemaclass=DummySchema
).events()
```
The last `schemaclass` argument will be left unexplained for now; see the schema tutorials to
learn more about what this is. Here we have created the events as an awkward array. To see what
is stored in the array we can use:
```
print(events.fields)
```
Indicating the collections that are stored in the awkward array. To see how many events exists in in our file
we can use the typical python method:
```
print(len(events))
```
The 50 here corresponds correctly to the `entry_stop` used to open the file. Next we can, of course, start to explore the contents of the various object collections. One can access the fields of the event as if it was a regular data member
```
print(events.Electron.fields)
print(events.Jet.fields)
```
Ah ha! We are starting to see numbers we can play around with. Notice that coffea was written with high energy physics analysis in mind, so even if the electron energy doesn't look like it is stored from the output of the fields, we can still access methods that we will typically associate with 4-vectors. In particular, notice that we can call the `energy` field of the electron collection, even though the energy field isn't explicitly defined. Coffea is designed with 4-vectors in mind, so the energy collection is calculated on the fly.
```
print(events.Electron.pt)
print(events.Electron.energy)
```
Now, looking at the output, we can begin to get a grasp of what awkward arrays are: the `events.Electron.pt` variable represents an N events times A objects array of floating points, the `events.Electron` variable represents an N events times A objects times K fields *collection* of floating point arrays, and the `events` variable represents the N events for which a certain set of collections (in this case three collections: `['Electron', 'Muon', 'Jet']`) is recorded.
The "awkward" part of the array refers to two things: first, the value of `A` is different for each event and for each collection. In this demonstration, our first event has 2 electrons, the second event has 4 electrons, and so on. Second, each collection can have a different number of fields. In a sense, the `events`, `Electron` and `pt` variables are just an easy way of representing the final `NxA` array that we might be interested in for the analysis. In our case the `N` number of events is what is called the outermost **dimension** or axis of the various objects, and `A` is the one inner dimension of the array. `K` is not a true dimension in the sense that it can be thought of as a book-keeping object used to keep track of how many awkward arrays are present; in this sense, we can say that `events.Electron` is an `NxA` object/collection array, as opposed to `events.Electron.pt` being an `NxA` data array.
We can use the usual index notation to look at a particular object of interest. For instance if we want to look at the 0-th electron of the 1-st event in our event list, we can write:
```
print(events[1].Electron[0])
```
But the real power of awkward arrays comes in when you don't explicitly use a concrete index, and instead express calculations in an abstract form
## Basic object and event selection
Let us start with the most basic example of event selection. Say we want to select event with electrons that have $p_T > 50$ GeV and $|\eta| < 0.5$. The awkward array allows us to write something like:
```
mask_pt = events.Electron.pt > 50
mask_eta = np.abs(events.Electron.eta) < 0.5
ele_mask = mask_pt & mask_eta
print(mask_pt)
print(mask_eta)
print(ele_mask)
```
We can see that the usual logic comparison operators generate an `NxA` boolean array telling us which electrons (or more specifically which electron pts and electron etas) pass this particular selection criterion. This particular boolean array generated from a logic operation on usual arrays is typically called a `mask`. We can use the typical boolean `&` operation to get the intersection of multiple masks, or the `|` operator for the union. Now the question is where can we use this mask? The answer is any array that has an `NxA` structure can receive these masks to create a reduced array!
```
print(events.Electron.pt[ele_mask])
print(events.Electron.eta[ele_mask])
selectedElectrons = events.Electron[ele_mask]
print(selectedElectrons.pt)
```
Probably the most important place to put the mask is directly in the `events.Electron` index; this generates a new collection of electrons that preserves the `NxA` structure, but has various collection instances filtered out. If you are familiar with `numpy`, this sort of index-based array filtering will look familiar. The difference is that because awkward arrays accept arrays of varying inner dimensions, they can truly preserve the structure of such a selection, rather than having everything be flattened out.
```
x = np.array([1,2,3,4,5,6,7,8,1,1,1,2])
print( x[x% 2 == 0])
y = np.array([[1,2,3,4],[5,6,7,8],[1,1,1,2]])
print( y[y%2==0])
z = ak.Array([[1,2,3,4],[5,6,7,8],[1,1,1,2]])
print(z[z%2==0])
```
Now suppose we only want events that have at least 1 electron selected event. What we need are a set of functions that can reduces this `NxA'` array to something of just dimesion `N`. Formally this is called **reduction** operations, and the awkward package has a large set of functions that can reduce the dimension of arrays. In our case, what we want is:
```
electron_count = ak.count(selectedElectrons.pt, axis=-1)
event_mask = electron_count >= 1
print(event_mask.__repr__)
```
To break this down, `ak.count`, as the method name suggests "counts" the number of elements along a certain axis, in our case, what we are intersted is the inner most dimension/axis, hence the typical python notation of `axis=-1`. Using this we can run the event selection using the usual masking notation:
```
selectedEvents = events[event_mask]
print(event_mask)
print(events.Electron.pt)
print(selectedEvents.Electron.pt)
print(len(selectedEvents))
```
Here we can confirm that the first event to pass the event selection is the 1-st event in the event list, and the 0-th instance in the `selectedEvents.Electron.pt` result of the selectedEvents indeed corresponds to the values stored in the 1-st event of the orignal event list.
## Object storage and collection creation
Having completed the selection, we might be rather annoyed that we didn't just store the selected Electron, since these are the objects that we are likely going to use for further calculation. Following from the code above, what we can do is add the additional selection to the `selectedElectrons` collections. This is valid since the `N` dimesional event mask "makes sense" performed on the `NxA'` dimesional selectedElectrons object.
```
our_selectedElectrons = selectedElectrons[event_mask]
print(our_selectedElectrons.pt)
print(len(our_selectedElectrons))
```
However, this is rather undesirable, since now we have some a whole bunch of detected collections, and event lists that we need to take care of: `selectedElectrons`, `selectedEvents`, `out_selectedEvents`. And this is with just one toy object selection. One can imagine if there isn't some sort of way to store collections into events, the analysis code will get out of hands very quick. This also ties into the topic that there might be certain physics quantities that are specific to a certain analysis that would might be used for the analysis object selection and would be nice to add to the electron collection if it isn't a standard variable that is maintained by the NanoAOD development team. Here we are going to add a very artificial example of calculating the inverse of the electron pt, then selecting on the inverse pt. This very simple example will demonstrate the typical syntax used for storing variables as well as exposing one of the parculiar quirks of awkward arrays:
```
print('First attempt at adding extended variables to events')
events.Electron['invpt'] = 1/events.Electron.pt
events['selectedElectron_1'] = events.Electron[events.Electron.pt > 50]
print(events.fields)
print(events.Electron.fields)
print(events.selectedElectron_1.fields)
print('\n\nSecond attemp at adding extended variables to events')
events['myElectron'] = events.Electron[:]
events.myElectron['invpt'] = 1/events.myElectron.pt
events['selectedElectron_2'] = events.myElectron[events.myElectron.pt > 50]
print(events.fields)
print(events.myElectron.fields)
print(events.selectedElectron_2.fields)
print('\n\nThird attemp at adding extended variables to events')
myElectron = events.Electron[:]
myElectron['invpt'] = 1/myElectron.pt
events['selectedElectron_3'] = myElectron[myElectron.pt > 50]
print(events.fields)
print(myElectron.fields)
print(events.selectedElectron_3.fields)
```
Lets get the straightforward part of the code clear up. The addition of collections looks very straight forward, one can think of the `events` as something that looks like a "dictionary of collection with a common outer dimension", so the addition of the two electron collections to the event has a very distionary-esque notation. What is strange is the persistence of the extended collection for the electrons. Logically, the operation looks identical, but the first attempt to add the new variable `invpt` directly to `events.Electron` fails to persist, and thus all direct extensions of `events.Electron` doesn't include the new `invpt` field.
The reason for this is rather technical regarding the mutability of objects in python and awkward. The rule-of-thumb is that collections that are directly generated from the file, (a.k.a. the collections directly obtained listed the `events.fields` immediate after opening a file) can **never** be altered, and therefore cannot have extended variables added to them. To create an extended variable to some collection, we will need to make some sort of copy of the original either by some trivial kinematic selection (ex. `myElectrons = events.Electrons[events.Electrons.pt > 0]`) or some trivial splicing (`myElectrons = events.Electrons[:]`). Another feature of mutability is that once the collection is added to the event collection, it becomes immutable. That is why the third attempt is the one that adds the both the electron extended variable and the extednded electron collection to the event.
Because of these quirks, it would typically be worth it to wrap the object selection into a function if the object selection is typical within an analysis, and it also helps with code readability
```
def SelectElectrons(electron):
    """Return a copy of *electron* filtered to pt > 50, with an extra
    `invpt` (1 / pt) field attached.

    The kinematic slice also serves a second purpose: it produces a copy
    that awkward allows us to mutate (collections read straight from the
    file are immutable and silently drop new fields).
    """
    selected = electron[electron.pt > 50]
    selected['invpt'] = 1.0 / selected.pt
    return selected
events['selectedElectron_f'] = SelectElectrons(events.Electron)
print(events.fields)
print(events.selectedElectron_f.fields)
```
Once the new object collection has been added to the event collection, they will persist to arbitrary levels of event selection:
```
myevents = events[ak.count(events.selectedElectron_f.pt,axis=-1) > 0 ]
print(myevents.fields)
print(myevents.selectedElectron_f.fields)
myevents = events[ak.count(events.selectedElectron_f.pt,axis=-1) > 1 ]
print(myevents.fields)
print(myevents.selectedElectron_f.fields)
myevents = events[ak.count(events.selectedElectron_f.pt,axis=-1) > 2 ]
print(myevents.fields)
print(myevents.selectedElectron_f.fields)
```
## Summary of basics
So to put this together into a single code block, suppose our analysis consists of selecting events that have at least 2 electrons with $p_{T} > 50GeV$, $|\eta| < 0.5$, and we want to calculate the average of all such electrons' inverse $p_{T}$ within the selected events. Our awkward array code would look something like:
```
events = NanoEventsFactory.from_root( 'file:dummy_nanoevents.root',
'Events',
entry_stop=50,
schemaclass=DummySchema).events()
## Object selection
selectedElectron = events.Electron[ (events.Electron.pt > 50) &
(np.abs(events.Electron.eta)<0.5) ]
selectedElectron['invpt'] = 1/selectedElectron.pt
events['selectedElectron'] = selectedElectron
# Event selection
events = events[ak.count(events.selectedElectron.pt,axis=-1) >= 2]
# Calculating the total average
print(ak.sum(events.selectedElectron.invpt)/ak.count(events.selectedElectron.invpt))
```
On total this is 4 statements (not counting the file reading step) used to make this analysis. Compare that with the loop based notation:
```
events = NanoEventsFactory.from_root( 'file:dummy_nanoevents.root',
'Events',
entry_stop=50,
schemaclass=DummySchema).events()
count = 0
suminv = 0
for i in range(len(events)):
is_good = []
for j in range(len(events[i].Electron)):
if events[i].Electron[j].pt > 50 and np.abs(events[i].Electron[j].eta) < 0.5:
is_good.append(j)
if len(is_good) >= 2:
for j in is_good:
count = count +1
suminv += 1.0/ events[i].Electron[j].pt
print(suminv/count)
```
Notice the results only differ because the 32-bit to 64-bit float conversion happens at different places. For awkward arrays, this happens only after the sum has been performed. For the loop-based approach it happens every time the `+=` operator is called.
For the loop based analysis, notice for such a simple analysis, many many lines of code are dedicated to just book keeping stuff: number of electrons passing criteria, adding a counter variable and sum variable... etc, instead of actualy analysis computation. The array based notation for expressing the analysis is much cleaner, if rather more unfamiliar to typical users.
Of course, this isn't the end. Physics analysis are typically more involved that just basic selection and counting. In the next session, we will talk about how to perform more involed calculations with awkward arrays that involves multiple collections within an event collection.
| github_jupyter |
# Multi-Layer Perceptron, MNIST
---
In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database.
The process will be broken down into the following steps:
>1. Load and visualize the data
2. Define a neural network
3. Train the model
4. Evaluate the performance of our trained model on a test dataset!
Before we begin, we have to import the necessary libraries for working with data and PyTorch.
```
# import libraries
import torch
import numpy as np
```
---
## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)
Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.
This cell will create DataLoaders for each of our datasets.
```
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
```
### Visualize a Batch of Training Data
The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
```
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
```
### View an Image in More Detail
```
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
```
---
## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)
The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
```
import torch.nn as nn
import torch.nn.functional as F
## TODO: Define the NN architecture
class Net(nn.Module):
    """Minimal starter MLP for MNIST: a single linear layer (784 -> 1)
    followed by ReLU.

    This is the placeholder architecture of the TODO exercise; it is
    meant to be extended with real hidden layers and dropout.
    """

    def __init__(self):
        super(Net, self).__init__()
        # one linear layer mapping a flattened 28x28 image to a single unit
        self.fc1 = nn.Linear(28 * 28, 1)

    def forward(self, x):
        # collapse each image to a 784-dim vector, keeping the batch dim
        flat = x.view(-1, 28 * 28)
        # linear transform followed by a ReLU non-linearity
        return F.relu(self.fc1(flat))
# initialize the NN
model = Net()
print(model)
```
### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)
It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
```
## TODO: Specify loss and optimization functions
# specify loss function
criterion = None
# specify optimizer
optimizer = None
```
---
## Train the Network
The steps for training/learning from a batch of data are described in the comments below:
1. Clear the gradients of all optimized variables
2. Forward pass: compute predicted outputs by passing inputs to the model
3. Calculate the loss
4. Backward pass: compute gradient of the loss with respect to model parameters
5. Perform a single optimization step (parameter update)
6. Update average training loss
The following loop trains for 30 epochs; feel free to change this number. For now, we suggest somewhere between 20-50 epochs. As you train, take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
```
# number of epochs to train the model
n_epochs = 30 # suggest training between 20-50 epochs
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
```
---
## Test the Trained Network
Finally, we test our best model on previously unseen **test data** and evaluate it's performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.
#### `model.eval()`
`model.eval(`) will set all the layers in your model to evaluation mode. This affects layers like dropout layers that turn "off" nodes during training with some probability, but should allow every node to be "on" for evaluation!
```
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for *evaluation*
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
```
### Visualize Sample Test Results
This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
```
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/gordicaleksa/get-started-with-JAX/blob/main/Tutorial_3_JAX_Neural_Network_from_Scratch_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# MLP training on MNIST
```
import numpy as np
import jax.numpy as jnp
from jax.scipy.special import logsumexp
import jax
from jax import jit, vmap, pmap, grad, value_and_grad
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
seed = 0
mnist_img_size = (28, 28)
def init_MLP(layer_widths, parent_key, scale=0.01):
    """Initialize MLP parameters as a list of [weight, bias] pairs.

    Args:
        layer_widths: sequence of layer sizes, e.g. [784, 512, 256, 10].
        parent_key: JAX PRNG key; split deterministically per layer.
        scale: multiplier applied to the standard-normal draws.

    Returns:
        List with one [W, b] pair per layer; W has shape (out, in),
        b has shape (out,).
    """
    layer_keys = jax.random.split(parent_key, num=len(layer_widths) - 1)
    params = []
    for fan_in, fan_out, layer_key in zip(layer_widths[:-1], layer_widths[1:], layer_keys):
        # independent sub-keys for the weight matrix and the bias vector
        w_key, b_key = jax.random.split(layer_key)
        w = scale * jax.random.normal(w_key, shape=(fan_out, fan_in))
        b = scale * jax.random.normal(b_key, shape=(fan_out,))
        params.append([w, b])
    return params
# test
key = jax.random.PRNGKey(seed)
MLP_params = init_MLP([784, 512, 256, 10], key)
print(jax.tree_map(lambda x: x.shape, MLP_params))
def MLP_predict(params, x):
    """Forward pass for a single flattened image.

    Runs every layer except the last through ReLU, then returns the
    log-softmax of the final layer's logits (so exp of the output sums
    to 1 over the classes).
    """
    *hidden, (w_out, b_out) = params
    act = x
    for w, b in hidden:
        act = jax.nn.relu(jnp.dot(w, act) + b)
    logits = jnp.dot(w_out, act) + b_out
    # log-softmax: log(exp(o_i) / sum_j exp(o_j))
    return logits - logsumexp(logits)
# tests
# test single example
dummy_img_flat = np.random.randn(np.prod(mnist_img_size))
print(dummy_img_flat.shape)
prediction = MLP_predict(MLP_params, dummy_img_flat)
print(prediction.shape)
# test batched function
batched_MLP_predict = vmap(MLP_predict, in_axes=(None, 0))
dummy_imgs_flat = np.random.randn(16, np.prod(mnist_img_size))
print(dummy_imgs_flat.shape)
predictions = batched_MLP_predict(MLP_params, dummy_imgs_flat)
print(predictions.shape)
def custom_transform(x):
    """Convert a PIL image (or any array-like) into a flat float32 vector."""
    return np.array(x, dtype=np.float32).ravel()
def custom_collate_fn(batch):
    """Collate a list of (image, label) pairs into (stacked images, label array)."""
    imgs, labels = zip(*batch)
    return np.stack(imgs), np.array(labels)
batch_size = 128
train_dataset = MNIST(root='train_mnist', train=True, download=True, transform=custom_transform)
test_dataset = MNIST(root='test_mnist', train=False, download=True, transform=custom_transform)
train_loader = DataLoader(train_dataset, batch_size, shuffle=True, collate_fn=custom_collate_fn, drop_last=True)
test_loader = DataLoader(test_dataset, batch_size, shuffle=False, collate_fn=custom_collate_fn, drop_last=True)
# test
batch_data = next(iter(train_loader))
imgs = batch_data[0]
lbls = batch_data[1]
print(imgs.shape, imgs[0].dtype, lbls.shape, lbls[0].dtype)
# optimization - loading the whole dataset into memory
train_images = jnp.array(train_dataset.data).reshape(len(train_dataset), -1)
train_lbls = jnp.array(train_dataset.targets)
test_images = jnp.array(test_dataset.data).reshape(len(test_dataset), -1)
test_lbls = jnp.array(test_dataset.targets)
num_epochs = 5
def loss_fn(params, imgs, gt_lbls):
    """Cross-entropy loss: mean of -(log-probs * one-hot labels) over the batch."""
    log_probs = batched_MLP_predict(params, imgs)
    return -jnp.mean(log_probs * gt_lbls)
def accuracy(params, dataset_imgs, dataset_lbls):
    """Fraction of examples whose arg-max prediction matches the label."""
    predicted = jnp.argmax(batched_MLP_predict(params, dataset_imgs), axis=1)
    return jnp.mean(predicted == dataset_lbls)
@jit
def update(params, imgs, gt_lbls, lr=0.01):
    """One SGD step.

    Returns (loss, new_params) where new_params = params - lr * grads,
    applied leaf-wise over the parameter pytree.

    Note: `jax.tree_multimap` was deprecated and has been removed from
    recent JAX releases; `jax.tree_util.tree_map` is the stable,
    multi-tree-capable replacement with identical semantics here.
    """
    loss, grads = value_and_grad(loss_fn)(params, imgs, gt_lbls)
    new_params = jax.tree_util.tree_map(lambda p, g: p - lr * g, params, grads)
    return loss, new_params
# Initialise the MLP and train it with plain SGD.
MLP_params = init_MLP([np.prod(mnist_img_size), 512, 256, len(MNIST.classes)], key)

for epoch in range(num_epochs):
    for step, (imgs, lbls) in enumerate(train_loader):
        gt_labels = jax.nn.one_hot(lbls, len(MNIST.classes))
        loss, MLP_params = update(MLP_params, imgs, gt_labels)
        if step % 50 == 0:
            print(loss)
    print(f'Epoch {epoch}, train acc = {accuracy(MLP_params, train_images, train_lbls)} test acc = {accuracy(MLP_params, test_images, test_lbls)}')

# Visual sanity check on one held-out example.
imgs, lbls = next(iter(test_loader))
img = imgs[0].reshape(mnist_img_size)
gt_lbl = lbls[0]
print(img.shape)

import matplotlib.pyplot as plt

pred = jnp.argmax(MLP_predict(MLP_params, np.ravel(img)))
print('pred', pred)
print('gt', gt_lbl)
plt.imshow(img)
plt.show()
```
# Visualizations
```
# Visualise what one first-layer neuron has learned.
w = MLP_params[0][0]  # first layer's weight matrix; rows have prod(mnist_img_size) entries
print(w.shape)
# One neuron's incoming weights rendered as an image.
w_single = w[500, :].reshape(mnist_img_size)
print(w_single.shape)
plt.imshow(w_single); plt.show()
# todo: visualize embeddings using t-SNE
from sklearn.manifold import TSNE
def fetch_activations(params, x):
    """Return the activations of the last hidden layer for a single input x."""
    activation = x
    for w, b in params[:-1]:  # output layer excluded
        activation = jax.nn.relu(jnp.dot(w, activation) + b)
    return activation
# Embed the last hidden layer's activations with t-SNE and colour by digit.
batched_fetch_activations = vmap(fetch_activations, in_axes=(None, 0))
imgs, lbls = next(iter(test_loader))
batch_activations = batched_fetch_activations(MLP_params, imgs)
print(batch_activations.shape)  # (batch, last-hidden-layer width)
t_sne_embeddings = TSNE(n_components=2, perplexity=30,).fit_transform(batch_activations)
# BUG FIX: the original colour map was copied from a Cora (7-class) example,
# so MNIST class ids 7-9 raised KeyError.  Use one tab10 colour per digit.
label_to_color_map = {class_id: plt.cm.tab10(class_id) for class_id in range(10)}
for class_id in range(10):
    mask = lbls == class_id
    plt.scatter(t_sne_embeddings[mask, 0], t_sne_embeddings[mask, 1],
                s=20, color=label_to_color_map[class_id])
plt.show()
# todo: dead neurons
def fetch_activations2(params, x):
    """Return a list of every hidden layer's activations for input x."""
    activations = []
    current = x
    for w, b in params[:-1]:  # output layer excluded
        current = jax.nn.relu(jnp.dot(w, current) + b)
        activations.append(current)
    return activations
# Count "dead" ReLU units: neurons whose activation is exactly 0 for every
# image in the batch (with freshly initialised parameters here).
batched_fetch_activations2 = vmap(fetch_activations2, in_axes=(None, 0))
imgs, lbls = next(iter(test_loader))
MLP_params2 = init_MLP([np.prod(mnist_img_size), 512, 256, len(MNIST.classes)], key)
batch_activations = batched_fetch_activations2(MLP_params2, imgs)
print(batch_activations[1].shape)  # (128, 512/256)
# Start from all-ones masks, then AND in "always zero over the batch".
dead_neurons = [np.ones(act.shape[1:]) for act in batch_activations]
for layer_id, acts in enumerate(batch_activations):
    always_zero = (acts == 0).all(axis=0)
    dead_neurons[layer_id] = np.logical_and(dead_neurons[layer_id], always_zero)
for layer_mask in dead_neurons:
    print(np.sum(layer_mask))
```
# Parallelization
```
```
| github_jupyter |
# Plots
One of the most amazing features of hist is its powerful plotting family. Here you can see how to plot a Hist.
```
from hist import Hist
import hist

# Two regular axes, 50 bins each over [-5, 5]; flow=False disables the
# under/overflow bins.
h = Hist(
    hist.axis.Regular(50, -5, 5, name="S", label="s [units]", flow=False),
    hist.axis.Regular(50, -5, 5, name="W", label="w [units]", flow=False),
)

import numpy as np

# S: unit Gaussian shifted to mean 1; W: standard Gaussian.
n_samples = 100_000
s_data = np.random.normal(size=n_samples) + np.ones(n_samples)
w_data = np.random.normal(size=n_samples)
# normal fill
h.fill(s_data, w_data)
```
## Via Matplotlib
hist allows you to plot via [Matplotlib](https://matplotlib.org/) like this:
```
import matplotlib.pyplot as plt

# Export the histogram to numpy arrays and draw it with plain Matplotlib.
fig, ax = plt.subplots(figsize=(8, 5))
counts, s_edges, w_edges = h.to_numpy()
# pcolormesh wants the first array axis to be y, hence the transpose.
mesh = ax.pcolormesh(s_edges, w_edges, counts.T, cmap="RdYlBu")
ax.set_xlabel("s")
ax.set_ylabel("w")
fig.colorbar(mesh)
plt.show()
```
## Via Mplhep
[mplhep](https://github.com/scikit-hep/mplhep) is an important visualization tool in the Scikit-HEP ecosystem. hist integrates with mplhep, so you can also plot using it. For more information about mplhep, please visit the official repo.
```
import mplhep
# Left panel: 1-D projection onto S; right panel: the full 2-D histogram.
fig, axs = plt.subplots(1, 2, figsize=(9, 4))
mplhep.histplot(h.project("S"), ax=axs[0])
mplhep.hist2dplot(h, ax=axs[1])
plt.show()
```
## Via Plot
Hist has plotting methods for 1-D and 2-D histograms, `.plot1d()` and `.plot2d()` respectively. It also provides `.plot()` for plotting according to its dimension. Moreover, to show the projection of each axis, you can use `.plot2d_full()`. If you have a Hist with higher dimension, you can use `.project()` to extract two dimensions to see it with our plotting suite.
Our plotting methods are all based on Matplotlib, so you can pass Matplotlib's `ax` into it, and hist will draw on it. We will create it for you if you do not pass them in.
```
# plot1d: 1-D line plot of the S projection; extra kwargs pass through to Matplotlib.
fig, ax = plt.subplots(figsize=(6, 4))
h.project("S").plot1d(ax=ax, ls="--", color="teal", lw=3)
plt.show()
# plot2d: heatmap of the full 2-D histogram.
fig, ax = plt.subplots(figsize=(6, 6))
h.plot2d(ax=ax, cmap="plasma")
plt.show()
# plot2d_full: 2-D heatmap plus the 1-D projections along the top and side.
plt.figure(figsize=(8, 8))
h.plot2d_full(
    main_cmap="coolwarm",
    top_ls="--",
    top_color="orange",
    top_lw=2,
    side_ls=":",
    side_lw=2,
    side_color="steelblue",
)
plt.show()
# auto-plot: .plot() dispatches to plot1d/plot2d based on dimensionality.
fig, axs = plt.subplots(1, 2, figsize=(9, 4), gridspec_kw={"width_ratios": [5, 4]})
h.project("W").plot(ax=axs[0], color="darkviolet", lw=2, ls="-.")
h.project("W", "S").plot(ax=axs[1], cmap="cividis")
plt.show()
```
## Via Plot Pull
Pull plots are commonly used in HEP studies, and we provide a method for them with `.plot_pull()`, which accepts a `Callable` object, like the below `pdf` function, which is then fit to the histogram and the fit and pulls are shown on the plot. As Normal distributions are the generally desired function to fit the histogram data, the `str` aliases `"normal"`, `"gauss"`, and `"gaus"` are supported as well.
```
def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0):
    """Gaussian of amplitude ``a`` centred at ``x0`` with width ``sigma``, shifted by ``offset``."""
    exponent = -((x - x0) ** 2) / (2 * sigma ** 2)
    return a * np.exp(exponent) + offset
np.random.seed(0)
# 1-D histogram of 1000 standard-normal samples, no flow bins.
hist_1 = hist.Hist(
    hist.axis.Regular(
        50, -5, 5, name="X", label="x [units]", underflow=False, overflow=False
    )
).fill(np.random.normal(size=1000))
fig = plt.figure(figsize=(10, 8))
# Fit a normal distribution and draw data + fit + pulls.  Kwarg prefixes:
# eb_ = error bars, fp_ = fitted curve, bar_/pp_ = pull bars, ub_ = uncertainty band.
main_ax_artists, sublot_ax_arists = hist_1.plot_pull(
    "normal",
    eb_ecolor="steelblue",
    eb_mfc="steelblue",
    eb_mec="steelblue",
    eb_fmt="o",
    eb_ms=6,
    eb_capsize=1,
    eb_capthick=2,
    eb_alpha=0.8,
    fp_c="hotpink",
    fp_ls="-",
    fp_lw=2,
    fp_alpha=0.8,
    bar_fc="royalblue",
    pp_num=3,
    pp_fc="royalblue",
    pp_alpha=0.618,
    pp_ec=None,
    ub_alpha=0.2,
)
```
## Via Plot Ratio
You can also make an arbitrary ratio plot using the `.plot_ratio` API:
```
# Second histogram with more entries (1700 vs 1000) to compare against hist_1.
hist_2 = hist.Hist(
    hist.axis.Regular(
        50, -5, 5, name="X", label="x [units]", underflow=False, overflow=False
    )
).fill(np.random.normal(size=1700))
fig = plt.figure(figsize=(10, 8))
# Ratio plot hist_1 / hist_2; rp_ kwargs configure the ratio panel.
main_ax_artists, sublot_ax_arists = hist_1.plot_ratio(
    hist_2,
    rp_ylabel=r"Ratio",
    rp_num_label="hist1",
    rp_denom_label="hist2",
    rp_uncert_draw_type="bar",  # line or bar
)
```
Ratios between the histogram and a callable, or `str` alias, are supported as well
```
fig = plt.figure(figsize=(10, 8))
# Ratio of the histogram to a fitted callable (the pdf defined above).
main_ax_artists, sublot_ax_arists = hist_1.plot_ratio(pdf)
Using the `.plot_ratio` API you can also make efficiency plots (where the numerator is a strict subset of the denominator)
```
# Build a numerator (hist_3) that is a strict subset of hist_2 so the
# "efficiency" uncertainty type is valid.
hist_3 = hist_2.copy() * 0.7
hist_2.fill(np.random.uniform(-5, 5, 600))
hist_3.fill(np.random.uniform(-5, 5, 200))
fig = plt.figure(figsize=(10, 8))
main_ax_artists, sublot_ax_arists = hist_3.plot_ratio(
    hist_2,
    rp_num_label="hist3",
    rp_denom_label="hist2",
    rp_uncert_draw_type="line",
    rp_uncertainty_type="efficiency",
)
```
| github_jupyter |
# Hands-on Federated Learning: Image Classification
In their recent (and extremely thorough!) review of the federated learning literature, [*Kairouz, et al (2019)*](https://arxiv.org/pdf/1912.04977.pdf) define federated learning as a machine learning setting where multiple entities (clients) collaborate in solving a machine learning problem, under the coordination of a central server or service provider. Each client’s raw data is stored locally and not exchanged or transferred; instead, focused updates intended for immediate aggregation are used to achieve the learning objective.
In this tutorial we will use a federated version of the classic MNIST dataset to introduce the Federated Learning (FL) API layer of TensorFlow Federated (TFF), [`tff.learning`](https://www.tensorflow.org/federated/api_docs/python/tff/learning) - a set of high-level interfaces that can be used to perform common types of federated learning tasks, such as federated training, against user-supplied models implemented in TensorFlow or Keras.
# Preliminaries
```
import collections
import os
import typing
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow_federated as tff
# required to run TFF inside Jupyter notebooks
import nest_asyncio
nest_asyncio.apply()
# Smoke test: build and immediately invoke a trivial federated computation.
tff.federated_computation(lambda: 'Hello, World!')()
```
# Preparing the data
In the IID setting the local data on each "client" is assumed to be a representative sample of the global data distribution. This is typically the case by construction when performing data parallel training of deep learning models across multiple CPU/GPU "clients".
The non-IID case is significantly more complicated as there are many ways in which data can be non-IID and different degrees of "non-IIDness". Consider a supervised task with features $X$ and labels $y$. A statistical model of federated learning involves two levels of sampling:
1. Sampling a client $i$ from the distribution over available clients $Q$
2. Sampling an example $(X,y)$ from that client’s local data distribution $P_i(X,y)$.
Non-IID data in federated learning typically refers to differences between $P_i$ and $P_j$ for different clients $i$ and $j$. However, it is worth remembering that both the distribution of available clients, $Q$, and the distribution of local data for client $i$, $P_i$, may change over time which introduces another dimension of “non-IIDness”. Finally, if the local data on a client's device is insufficiently randomized, perhaps ordered by time, then independence is violated locally as well.
In order to facilitate experimentation TFF includes federated versions of several popular datasets that exhibit different forms and degrees of non-IIDness.
```
# What datasets are available?
# NOTE: intentionally incomplete attribute access -- in a notebook this cell
# is used with <TAB> completion to list the bundled simulation datasets.
tff.simulation.datasets.
```
This tutorial uses a version of MNIST that contains a version of the original NIST dataset that has been re-processed using [LEAF](https://leaf.cmu.edu/) so that the data is keyed by the original writer of the digits.
The federated MNIST dataset displays a particular type of non-IIDness: feature distribution skew (covariate shift). With feature distribution skew the marginal distributions $P_i(X)$ vary across clients, even though $P(y|X)$ is shared. In the federated MNIST dataset users are writing the same numbers but each user has a different writing style characterized by different stroke width, slant, etc.
```
# IPython help syntax: shows the signature/docstring of load_data.
tff.simulation.datasets.emnist.load_data?
# Federated EMNIST digits: examples are keyed by the original writer.
emnist_train, emnist_test = (tff.simulation
                             .datasets
                             .emnist
                             .load_data(only_digits=True, cache_dir="../data"))
NUMBER_CLIENTS = len(emnist_train.client_ids)
NUMBER_CLIENTS  # displayed by the notebook
def sample_client_ids(client_ids: typing.List[str],
                      sample_size: typing.Union[float, int],
                      random_state: np.random.RandomState) -> typing.List[str]:
    """Randomly select client ids.

    Args:
        client_ids: non-empty list of candidate client ids.
        sample_size: fraction of the population (float in [0, 1]) or an
            absolute count (int in [0, len(client_ids)]).
        random_state: numpy RandomState for reproducible sampling.

    Returns:
        List of sampled client ids (length derived from sample_size).

    Raises:
        TypeError: if sample_size is neither float nor int.
        AssertionError: if client_ids is empty or sample_size is out of range.
    """
    number_clients = len(client_ids)
    # FIX: corrected "non-emtpy" typo in the user-facing message.
    assert number_clients > 0, "'client_ids' must be non-empty."
    if isinstance(sample_size, float):
        assert 0 <= sample_size <= 1, "Sample size must be between 0 and 1."
        size = int(sample_size * number_clients)
    elif isinstance(sample_size, int):
        error_msg = f"Sample size must be between 0 and {number_clients}."
        assert 0 <= sample_size <= number_clients, error_msg
        size = sample_size
    else:
        raise TypeError("Type of 'sample_size' must be 'float' or 'int'.")
    # NOTE(review): randint samples WITH replacement, so the same client can
    # be drawn more than once per round -- confirm this is intended
    # (RandomState.choice(..., replace=False) would give a true subset).
    random_idxs = random_state.randint(number_clients, size=size)
    return [client_ids[i] for i in random_idxs]
# these are what the client ids look like
_random_state = np.random.RandomState(42)
# Draw 10 ids; the notebook displays the returned list.
sample_client_ids(emnist_train.client_ids, 10, _random_state)
def create_tf_datasets(source: tff.simulation.ClientData,
                       client_ids: typing.Union[None, typing.List[str]]) -> typing.Dict[str, tf.data.Dataset]:
    """Build a {client_id: tf.data.Dataset} mapping for the given clients.

    Falls back to every client in `source` when `client_ids` is None.
    """
    ids = source.client_ids if client_ids is None else client_ids
    return {cid: source.create_tf_dataset_for_client(cid) for cid in ids}
def sample_client_datasets(source: tff.simulation.ClientData,
                           sample_size: typing.Union[float, int],
                           random_state: np.random.RandomState) -> typing.Dict[str, tf.data.Dataset]:
    """Randomly pick clients from `source` and materialise their datasets."""
    sampled_ids = sample_client_ids(source.client_ids, sample_size, random_state)
    return create_tf_datasets(source, sampled_ids)
_random_state = np.random.RandomState()
# sample_size=1 (int) -> exactly one client's dataset.
client_datasets = sample_client_datasets(emnist_train, sample_size=1, random_state=_random_state)
(client_id, client_dataset), *_ = client_datasets.items()
# Show the first five digits written by this client.
fig, axes = plt.subplots(1, 5, figsize=(12,6), sharex=True, sharey=True)
for i, example in enumerate(client_dataset.take(5)):
    axes[i].imshow(example["pixels"].numpy(), cmap="gray")
    axes[i].set_title(example["label"].numpy())
_ = fig.suptitle(x= 0.5, y=0.75, t=f"Training examples for a client {client_id}", fontsize=15)
```
## Data preprocessing
Since each client dataset is already a [`tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset), preprocessing can be accomplished using Dataset transformations. Another option would be to use preprocessing operations from [`sklearn.preprocessing`](https://scikit-learn.org/stable/modules/preprocessing.html).
Preprocessing consists of the following steps:
1. `map` a function that flattens the 28 x 28 images into 784-element tensors
2. `map` a function that rename the features from pixels and label to X and y for use with Keras
3. `shuffle` the individual examples
4. `batch` the into training batches
We also throw in a `repeat` over the data set to run several epochs on each client device before sending parameters to the server for averaging.
```
# Pipeline/model hyperparameters used by the preprocessing functions below.
AUTOTUNE = (tf.data
            .experimental
            .AUTOTUNE)  # let tf.data choose parallelism/prefetch sizes
SHUFFLE_BUFFER_SIZE = 1000
NUMBER_TRAINING_EPOCHS = 5 # number of local updates!
TRAINING_BATCH_SIZE = 32
TESTING_BATCH_SIZE = 32
NUMBER_FEATURES = 28 * 28  # flattened 28x28 image
NUMBER_TARGETS = 10  # digits 0-9
def _reshape(training_batch):
    """Flatten the pixels to 1-D and give the label an explicit length-1 axis."""
    X = tf.reshape(training_batch["pixels"], shape=[-1])
    y = tf.reshape(training_batch["label"], shape=[1])
    return X, y
def create_training_dataset(client_dataset: tf.data.Dataset) -> tf.data.Dataset:
    """Preprocess a raw client dataset for local training.

    Flattens features, shuffles, repeats for the local epochs, batches, and
    prefetches.
    """
    ds = client_dataset.map(_reshape, num_parallel_calls=AUTOTUNE)
    ds = ds.shuffle(SHUFFLE_BUFFER_SIZE, seed=None, reshuffle_each_iteration=True)
    ds = ds.repeat(NUMBER_TRAINING_EPOCHS)
    ds = ds.batch(TRAINING_BATCH_SIZE)
    return ds.prefetch(buffer_size=AUTOTUNE)
def create_testing_dataset(client_dataset: tf.data.Dataset) -> tf.data.Dataset:
    """Preprocess a raw client dataset for evaluation: reshape and batch only."""
    ds = client_dataset.map(_reshape, num_parallel_calls=AUTOTUNE)
    return ds.batch(TESTING_BATCH_SIZE)
```
## How to choose the clients included in each training round
In a typical federated training scenario there will be a very large population of user devices however only a fraction of these devices are likely to be available for training at a given point in time. For example, if the client devices are mobile phones then they might only participate in training when plugged into a power source, off a metered network, and otherwise idle.
In a simulated environment, where all data is locally available, an approach is to simply sample a random subset of the clients to be involved in each round of training so that the subset of clients involved will vary from round to round.
### How many clients to include in each round?
Updating and averaging a larger number of client models per training round yields better convergence, and in a simulated training environment it probably makes sense to include as many clients as is computationally feasible. However, in a real-world training scenario, while averaging a larger number of clients improves convergence, it also makes training vulnerable to slowdown due to unpredictable tail delays in computation/communication at/with the clients.
```
def create_federated_data(training_source: tff.simulation.ClientData,
                          testing_source: tff.simulation.ClientData,
                          sample_size: typing.Union[float, int],
                          random_state: np.random.RandomState) -> typing.Dict[str, typing.Tuple[tf.data.Dataset, tf.data.Dataset]]:
    """Sample clients and build their (training, testing) dataset pairs.

    Client ids are sampled from the training source; the same ids index the
    testing source.
    """
    sampled_ids = sample_client_ids(training_source.client_ids, sample_size, random_state)
    federated_data = {}
    for client_id in sampled_ids:
        training_dataset = create_training_dataset(
            training_source.create_tf_dataset_for_client(client_id))
        testing_dataset = create_testing_dataset(
            testing_source.create_tf_dataset_for_client(client_id))
        federated_data[client_id] = (training_dataset, testing_dataset)
    return federated_data
_random_state = np.random.RandomState(42)
# Sample 1% of the training clients.
federated_data = create_federated_data(emnist_train,
                                       emnist_test,
                                       sample_size=0.01,
                                       random_state=_random_state)
# keys are client ids, values are (training_dataset, testing_dataset) pairs
len(federated_data)
```
# Creating a model with Keras
If you are using Keras, you likely already have code that constructs a Keras model. Since the model will need to be replicated on each of the client devices we wrap the model in a no-argument Python function, a representation of which, will eventually be invoked on each client to create the model on that client.
```
def create_keras_model_fn() -> keras.Model:
    """Build a fresh softmax-regression Keras model (no hidden layers)."""
    layers = [
        keras.layers.Input(shape=(NUMBER_FEATURES,)),
        keras.layers.Dense(units=NUMBER_TARGETS),
        keras.layers.Softmax(),
    ]
    return keras.models.Sequential(layers)
```
In order to use any model with TFF, it needs to be wrapped in an instance of the [`tff.learning.Model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) interface, which exposes methods to stamp the model's forward pass, metadata properties, etc, and also introduces additional elements such as ways to control the process of computing federated metrics.
Once you have a Keras model like the one we've just defined above, you can have TFF wrap it for you by invoking [`tff.learning.from_keras_model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/from_keras_model), passing the model and a sample data batch as arguments, as shown below.
```
# IPython help syntax: shows the signature/docstring of from_keras_model.
tff.learning.from_keras_model?
def create_tff_model_fn() -> tff.learning.Model:
    """Wrap a freshly built Keras model as a tff.learning.Model."""
    keras_model = create_keras_model_fn()
    # Zero-valued batch used only so TFF can trace input shapes/dtypes.
    dummy_batch = (tf.constant(0.0, shape=(TRAINING_BATCH_SIZE, NUMBER_FEATURES), dtype=tf.float32),
                   tf.constant(0, shape=(TRAINING_BATCH_SIZE, 1), dtype=tf.int32))
    loss_fn = keras.losses.SparseCategoricalCrossentropy()
    metrics = [keras.metrics.SparseCategoricalAccuracy()]
    return tff.learning.from_keras_model(keras_model, dummy_batch, loss_fn, None, metrics)
```
Again, since our model will need to be replicated on each of the client devices we wrap the model in a no-argument Python function, a representation of which, will eventually be invoked on each client to create the model on that client.
# Training the model on federated data
Now that we have a model wrapped as `tff.learning.Model` for use with TFF, we can let TFF construct a Federated Averaging algorithm by invoking the helper function `tff.learning.build_federated_averaging_process` as follows.
Keep in mind that the argument needs to be a constructor (such as `create_tff_model_fn` above), not an already-constructed instance, so that the construction of your model can happen in a context controlled by TFF.
One critical note on the Federated Averaging algorithm below, there are 2 optimizers: a
1. `client_optimizer_fn` which is only used to compute local model updates on each client.
2. `server_optimizer_fn` applies the averaged update to the global model on the server.
N.B. the choice of optimizer and learning rate may need to be different than those you would use to train the model on a standard i.i.d. dataset. Start with stochastic gradient descent with a smaller (than normal) learning rate.
```
# IPython help syntax: shows the signature/docstring of the builder.
tff.learning.build_federated_averaging_process?
# Small client LR for the local SGD steps; server LR of 1.0 applies the
# averaged update as-is.
CLIENT_LEARNING_RATE = 1e-2
SERVER_LEARNING_RATE = 1e0
def create_client_optimizer(learning_rate: float = CLIENT_LEARNING_RATE,
                            momentum: float = 0.0,
                            nesterov: bool = False) -> keras.optimizers.Optimizer:
    """SGD used on each client to compute its local model updates."""
    return keras.optimizers.SGD(learning_rate, momentum, nesterov)
def create_server_optimizer(learning_rate: float = SERVER_LEARNING_RATE,
                            momentum: float = 0.0,
                            nesterov: bool = False) -> keras.optimizers.Optimizer:
    """SGD used on the server to apply the averaged update to the global model."""
    return keras.optimizers.SGD(learning_rate, momentum, nesterov)
# Build the iterative Federated Averaging process.  The arguments are
# constructors, not instances, so TFF controls where models are built.
federated_averaging_process = (tff.learning
                               .build_federated_averaging_process(create_tff_model_fn,
                                                                  create_client_optimizer,
                                                                  create_server_optimizer,
                                                                  client_weight_fn=None,
                                                                  stateful_delta_aggregate_fn=None,
                                                                  stateful_model_broadcast_fn=None))
```
What just happened? TFF has constructed a pair of *federated computations* (i.e., programs in TFF's internal glue language) and packaged them into a [`tff.utils.IterativeProcess`](https://www.tensorflow.org/federated/api_docs/python/tff/utils/IterativeProcess) in which these computations are available as a pair of properties `initialize` and `next`.
It is a goal of TFF to define computations in a way that they could be executed in real federated learning settings, but currently only local execution simulation runtime is implemented. To execute a computation in a simulator, you simply invoke it like a Python function. This default interpreted environment is not designed for high performance, but it will suffice for this tutorial.
## `initialize`
A function that takes no arguments and returns the state of the federated averaging process on the server. This function is only called to initialize a federated averaging process after it has been created.
```
# () -> SERVER_STATE
print(federated_averaging_process.initialize.type_signature)
# Initial server state (including the initial model parameters).
state = federated_averaging_process.initialize()
```
## `next`
A function that takes current server state and federated data as arguments and returns the updated server state as well as any training metrics. Calling `next` performs a single round of federated averaging consisting of the following steps.
1. pushing the server state (including the model parameters) to the clients
2. on-device training on their local data
3. collecting and averaging model updates
4. producing a new updated model at the server.
```
# extract the training datasets from the federated data
federated_training_data = [training_dataset for _, (training_dataset, _) in federated_data.items()]
# SERVER_STATE, FEDERATED_DATA -> SERVER_STATE, TRAINING_METRICS
# One round: broadcast model, local training, aggregate, update server model.
state, metrics = federated_averaging_process.next(state, federated_training_data)
print(f"round: 0, metrics: {metrics}")
```
Let's run a few more rounds on the same training data (which will over-fit to a particular set of clients but will converge faster).
```
# Repeat federated averaging on the SAME client sample (converges faster but
# over-fits to these clients).
number_training_rounds = 15
for round_number in range(1, number_training_rounds):
    state, metrics = federated_averaging_process.next(state, federated_training_data)
    print(f"round:{round_number}, metrics:{metrics}")
# First attempt at simulating federated averaging
A proper federated averaging simulation would randomly sample new clients for each training round, allow for evaluation of training progress on training and testing data, and log training and testing metrics to TensorBoard for reference.
Here we define a function that randomly sample new clients prior to each training round and logs training metrics TensorBoard. We defer handling testing data until we discuss federated evaluation towards the end of the tutorial.
```
def simulate_federated_averaging(federated_averaging_process: tff.utils.IterativeProcess,
                                 training_source: tff.simulation.ClientData,
                                 testing_source: tff.simulation.ClientData,
                                 sample_size: typing.Union[float, int],
                                 random_state: np.random.RandomState,
                                 number_rounds: int,
                                 initial_state: None = None,
                                 tensorboard_logging_dir: str = None):
    """Run `number_rounds` of federated averaging, resampling clients each round.

    Each round draws a fresh client sample, runs one `next` step of the
    process, and prints the training metrics.  When `tensorboard_logging_dir`
    is given, the per-round metrics are also written as TensorBoard scalars.

    Returns:
        (final server state, metrics of the last round).  `metrics` is None
        when number_rounds == 0 (the original raised NameError there).
    """
    state = federated_averaging_process.initialize() if initial_state is None else initial_state
    metrics = None

    def _run_round(state, round_number, log_to_tensorboard):
        # One round: resample clients, train, report.
        federated_data = create_federated_data(training_source,
                                               testing_source,
                                               sample_size,
                                               random_state)
        # Strip client ids: the averaging process only needs the datasets.
        anonymized_training_data = [dataset for _, (dataset, _) in federated_data.items()]
        state, metrics = federated_averaging_process.next(state, anonymized_training_data)
        print(f"Round: {round_number}, Training metrics: {metrics}")
        if log_to_tensorboard:
            for name, value in metrics._asdict().items():
                tf.summary.scalar(name, value, step=round_number)
        return state, metrics

    # FIX: the original duplicated the whole training loop in both branches;
    # the shared body now lives in _run_round.  makedirs(exist_ok=True) also
    # avoids the isdir/makedirs race of the original.
    if tensorboard_logging_dir is not None:
        os.makedirs(tensorboard_logging_dir, exist_ok=True)
        summary_writer = (tf.summary
                          .create_file_writer(tensorboard_logging_dir))
        with summary_writer.as_default():
            for n in range(number_rounds):
                state, metrics = _run_round(state, n, log_to_tensorboard=True)
    else:
        for n in range(number_rounds):
            state, metrics = _run_round(state, n, log_to_tensorboard=False)
    return state, metrics
# Fresh process so the simulation restarts from round 0.
federated_averaging_process = (tff.learning
                               .build_federated_averaging_process(create_tff_model_fn,
                                                                  create_client_optimizer,
                                                                  create_server_optimizer,
                                                                  client_weight_fn=None,
                                                                  stateful_delta_aggregate_fn=None,
                                                                  stateful_model_broadcast_fn=None))
_random_state = np.random.RandomState(42)
_tensorboard_logging_dir = "../results/logs/tensorboard"
# 5 rounds, resampling 1% of the clients each round, with TensorBoard logging.
updated_state, current_metrics = simulate_federated_averaging(federated_averaging_process,
                                                              training_source=emnist_train,
                                                              testing_source=emnist_test,
                                                              sample_size=0.01,
                                                              random_state=_random_state,
                                                              number_rounds=5,
                                                              tensorboard_logging_dir=_tensorboard_logging_dir)
# Displayed by the notebook for inspection.
updated_state
current_metrics
```
# Customizing the model implementation
Keras is the recommended high-level model API for TensorFlow and you should be using Keras models and creating TFF models using [`tff.learning.from_keras_model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/from_keras_model) whenever possible.
However, [`tff.learning`](https://www.tensorflow.org/federated/api_docs/python/tff/learning) provides a lower-level model interface, [`tff.learning.Model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model), that exposes the minimal functionality necessary for using a model for federated learning. Directly implementing this interface (possibly still using building blocks from [`keras`](https://www.tensorflow.org/guide/keras)) allows for maximum customization without modifying the internals of the federated learning algorithms.
Now we are going to repeat the above from scratch!
## Defining model variables
We start by defining a new Python class that inherits from `tff.learning.Model`. In the class constructor (i.e., the `__init__` method) we will initialize all relevant variables using TF primitives as well as define our "input spec", which defines the shape and types of the tensors that will hold input data.
```
class MNISTModel(tff.learning.Model):
def __init__(self):
# initialize some trainable variables
self._weights = tf.Variable(
initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_FEATURES, NUMBER_TARGETS)),
name="weights",
trainable=True
)
self._bias = tf.Variable(
initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_TARGETS,)),
name="bias",
trainable=True
)
# initialize some variables used in computing metrics
self._number_examples = tf.Variable(0.0, name='number_examples', trainable=False)
self._total_loss = tf.Variable(0.0, name='total_loss', trainable=False)
self._number_true_positives = tf.Variable(0.0, name='number_true_positives', trainable=False)
# define the input spec
self._input_spec = collections.OrderedDict([
('X', tf.TensorSpec([None, NUMBER_FEATURES], tf.float32)),
('y', tf.TensorSpec([None, 1], tf.int32))
])
@property
def input_spec(self):
return self._input_spec
@property
def local_variables(self):
return [self._number_examples, self._total_loss, self._number_true_positives]
@property
def non_trainable_variables(self):
return []
@property
def trainable_variables(self):
return [self._weights, self._bias]
```
## Defining the forward pass
With the variables for model parameters and cumulative statistics in place we can now define the `forward_pass` method that computes loss, makes predictions, and updates the cumulative statistics for a single batch of input data.
```
class MNISTModel(tff.learning.Model):
    """Second cut: adds the forward pass (loss, predictions, metric updates).

    Redefines MNISTModel from the previous cell with the same variables and
    input spec, plus forward_pass and its helpers.
    """

    def __init__(self):
        # initialize some trainable variables (zero-initialised softmax regression)
        self._weights = tf.Variable(
            initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_FEATURES, NUMBER_TARGETS)),
            name="weights",
            trainable=True
        )
        self._bias = tf.Variable(
            initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_TARGETS,)),
            name="bias",
            trainable=True
        )
        # initialize some variables used in computing metrics
        # (accumulated across batches by forward_pass)
        self._number_examples = tf.Variable(0.0, name='number_examples', trainable=False)
        self._total_loss = tf.Variable(0.0, name='total_loss', trainable=False)
        self._number_true_positives = tf.Variable(0.0, name='number_true_positives', trainable=False)
        # define the input spec: X = batch of flattened images, y = column of int labels
        self._input_spec = collections.OrderedDict([
            ('X', tf.TensorSpec([None, NUMBER_FEATURES], tf.float32)),
            ('y', tf.TensorSpec([None, 1], tf.int32))
        ])

    @property
    def input_spec(self):
        return self._input_spec

    @property
    def local_variables(self):
        return [self._number_examples, self._total_loss, self._number_true_positives]

    @property
    def non_trainable_variables(self):
        return []

    @property
    def trainable_variables(self):
        return [self._weights, self._bias]

    @tf.function
    def _count_true_positives(self, y_true, y_pred):
        """Count exact label matches (i.e. correct predictions) in the batch."""
        return tf.reduce_sum(tf.cast(tf.equal(y_true, y_pred), tf.float32))

    @tf.function
    def _linear_transformation(self, batch):
        """Logits: X @ W + b."""
        X = batch['X']
        W, b = self.trainable_variables
        Z = tf.matmul(X, W) + b
        return Z

    @tf.function
    def _loss_fn(self, y_true, probabilities):
        """Cross-entropy from one-hot labels and predicted probabilities.

        NOTE(review): log(probabilities) yields -inf if a probability
        underflows to 0 -- confirm this is acceptable for training here.
        """
        return -tf.reduce_mean(tf.reduce_sum(tf.one_hot(y_true, NUMBER_TARGETS) * tf.math.log(probabilities), axis=1))

    @tf.function
    def _model_fn(self, batch):
        """Class probabilities: softmax over the logits."""
        Z = self._linear_transformation(batch)
        probabilities = tf.nn.softmax(Z)
        return probabilities

    @tf.function
    def forward_pass(self, batch, training=True):
        """Compute loss/predictions for a batch and update metric accumulators.

        `training` is accepted per the interface but unused: the computation
        is identical in both modes for this model.
        """
        probabilities = self._model_fn(batch)
        y_pred = tf.argmax(probabilities, axis=1, output_type=tf.int32)
        y_true = tf.reshape(batch['y'], shape=[-1])
        # compute local variables
        loss = self._loss_fn(y_true, probabilities)
        true_positives = self._count_true_positives(y_true, y_pred)
        number_examples = tf.size(y_true, out_type=tf.float32)
        # update local variables (running totals across batches)
        self._total_loss.assign_add(loss)
        self._number_true_positives.assign_add(true_positives)
        self._number_examples.assign_add(number_examples)
        batch_output = tff.learning.BatchOutput(
            loss=loss,
            predictions=y_pred,
            num_examples=tf.cast(number_examples, tf.int32)
        )
        return batch_output
## Defining the local metrics
Next, we define a method `report_local_outputs` that returns a set of local metrics. These are the values, in addition to model updates (which are handled automatically), that are eligible to be aggregated to the server in a federated learning or evaluation process.
Finally, we need to determine how to aggregate the local metrics emitted by each device by defining `federated_output_computation`. This is the only part of the code that isn't written in TensorFlow - it's a federated computation expressed in TFF.
```
# Custom tff.learning.Model: a one-layer softmax (multinomial logistic
# regression) classifier over flattened MNIST pixels, with the loss and
# accuracy bookkeeping that TFF needs for federated aggregation.
class MNISTModel(tff.learning.Model):
def __init__(self):
# initialize some trainable variables
self._weights = tf.Variable(
initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_FEATURES, NUMBER_TARGETS)),
name="weights",
trainable=True
)
self._bias = tf.Variable(
initial_value=lambda: tf.zeros(dtype=tf.float32, shape=(NUMBER_TARGETS,)),
name="bias",
trainable=True
)
# initialize some variables used in computing metrics
# (local, per-client accumulators reported via report_local_outputs)
self._number_examples = tf.Variable(0.0, name='number_examples', trainable=False)
self._total_loss = tf.Variable(0.0, name='total_loss', trainable=False)
self._number_true_positives = tf.Variable(0.0, name='number_true_positives', trainable=False)
# define the input spec
# Batches are dicts: 'X' float32 [batch, NUMBER_FEATURES], 'y' int32 [batch, 1].
self._input_spec = collections.OrderedDict([
('X', tf.TensorSpec([None, NUMBER_FEATURES], tf.float32)),
('y', tf.TensorSpec([None, 1], tf.int32))
])
@property
def federated_output_computation(self):
# How the server aggregates each client's local metrics (see below).
return self._aggregate_metrics_across_clients
@property
def input_spec(self):
return self._input_spec
@property
def local_variables(self):
# Metric accumulators; never shipped to the server as model weights.
return [self._number_examples, self._total_loss, self._number_true_positives]
@property
def non_trainable_variables(self):
return []
@property
def trainable_variables(self):
return [self._weights, self._bias]
# Server-side aggregation: example-weighted means of loss and accuracy
# plus the total example count across the sampled clients.
@tff.federated_computation
def _aggregate_metrics_across_clients(metrics):
aggregated_metrics = {
'number_examples': tff.federated_sum(metrics.number_examples),
'average_loss': tff.federated_mean(metrics.average_loss, metrics.number_examples),
'accuracy': tff.federated_mean(metrics.accuracy, metrics.number_examples)
}
return aggregated_metrics
@tf.function
def _count_true_positives(self, y_true, y_pred):
# Count of exact label matches in the batch, as float32.
return tf.reduce_sum(tf.cast(tf.equal(y_true, y_pred), tf.float32))
@tf.function
def _linear_transformation(self, batch):
# Affine logits Z = X W + b.
X = batch['X']
W, b = self.trainable_variables
Z = tf.matmul(X, W) + b
return Z
@tf.function
def _loss_fn(self, y_true, probabilities):
# Mean categorical cross-entropy.  NOTE(review): log(0) yields -inf if a
# probability underflows; computing from logits would be more stable.
return -tf.reduce_mean(tf.reduce_sum(tf.one_hot(y_true, NUMBER_TARGETS) * tf.math.log(probabilities), axis=1))
@tf.function
def _model_fn(self, batch):
# Softmax over the linear logits -> per-class probabilities.
Z = self._linear_transformation(batch)
probabilities = tf.nn.softmax(Z)
return probabilities
@tf.function
def forward_pass(self, batch, training=True):
# `training` is part of the tff.learning.Model interface but unused here.
probabilities = self._model_fn(batch)
y_pred = tf.argmax(probabilities, axis=1, output_type=tf.int32)
y_true = tf.reshape(batch['y'], shape=[-1])
# compute local variables
loss = self._loss_fn(y_true, probabilities)
true_positives = self._count_true_positives(y_true, y_pred)
number_examples = tf.cast(tf.size(y_true), tf.float32)
# update local variables
self._total_loss.assign_add(loss)
self._number_true_positives.assign_add(true_positives)
self._number_examples.assign_add(number_examples)
batch_output = tff.learning.BatchOutput(
loss=loss,
predictions=y_pred,
num_examples=tf.cast(number_examples, tf.int32)
)
return batch_output
@tf.function
def report_local_outputs(self):
# Ratios computed from the accumulators at report time.
local_metrics = collections.OrderedDict([
('number_examples', self._number_examples),
('average_loss', self._total_loss / self._number_examples),
('accuracy', self._number_true_positives / self._number_examples)
])
return local_metrics
```
Here are a few points worth highlighting:
* All state that your model will use must be captured as TensorFlow variables, as TFF does not use Python at runtime (remember your code should be written such that it can be deployed to mobile devices).
* Your model should describe what form of data it accepts (input_spec), as in general, TFF is a strongly-typed environment and wants to determine type signatures for all components. Declaring the format of your model's input is an essential part of it.
* Although technically not required, we recommend wrapping all TensorFlow logic (forward pass, metric calculations, etc.) as tf.functions, as this helps ensure the TensorFlow can be serialized, and removes the need for explicit control dependencies.
The above is sufficient for evaluation and algorithms like Federated SGD. However, for Federated Averaging, we need to specify how the model should train locally on each batch.
```
# Adds local (on-client) training to MNISTModel, as required by
# Federated Averaging (tff.learning.TrainableModel).
class MNISTrainableModel(MNISTModel, tff.learning.TrainableModel):
def __init__(self, optimizer):
super().__init__()
# Keras optimizer used for the client-local gradient steps.
self._optimizer = optimizer
@tf.function
def train_on_batch(self, batch):
# One SGD step: forward pass under a tape, then apply gradients.
with tf.GradientTape() as tape:
output = self.forward_pass(batch)
gradients = tape.gradient(output.loss, self.trainable_variables)
self._optimizer.apply_gradients(zip(tf.nest.flatten(gradients), tf.nest.flatten(self.trainable_variables)))
return output
```
# Simulating federated training with the new model
With all the above in place, the remainder of the process looks like what we've seen already - just replace the model constructor with the constructor of our new model class, and use the two federated computations in the iterative process you created to cycle through training rounds.
```
def create_custom_tff_model_fn():
    """Construct a fresh MNISTrainableModel with its own SGD optimizer.

    TFF calls this factory once per place it needs a model; returning a
    new instance each time avoids sharing state across computations.
    """
    return MNISTrainableModel(keras.optimizers.SGD(learning_rate=0.02))
# Build Federated Averaging around the custom TFF model and run 10 rounds.
federated_averaging_process = (tff.learning
.build_federated_averaging_process(create_custom_tff_model_fn))
_random_state = np.random.RandomState(42)
# NOTE(review): this calls the earlier simulate_federated_averaging
# (defined before this chunk) that takes no federated_evaluation argument;
# a later cell redefines the function with that extra parameter.
updated_state, current_metrics = simulate_federated_averaging(federated_averaging_process,
training_source=emnist_train,
testing_source=emnist_test,
sample_size=0.01,
random_state=_random_state,
number_rounds=10)
updated_state
current_metrics
```
# Evaluation
All of our experiments so far presented only federated training metrics - the average metrics over all batches of data trained across all clients in the round. Should we be concerned about overfitting? Yes! In federated averaging algorithms there are two different ways to over-fit.
1. Overfitting the shared model (especially if we use the same set of clients on each round).
2. Over-fitting local models on the clients.
## Federated evaluation
To perform evaluation on federated data, you can construct another federated computation designed for just this purpose, using the [`tff.learning.build_federated_evaluation`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_evaluation) function, and passing in your model constructor as an argument. Note that evaluation doesn't perform gradient descent and there's no need to construct optimizers.
```
tff.learning.build_federated_evaluation?
federated_evaluation = (tff.learning
.build_federated_evaluation(create_custom_tff_model_fn))
# function type signature: SERVER_MODEL, FEDERATED_DATA -> METRICS
print(federate_evaluation.type_signature)
```
The `federated_evaluation` function is similar to `tff.utils.IterativeProcess.next` but with two important differences.
1. Function does not return the server state; since evaluation doesn't modify the model or any other aspect of state - you can think of it as stateless.
2. Function only needs the model and doesn't require any other part of server state that might be associated with training, such as optimizer variables.
```
# Evaluate the trained server model on the same clients' training data.
training_metrics = federated_evaluation(updated_state.model, federated_training_data)
training_metrics
```
Note the numbers may look marginally better than what was reported by the last round of training. By convention, the training metrics reported by the iterative training process generally reflect the performance of the model at the beginning of the training round, so the evaluation metrics will always be one step ahead.
## Evaluating on client data not used in training
Since we are training a shared model for digit classification, we might also want to evaluate the performance of the model on client test datasets where the corresponding training dataset was not used in training.
```
# Sample clients' held-out test data and evaluate the shared model on it.
_random_state = np.random.RandomState(42)
client_datasets = sample_client_datasets(emnist_test, sample_size=0.01, random_state=_random_state)
federated_testing_data = [create_testing_dataset(client_dataset) for _, client_dataset in client_datasets.items()]
testing_metrics = federated_evaluation(updated_state.model, federated_testing_data)
testing_metrics
```
# Adding evaluation to our federated averaging simulation
```
def simulate_federated_averaging(federated_averaging_process: tff.utils.IterativeProcess,
                                 federated_evaluation,
                                 training_source: tff.simulation.ClientData,
                                 testing_source: tff.simulation.ClientData,
                                 sample_size: typing.Union[float, int],
                                 random_state: np.random.RandomState,
                                 number_rounds: int,
                                 tensorboard_logging_dir: str = None):
    """Run `number_rounds` of Federated Averaging with per-round evaluation.

    Each round samples a fresh set of clients, trains on their training
    splits, then evaluates the updated server model on both the training
    and the testing splits.  If `tensorboard_logging_dir` is given, the
    metrics of every round are also written as TensorBoard scalars.

    Returns: (final server state, (last training metrics, last testing metrics)).
    """
    state = federated_averaging_process.initialize()
    training_metrics = testing_metrics = None  # defined even if number_rounds == 0

    def _run_round(n, log_to_tensorboard):
        # One round: sample clients, train, evaluate, print (and maybe log).
        # The original duplicated this entire body in both branches below.
        nonlocal state
        federated_data = create_federated_data(training_source,
                                               testing_source,
                                               sample_size,
                                               random_state)
        # extract the training and testing datasets
        anonymized_training_data = []
        anonymized_testing_data = []
        for training_dataset, testing_dataset in federated_data.values():
            anonymized_training_data.append(training_dataset)
            anonymized_testing_data.append(testing_dataset)
        state, _ = federated_averaging_process.next(state, anonymized_training_data)
        training_metrics = federated_evaluation(state.model, anonymized_training_data)
        testing_metrics = federated_evaluation(state.model, anonymized_testing_data)
        print(f"Round: {n}, Training metrics: {training_metrics}, Testing metrics: {testing_metrics}")
        if log_to_tensorboard:
            # NOTE(review): train and test metrics are written under the same
            # scalar names (as in the original), so the test values overwrite
            # the train values for a given step in TensorBoard.
            for name, value in training_metrics._asdict().items():
                tf.summary.scalar(name, value, step=n)
            for name, value in testing_metrics._asdict().items():
                tf.summary.scalar(name, value, step=n)
        return training_metrics, testing_metrics

    if tensorboard_logging_dir is not None:
        os.makedirs(tensorboard_logging_dir, exist_ok=True)  # idempotent mkdir
        summary_writer = (tf.summary
                          .create_file_writer(tensorboard_logging_dir))
        with summary_writer.as_default():
            for n in range(number_rounds):
                training_metrics, testing_metrics = _run_round(n, log_to_tensorboard=True)
    else:
        for n in range(number_rounds):
            training_metrics, testing_metrics = _run_round(n, log_to_tensorboard=False)
    return state, (training_metrics, testing_metrics)
# Rebuild the FedAvg process and the evaluation computation, then run the
# 15-round simulation, now passing federated_evaluation explicitly.
federated_averaging_process = (tff.learning
.build_federated_averaging_process(create_tff_model_fn,
create_client_optimizer,
create_server_optimizer,
client_weight_fn=None,
stateful_delta_aggregate_fn=None,
stateful_model_broadcast_fn=None))
federated_evaluation = (tff.learning
.build_federated_evaluation(create_tff_model_fn))
_random_state = np.random.RandomState(42)
updated_state, current_metrics = simulate_federated_averaging(federated_averaging_process,
federated_evaluation,
training_source=emnist_train,
testing_source=emnist_test,
sample_size=0.01,
random_state=_random_state,
number_rounds=15)
```
# Wrapping up
## Interesting resources
[PySyft](https://github.com/OpenMined/PySyft) is a Python library for secure and private Deep Learning created by [OpenMined](https://www.openmined.org/). PySyft decouples private data from model training, using
[Federated Learning](https://ai.googleblog.com/2017/04/federated-learning-collaborative.html),
[Differential Privacy](https://en.wikipedia.org/wiki/Differential_privacy),
and [Multi-Party Computation (MPC)](https://en.wikipedia.org/wiki/Secure_multi-party_computation) within the main Deep Learning frameworks like PyTorch and TensorFlow.
| github_jupyter |
```
import pandas as pd
from unidecode import unidecode
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
# Fetch the NLTK stopword corpus (no-op if already downloaded).
nltk.download('stopwords')
# Movie-review dataset; latin-1 encoding — presumably the file is not
# valid UTF-8 (TODO confirm against the source file).
df = pd.read_csv('../base/review.csv',encoding='latin-1')
df.head()
import string
from nltk.stem.snowball import SnowballStemmer
import swifter
import nltk
# Shared globals used by the preprocessing helpers below.
stemmer = SnowballStemmer("english")
stop = set(stopwords.words('english'))
def lower(texto):
    """Return `texto` converted to lowercase."""
    return str.lower(texto)
def normalize(texto):
# Transliterate to the closest ASCII form (strips accents/diacritics).
return unidecode(texto)
# Translation table mapping every ASCII punctuation character to a space;
# built once at module load instead of per call.
_PUNCT_TO_SPACE = str.maketrans({p: " " for p in string.punctuation})

def remove_ponctuation(texto):
    """Replace each ASCII punctuation character in `texto` with a space.

    Uses a single str.translate pass instead of the original's one
    full-string .replace() scan per punctuation character (32 scans).
    Non-ASCII punctuation is left untouched, as before.
    """
    return texto.translate(_PUNCT_TO_SPACE)
def remove_stopwords(texto):
    """Drop every whitespace-separated token of `texto` found in the
    module-level `stop` set (NLTK English stopwords)."""
    kept = (palavra for palavra in texto.split() if palavra not in stop)
    return ' '.join(kept)
def stem(texto):
    """Stem each whitespace-separated token of `texto` with the
    module-level Snowball `stemmer` and rejoin with single spaces."""
    return ' '.join(stemmer.stem(palavra) for palavra in texto.split())
def remove_number(texto):
    """Return `texto` with every decimal digit character removed."""
    non_digits = (ch for ch in texto if not ch.isdigit())
    return ''.join(non_digits)
def pipeline(texto):
    """Full preprocessing chain, applied in order:
    unidecode -> lowercase -> strip punctuation -> drop stopwords
    -> drop digits -> stem."""
    for step in (normalize, lower, remove_ponctuation,
                 remove_stopwords, remove_number, stem):
        texto = step(texto)
    return texto
df['SentimentText'].apply(lower).head()
remove_ponctuation("é, ué!")
len(df)
# Apply the full preprocessing pipeline (parallelized via swifter).
df['preproc'] = df['SentimentText'].swifter.apply(pipeline)
# vectorizer = CountVectorizer()
# X = vectorizer.fit_transform(df['preproc'])
# len(vectorizer.get_feature_names())
# TF-IDF features over the preprocessed text.
vectorizer_tfidf = TfidfVectorizer()
X = vectorizer_tfidf.fit_transform(df['preproc'])
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# newer versions use get_feature_names_out() — confirm the pinned version.
len(vectorizer_tfidf.get_feature_names())
y = df['Sentiment']
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import numpy as np
# 10-fold cross-validated balanced accuracy: logistic regression baseline.
clf = LogisticRegression(solver='liblinear')
np.mean(cross_val_score(clf, X, y, cv=10, scoring='balanced_accuracy'))
# Same metric for multinomial Naive Bayes.
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
np.mean(cross_val_score(clf, X, y, cv=10, scoring='balanced_accuracy'))
clf.fit(X, y)
import pickle
# Persist the fitted classifier and vectorizer.  `with` guarantees the
# file handles are closed even if pickling raises (the original used
# manual open()/close() pairs with no error handling).
with open('clf.pickle', 'wb') as outfile:
    pickle.dump(clf, outfile)
with open('vectorizer.pickle', 'wb') as outfile:
    pickle.dump(vectorizer_tfidf, outfile)
#I just love this movie. Specially the climax, seriously one of the best climax I have ever seen.
#I just want to say how amazing this film is from start to finish. This will take you on a emotional ride.You will not he disappointed
#LITERALLY , one of the best movies i have seen in my entire life , filled with a tone of action and emotions . you will love avenger endgame . ' i love you 3000 '
```
| github_jupyter |
## 最小二乘法
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
# Observed (Xi, Yi) sample points for the least-squares line fit.
# Fix: the deprecated alias `np.float` (removed in NumPy 1.24) is replaced
# by the builtin `float`, which NumPy maps to float64.
Xi = np.array(
    [157, 162, 169, 176, 188, 200, 211, 220, 230, 237, 247, 256, 268, 287, 285, 290, 301, 311, 326, 335, 337, 345, 348,
     358, 384, 396, 409, 415, 432, 440, 448, 449, 461, 467, 478, 493], dtype=float)
Yi = np.array(
    [143, 146, 153, 160, 169, 180, 190, 196, 207, 215, 220, 228, 242, 253, 251, 257, 271, 283, 295, 302, 301, 305, 308,
     324, 341, 357, 371, 382, 397, 406, 413, 411, 422, 434, 447, 458], dtype=float)
def func(p, x):
    """Linear model y = k*x + b, with parameters packed as p = (k, b)."""
    slope, intercept = p
    return slope * x + intercept
def error(p, x, y):
    """Residuals for scipy.optimize.leastsq: model prediction minus data."""
    residuals = func(p, x) - y
    return residuals
# Initial guess for (k, b).  leastsq is iterative, so p0 can influence the
# final cost Para[1] (noted experimentally in the original notebook).
p0 = [1, 20]
# Everything in `args` is forwarded to `error` after the parameter vector.
Para = leastsq(error, p0, args=(Xi, Yi))
# Unpack the fitted slope and intercept.
k, b = Para[0]
# Scatter the sample points (figure aspect 8:6).
plt.figure(figsize=(8, 6))
plt.scatter(Xi, Yi, color="green", linewidth=2, label="samples")
# Draw the fitted line over the sample x-range.
# Fix: the artists now carry `label`s so plt.legend() has entries to show;
# the original called legend() with no labeled artists, which only warns
# and renders nothing.
plt.plot(Xi, k * Xi + b, color="red", linewidth=2, label="least-squares fit")
plt.legend(loc='lower right')
plt.show()
```
## 梯度下降法
```
import numpy as np
import matplotlib.pyplot as plt
# Same sample points, reused for the gradient-descent fit.
# Fix: the deprecated alias `np.float` (removed in NumPy 1.24) is replaced
# by the builtin `float`, which NumPy maps to float64.
x = np.array(
    [157, 162, 169, 176, 188, 200, 211, 220, 230, 237, 247, 256, 268, 287, 285, 290, 301, 311, 326, 335, 337, 345, 348,
     358, 384, 396, 409, 415, 432, 440, 448, 449, 461, 467, 478, 493], dtype=float)
y = np.array(
    [143, 146, 153, 160, 169, 180, 190, 196, 207, 215, 220, 228, 242, 253, 251, 257, 271, 283, 295, 302, 301, 305, 308,
     324, 341, 357, 371, 382, 397, 406, 413, 411, 422, 434, 447, 458], dtype=float)
def GD(x, y, learning_rate, iteration_num=10000):
    """Fit y = theta[0] + theta[1]*x by batch gradient descent, then plot
    the samples and the fitted line in a new figure."""
    theta = np.random.rand(2, 1)  # random init of (intercept, slope)
    # Design matrix with a leading column of ones for the intercept term.
    design = np.hstack((np.ones((len(x), 1)), x.reshape(len(x), 1)))
    targets = y.reshape(len(y), 1)
    for _ in range(iteration_num):
        # Mean-squared-error gradient over the whole batch.
        gradient = np.dot(design.T, (np.dot(design, theta) - targets)) / design.shape[0]
        # Step against the gradient.
        theta -= learning_rate * gradient
    # 計算 MSE
    # loss = np.linalg.norm(np.dot(x, theta) - y)
    plt.figure()
    plt.title('Learning rate: {}, iteration_num: {}'.format(learning_rate, iteration_num))
    plt.scatter(design[:, 1], targets.reshape(len(targets)))
    plt.plot(design[:, 1], np.dot(design, theta), color='red', linewidth=3)
# Sweep learning rate and iteration count; each call re-fits from a fresh
# random initialization and draws its own figure.
GD(x, y, learning_rate=0.00001, iteration_num=1)
GD(x, y, learning_rate=0.00001, iteration_num=3)
GD(x, y, learning_rate=0.00001, iteration_num=10)
GD(x, y, learning_rate=0.00001, iteration_num=100)
GD(x, y, learning_rate=0.000001, iteration_num=1)
GD(x, y, learning_rate=0.000001, iteration_num=3)
GD(x, y, learning_rate=0.000001, iteration_num=10)
GD(x, y, learning_rate=0.000001, iteration_num=100)
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# グラフと関数の基礎
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/guide/intro_to_graphs"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
# グラフと `tf.function` の基礎
このガイドは、TensorFlow の仕組みを説明するために、TensorFlow と Keras 基礎を説明します。今すぐ Keras に取り組みたい方は、[Keras のガイド一覧](keras/)を参照してください。
このガイドでは、グラフ取得のための単純なコード変更、格納と表現、およびモデルの高速化とエクスポートを行うための使用方法について、TensorFlow の中核的な仕組みを説明します。
注意: TensorFlow 1.x のみの知識をお持ちの場合は、このガイドでは、非常に異なるグラフビューが紹介されています。
これは、基礎を概説したガイドです。これらの概念の徹底ガイドについては、[`tf.function` ガイド](function)を参照してください。
## グラフとは?
前回の 3 つのガイドでは、TensorFlow の **Eager** execution について説明しました。これは、TensorFlow 演算が演算ごとにPythonによって実行され、結果を Python に返すことを意味します。Eager TensorFlow は GPU を活用し、変数、テンソル、さらには演算を GPU と TPU に配置することができます。また、デバックも簡単に行えます。
一部のユーザーは、Python から移動する必要はありません。
ただし、TensorFlow を Python で演算ごとに実行すると、ほかの方法では得られない多数の高速化機能が利用できなくなります。Python からテンソルの計算を抽出できる場合は、*グラフ* にすることができます。
**グラフとは、計算のユニットを表す一連の `tf.Operation` オブジェクトと、演算間を流れるデータのユニットを表す `tf.Tensor` オブジェクトを含むデータ構造です。** `tf.Graph` コンテキストで定義されます。これらのグラフはデータ構造であるため、元の Python コードがなくても、保存、実行、および復元することができます。
次は、TensorBoard で視覚化された単純な二層グラフです。

## グラフのメリット
グラフを使用すると、柔軟性が大幅に向上し、モバイルアプリケーション。組み込みデバイス、バックエンドサーバーといった Python インタプリタのない環境でも TensorFlow グラフを使用できます。TensorFlow は、Python からエクスポートされた場合に、保存されるモデルの形式としてグラフを使用します。
また、グラフは最適化を簡単に行えるため、コンパイラは次のような変換を行えます。
- 計算に定数ノードを畳み込むで、テンソルの値を統計的に推論します*(「定数畳み込み」)*。
- 独立した計算のサブパートを分離し、スレッドまたはデバイスに分割します。
- 共通部分式を取り除き、算術演算を単純化します。
これやほかの高速化を実行する [Grappler](./graph_optimization.ipynb) という総合的な最適化システムがあります。
まとめると、グラフは非常に便利なもので、**複数のデバイス**で、TensorFlow の**高速化**、**並列化**、および効率化を期待することができます。
ただし、便宜上、Python で機械学習モデル(またはその他の計算)を定義した後、必要となったときに自動的にグラフを作成することをお勧めします。
# グラフのトレース
TensorFlow でグラフを作成する方法は、直接呼出しまたはデコレータのいずれかとして `tf.function` を使用することです。
```
import tensorflow as tf
import timeit
from datetime import datetime
# Define a Python function
def function_to_get_faster(x, y, b):
# matmul then bias add; pure TF ops, so the body can be traced into a graph
x = tf.matmul(x, y)
x = x + b
return x
# Create a `Function` object that contains a graph
a_function_that_uses_a_graph = tf.function(function_to_get_faster)
# Make some tensors
x1 = tf.constant([[1.0, 2.0]])
y1 = tf.constant([[2.0], [3.0]])
b1 = tf.constant(4.0)
# It just works! (the first call traces the graph; later calls reuse it)
a_function_that_uses_a_graph(x1, y1, b1).numpy()
```
`tf.function` 化された関数は、[Python コーラブル]()で、Python 相当と同じように機能します。特定のクラス(`python.eager.def_function.Function`)を使用しますが、ユーザーにとっては、トレースできないものと同じように動作します。
`tf.function` は、それが呼び出す Python 関数を再帰的にトレースします。
```
# tf.function traces recursively through plain Python helpers it calls.
def inner_function(x, y, b):
x = tf.matmul(x, y)
x = x + b
return x
# Use the decorator
@tf.function
def outer_function(x):
y = tf.constant([[2.0], [3.0]])
b = tf.constant(4.0)
return inner_function(x, y, b)
# Note that the callable will create a graph that
# includes inner_function() as well as outer_function()
outer_function(tf.constant([[1.0, 2.0]])).numpy()
```
TensorFlow 1.x を使用したことがある場合は、`Placeholder` または `tf.Sesssion` をまったく定義する必要がないことに気づくでしょう。
## フローの制御と副次的影響
フロー制御とループは、デフォルトで `tf.autograph` によって TensorFlow に変換されます。Autograph は、ループコンストラクトの標準化、アンロール、および [AST](https://docs.python.org/3/library/ast.html) マニピュレーションなどのメソッドを組み合わせて使用します。
```
def my_function(x):
# AutoGraph converts this data-dependent Python branch for graph execution
# (see tf.autograph.to_code in the next cell).
if tf.reduce_sum(x) <= 1:
return x * x
else:
return x-1
a_function = tf.function(my_function)
print("First branch, with graph:", a_function(tf.constant(1.0)).numpy())
print("Second branch, with graph:", a_function(tf.constant([5.0, 5.0])).numpy())
```
Autograph 変換を直接呼び出して、Python が TensorFlow 演算に変換される様子を確認することができます。これはほとんど解読不能ですが、変換を確認することができます。
```
# Don't read the output too carefully.
# Show the AutoGraph-generated source for my_function.
print(tf.autograph.to_code(my_function))
```
Autograph は、`if-then` 句、ループ、 `break`、`return`、`continue` などを自動的に変換します。
ほとんどの場合、Autograph の動作に特別な考慮はいりませんが、いくつかの注意事項があり、これについては [tf.function ガイド](./function.ipynb)のほか、[Autograph 完全リファレンス](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md)が役立ちます。
## 高速化の確認
tensor-using 関数を `tf.function` でラッピングするだけでは、コードは高速化しません。単一のマシンで数回呼び出された小さな関数では、グラフまたはグラフの一部の呼び出しにかかるオーバーヘッドによってランタイムが占有されてしまうことがあります。また、GPU に大きな負荷をかける畳み込みのスタックなど、計算のほとんどがすでにアクセラレータで発生している場合は、グラフの高速化をあまり確認できません。
複雑な計算については、グラフによって大幅な高速化を得ることができます。これは、グラフが Python からデバイスへの通信や一部の高速化の実装を減らすためです。
次のコードは、小さな密のレイヤーでの数回の実行にかかる時間を計測します。
```
# Create an override model to classify pictures
class SequentialModel(tf.keras.Model):
def __init__(self, **kwargs):
super(SequentialModel, self).__init__(**kwargs)
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
self.dense_1 = tf.keras.layers.Dense(128, activation="relu")
self.dropout = tf.keras.layers.Dropout(0.2)
self.dense_2 = tf.keras.layers.Dense(10)
def call(self, x):
# flatten -> dense(relu) -> dropout -> dense logits
x = self.flatten(x)
x = self.dense_1(x)
x = self.dropout(x)
x = self.dense_2(x)
return x
input_data = tf.random.uniform([60, 28, 28])
eager_model = SequentialModel()
graph_model = tf.function(eager_model)
# Compare eager vs graph execution time over 10000 calls.
print("Eager time:", timeit.timeit(lambda: eager_model(input_data), number=10000))
print("Graph time:", timeit.timeit(lambda: graph_model(input_data), number=10000))
```
### 多層型関数
関数をトレースする場合、**多層型**の `Function` オブジェクトを作成します。多層型関数は Pythonコーラブルで、1つの API の背後にあるいくつかの具象関数グラフをカプセル化します。
この `Function` は、あらゆる `dtypes` と形状に使用できます。新しい引数シグネチャでそれを呼び出すたびに、元の関数が新しい引数で再トレースされます。`Function` は、そのトレースに対応する `tf.Graph` を `concrete_function` に格納します。関数がすでにそのような引数でトレースされている場合は、トレース済みのグラフが取得されます。
概念的に、次のようになります。
- **`tf.Graph`** は計算を説明する未加工のポータブルなデータ構造である
- **`Function`** は、ConcreteFunctions のキャッシュ、トレース、およびディスパッチャーである
- **`ConcreteFunction`** は、Python からグラフを実行できるグラフの Eager 対応ラッパーである
### 多層型関数の検査
`a_function` を検査できます。これはPython 関数 `my_function` に対して `tf.function` を呼び出した結果です。この例では、3 つの引数で `a_function` を呼び出すことで、3 つの具象関数を得られています。
```
# Each new argument signature retraces; each trace is cached as a
# ConcreteFunction inside the polymorphic `Function`.
print(a_function)
print("Calling a `Function`:")
print("Int:", a_function(tf.constant(2)))
print("Float:", a_function(tf.constant(2.0)))
print("Rank-1 tensor of floats", a_function(tf.constant([2.0, 2.0, 2.0])))
# Get the concrete function that works on floats
print("Inspecting concrete functions")
print("Concrete function for float:")
print(a_function.get_concrete_function(tf.TensorSpec(shape=[], dtype=tf.float32)))
print("Concrete function for tensor of floats:")
print(a_function.get_concrete_function(tf.constant([2.0, 2.0, 2.0])))
# Concrete functions are callable
# Note: You won't normally do this, but instead just call the containing `Function`
cf = a_function.get_concrete_function(tf.constant(2))
print("Directly calling a concrete function:", cf(tf.constant(2)))
```
この例では、スタックの非常に奥を調べています。具体的にトレースを管理していない限り、通常は、ここに示されるように具象関数を呼び出す必要はありません。
# Eager execution でのデバッグ
スタックトレースが長い場合、特に `tf.Graph` または `with tf.Graph().as_default()` の参照が含まれる場合、グラフコンテキストで実行している可能性があります。TensorFlow のコア関数は Keras の `model.fit()` などのグラフコンテキストを使用します。
Eager execution をデバッグする方がはるかに簡単であることがよくあります。スタックトレースは比較的に短く、理解しやすいからです。
グラフのデバックが困難な場合は、Eager execution に戻ってデバックすることができます。
Eager で実行していることを確認するには、次を行います。
- メソッドとレイヤーを直接コーラブルとして呼び出す
- Keras compile/fit を使用している場合、コンパイル時に **`model.compile(run_eagerly=True)`** を使用する
- **`tf.config.experimental_run_functions_eagerly(True)`** でグローバル実行モードを設定する
### `run_eagerly=True` を使用する
```
# Define an identity layer with an eager side effect
class EagerLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(EagerLayer, self).__init__(**kwargs)
# Do some kind of initialization here
def call(self, inputs):
# Python print: a side effect visible only while executing eagerly (or
# while tracing) — it is not part of the traced graph.
print("\nCurrently running eagerly", str(datetime.now()))
return inputs
# Create an override model to classify pictures, adding the custom layer
class SequentialModel(tf.keras.Model):
def __init__(self):
super(SequentialModel, self).__init__()
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
self.dense_1 = tf.keras.layers.Dense(128, activation="relu")
self.dropout = tf.keras.layers.Dropout(0.2)
self.dense_2 = tf.keras.layers.Dense(10)
self.eager = EagerLayer()
def call(self, x):
x = self.flatten(x)
x = self.dense_1(x)
x = self.dropout(x)
x = self.dense_2(x)
return self.eager(x)
# Create an instance of this model
model = SequentialModel()
# Generate some nonsense pictures and labels
input_data = tf.random.uniform([60, 28, 28])
labels = tf.random.uniform([60])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
```
まず、Eager を使用せずにモデルをコンパイルします。モデルはトレースされません。名前にも関わらず、`compile` は、損失関数、最適化、およびトレーニングパラメータのセットアップしか行いません。
```
# Compile without eager execution: sets up loss/optimizer only, no tracing yet.
model.compile(run_eagerly=False, loss=loss_fn)
```
ここで、`fit` を呼び出し、関数がトレース(2 回)されると Eager 効果が実行しなくなるのを確認します。
```
# Train for 3 epochs; the model is traced into a graph during fit(), after
# which the eager-only print no longer fires.
model.fit(input_data, labels, epochs=3)
```
ただし、エポックを 1 つでも Eager で実行すると、Eager の副次的作用が 2 回現れます。
```
print("Running eagerly")
# When compiling the model, set it to run eagerly
model.compile(run_eagerly=True, loss=loss_fn)
# With run_eagerly=True the eager-only side effect appears during fit().
model.fit(input_data, labels, epochs=1)
```
### `experimental_run_functions_eagerly` を使用する
また、すべてを Eager で実行するようにグローバルに設定することができます。これは、トレースし直した場合にのみ機能することに注意してください。トレースされた関数は、トレースされたままとなり、グラフとして実行します。
```
# Now, globally set everything to run eagerly
# NOTE(review): tf.config.experimental_run_functions_eagerly is deprecated
# in newer TF in favor of tf.config.run_functions_eagerly — confirm the
# pinned TensorFlow version.
tf.config.experimental_run_functions_eagerly(True)
print("Run all functions eagerly.")
# First, trace the model, triggering the side effect
polymorphic_function = tf.function(model)
# It was traced...
print(polymorphic_function.get_concrete_function(input_data))
# But when you run the function again, the side effect happens (both times).
result = polymorphic_function(input_data)
result = polymorphic_function(input_data)
# Don't forget to set it back when you are done
tf.config.experimental_run_functions_eagerly(False)
```
# トレースとパフォーマンス
トレースにはある程度のオーバーヘッドがかかります。小さな関数のトレースは素早く行えますが、大規模なモデルであればかなりの時間がかかる場合があります。パフォーマンスが上昇するとこの部分の時間は迅速に取り戻されますが、大規模なモデルのトレーニングの最初の数エポックでは、トレースによって遅延が発生する可能性があることに注意しておくことが重要です。
モデルの規模に関係なく、頻繁にトレースするのは避けたほうがよいでしょう。[tf.function ガイドのこのセクション](function.ipynb#when_to_retrace)では、入力仕様を設定し、テンソル引数を使用して再トレースを回避する方法について説明しています。パフォーマンスが異常に低下している場合は、誤って再トレースしていないかどうかを確認することをお勧めします。
eager-only の副次的効果(Python 引数の出力など)を追加して、関数がいつトレースされているかを確認できます。ここでは、新しい Python 引数が常に再トレースをトリガするため、余分な再トレースが発生していることを確認できます。
```
# Use @tf.function decorator
@tf.function
def a_function_with_python_side_effect(x):
print("Tracing!") # This executes eagerly, only while tracing
return x * x + tf.constant(2)
# This is traced the first time
print(a_function_with_python_side_effect(tf.constant(2)))
# The second time through, you won't see the side effect
print(a_function_with_python_side_effect(tf.constant(3)))
# This retraces each time the Python argument changes
# as a Python argument could be an epoch count or other
# hyperparameter
print(a_function_with_python_side_effect(2))
print(a_function_with_python_side_effect(3))
```
# 次のステップ
より詳しい説明については、`tf.function` API リファレンスページと[ガイド](./function.ipynb)を参照してください。
| github_jupyter |
```
import warnings
warnings.filterwarnings('ignore') # Hide warnings that do not affect execution. Not recommended in general.
```
# Chapter 5: 機械学習 回帰問題
## 5-1. 回帰問題を Pythonで解いてみよう
1. データセットの用意
2. モデル構築
### 5-1-1. データセットの用意
今回はwine-quality datasetを用いる.
wine-quality dataset はワインのアルコール濃度や品質などの12要素の数値データ.
赤ワインと白ワイン両方あります。赤ワインの含まれるデータ数は1600ほど.
まずはデータセットをダウンロードする.
proxy下ではjupyter notebookに設定をしないと以下は動作しない.
```
! wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv ./data/winequality-red.csv
```
jupyter notebook の設定が面倒な人へ.
proxyの設定をしたshell、もしくはブラウザなどで以下のURIからダウンロードしてください.
https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/
```
import pandas as pd
wine = pd.read_csv("./data/winequality-red.csv", sep=";") # sep sets the field delimiter
display(wine.head(5))
```
まずは説明変数1つで回帰を行ってみよう. 今回はalcoholを目的変数 $t$ に, densityを説明変数 $x$ にする.
```
# Target t = alcohol; single explanatory variable x = density.
X = wine[["density"]].values
T = wine["alcohol"].values
```
#### 前処理
データを扱いやすいように中心化する.
```
# Center both variables (subtract the mean) so they are easier to handle.
X = X - X.mean()
T = T - T.mean()
```
trainとtestに分割する.
```
# Naive head/tail split into train and test; the plots below show the two
# distributions differ badly, motivating the shuffle in the next cell.
X_train = X[:1000, :]
T_train = T[:1000]
X_test = X[1000:, :]
T_test = T[1000:]
import matplotlib.pyplot as plt
%matplotlib inline
fig, axes = plt.subplots(ncols=2, figsize=(12, 4))
axes[0].scatter(X_train, T_train, marker=".")
axes[0].set_title("train")
axes[1].scatter(X_test, T_test, marker=".")
axes[1].set_title("test")
fig.show()
```
train と test の分布がかなり違う.
予め shuffle して train と test に分割する必要があるようだ.
XとTの対応関係を崩さず shuffle する方法は多々あるが、その1つが以下.
```
import numpy as np
np.random.seed(0) # fix the RNG so the shuffle is reproducible
p = np.random.permutation(len(X)) # random permutation of indices
# Reindex X and T with the same permutation to keep (x, t) pairs aligned.
X = X[p]
T = T[p]
X_train = X[:1000, :]
T_train = T[:1000]
X_test = X[1000:, :]
T_test = T[1000:]
fig, axes = plt.subplots(ncols=2, figsize=(12, 4))
axes[0].scatter(X_train, T_train, marker=".")
axes[0].set_title("train")
axes[1].scatter(X_test, T_test, marker=".")
axes[1].set_title("test")
fig.show()
```
### 5-1-2. モデルの構築
**今回は**, 目的変数 $t$ を以下の回帰関数で予測する.
$$y=ax+b$$
この時、損失が最小になるように, パラメータ$a,b$を定める必要がある. ここでは二乗損失関数を用いる.
$$\mathrm{L}\left(a, b\right)
=\sum^{N}_{n=1}\left(t_n - y_n\right)^2
=\sum^{N}_{n=1}\left(t_n - ax_x-b\right)^2$$
<span style="color: gray; ">※これは, 目的変数 $t$ が上記の回帰関数 $y$ を中心としたガウス分布に従うという仮定を置いて最尤推定することと等価.</span>
```
class MyLinearRegression(object):
    """Simple (single-feature) linear regression y = a*x + b, fit by
    ordinary least squares on the squared loss L(a, b) = sum (t - a*x - b)^2.

    Completes the exercise template: the original `__init__` contained the
    dangling assignments `self.a =` / `self.b =` (a SyntaxError).
    """

    def __init__(self):
        """
        Initialize a coefficient and an intercept.
        """
        self.a = 0.0
        self.b = 0.0

    def fit(self, X, y):
        """
        X: data, array-like, shape (n_samples, n_features)
        y: array, shape (n_samples,)
        Estimate a coefficient and an intercept from data.

        Only the first (single) feature is used, matching the notebook's
        one-variable setup.  Closed-form OLS: a = cov(x, y) / var(x),
        b = mean(y) - a * mean(x).
        """
        x = np.ravel(X).astype(float)
        t = np.ravel(y).astype(float)
        x_centered = x - x.mean()
        t_centered = t - t.mean()
        self.a = (x_centered @ t_centered) / (x_centered @ x_centered)
        self.b = t.mean() - self.a * x.mean()
        return self

    def predict(self, X):
        """
        Calc y from X
        """
        return self.a * np.ravel(X) + self.b
```
上記の単回帰のクラスを完成させ, 以下の実行によって図の回帰直線が得られるはずだ.
```
clf = MyLinearRegression()
clf.fit(X_train, T_train)
# regression coefficient (slope)
print("係数: ", clf.a)
# intercept
print("切片: ", clf.b)
fig, axes = plt.subplots(ncols=2, figsize=(12, 4))
axes[0].scatter(X_train, T_train, marker=".")
axes[0].plot(X_train, clf.predict(X_train), color="red")
axes[0].set_title("train")
axes[1].scatter(X_test, T_test, marker=".")
axes[1].plot(X_test, clf.predict(X_test), color="red")
axes[1].set_title("test")
fig.show()
```
もしdatasetをshuffleせずに上記の学習を行った時, 得られる回帰直線はどうなるだろう?
試してみてください.
## 5-2. scikit-learnについて
### 5-2-1. モジュールの概要
[scikit-learn](http://scikit-learn.org/stable/)のホームページに詳しい情報がある.
実は scikit-learn に線形回帰のモジュールがすでにある.
#### scikit-learn の特徴
- scikit-learn(sklearn)には,多くの機械学習アルゴリズムが入っており,統一した形式で書かれているため利用しやすい.
- 各手法をコードで理解するだけでなく,その元となる論文も紹介されている.
- チュートリアルやどのように利用するのかをまとめたページもあり,似た手法が列挙されている.
```
import sklearn
print(sklearn.__version__)
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
# Fit the prediction model
clf.fit(X_train, T_train)
# regression coefficient (slope)
print("係数: ", clf.coef_)
# intercept
print("切片: ", clf.intercept_)
# coefficient of determination (R^2)
print("決定係数: ", clf.score(X_train, T_train))
# Draw the fitted line over both splits, as before.
fig, axes = plt.subplots(ncols=2, figsize=(12, 4))
axes[0].scatter(X_train, T_train, marker=".")
axes[0].plot(X_train, clf.predict(X_train), color="red")
axes[0].set_title("train")
axes[1].scatter(X_test, T_test, marker=".")
axes[1].plot(X_test, clf.predict(X_test), color="red")
axes[1].set_title("test")
fig.show()
```
自分のコードと同じ結果が出ただろうか?
また, データを shuffle せず得られた回帰直線のスコアと, shuffleした時の回帰直線のスコアの比較もしてみよう.
scikit-learn の linear regression のコードは [github][1] で公開されている.
コーディングの参考になると思うので眺めてみるといいだろう.
### 5-2-2. 回帰モデルの評価
性能を測るといっても,その目的によって指標を変える必要がある.
どのような問題で,どのような指標を用いることが一般的か?という問いに対しては,先行研究を確認することを勧める.
また,指標それぞれの特性(数学的な意味)を知っていることもその役に立つだろう.
[参考][2]
回帰モデルの評価に用いられる指標は一般にMAE, MSE, 決定係数などが存在する.
1. MAE
2. MSE
3. 決定係数
scikit-learn はこれらの計算をするモジュールも用意されている.
[1]:https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/linear_model/base.py#L367
[2]:https://scikit-learn.org/stable/modules/model_evaluation.html
```
from sklearn import metrics
# Evaluate the fitted model on the held-out test set.
T_pred = clf.predict(X_test)
print("MAE: ", metrics.mean_absolute_error(T_test, T_pred))
print("MSE: ", metrics.mean_squared_error(T_test, T_pred))
print("決定係数: ", metrics.r2_score(T_test, T_pred))
```
### 5-2-3. scikit-learn の他モデルを使ってみよう
```
# 1. Prepare the dataset
from sklearn import datasets
iris = datasets.load_iris()  # load the Iris dataset
print(iris.data[0], iris.target[0])  # data and label of the first sample
# 2. Split into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
# 3. Classify with a linear SVM
from sklearn.svm import SVC, LinearSVC
clf = LinearSVC()
clf.fit(X_train, y_train)  # train
# 4. Measure classifier performance
y_pred = clf.predict(X_test)  # predict
print(metrics.classification_report(y_true=y_test, y_pred=y_pred))  # evaluate predictions
```
### 5-2-4. 分類モデルの評価
分類問題に対する指標について考えてみよう.一般的な指標だけでも以下の4つがある.
1. 正解率(accuracy)
2. 精度(precision)
3. 再現率(recall)
4. F値(F1-score)
(精度,再現率,F値にはmacro, micro, weightedなどがある)
今回の実験でのそれぞれの値を見てみよう.
```
# Classification metrics; precision/recall/F1 are macro-averaged over classes.
print('accuracy: ', metrics.accuracy_score(y_test, y_pred))
print('precision:', metrics.precision_score(y_test, y_pred, average='macro'))
print('recall: ', metrics.recall_score(y_test, y_pred, average='macro'))
print('F1 score: ', metrics.f1_score(y_test, y_pred, average='macro'))
```
## 5-3. 問題に合わせたコーディング
### 5-3-1. Irisデータの可視化
Irisデータは4次元だったので,直接可視化することはできない.
4次元のデータをPCAによって圧縮して,2次元にし可視化する.
```
from sklearn.decomposition import PCA
from sklearn import datasets
iris = datasets.load_iris()
pca = PCA(n_components=2)
X, y = iris.data, iris.target
X_pca = pca.fit_transform(X)  # reduce 4-D features to 2-D
print(X_pca.shape)
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y);
# Classify using the dimensionality-reduced data
X_train, X_test, y_train, y_test = train_test_split(X_pca, iris.target)
clf = LinearSVC()
clf.fit(X_train, y_train)
y_pred2 = clf.predict(X_test)
from sklearn import metrics
print(metrics.classification_report(y_true=y_test, y_pred=y_pred2))  # evaluate predictions
```
### 5-3-2. テキストに対する処理
#### テキストから特徴量を設計
テキストのカウントベクトルを作成し,TF-IDFを用いて特徴ベクトルを作る.
いくつかの設計ができるが,例題としてこの手法を用いる.
ここでは,20newsgroupsというデータセットを利用する.
```
from sklearn.datasets import fetch_20newsgroups
# Restrict to four newsgroup categories to keep the problem small.
categories = ['alt.atheism', 'soc.religion.christian','comp.graphics', 'sci.med']
news_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# Bag-of-words counts, then term-frequency scaling (idf disabled).
count_vec = CountVectorizer()
X_train_counts = count_vec.fit_transform(news_train.data)
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
```
#### Naive Bayesによる学習
```
from sklearn.naive_bayes import MultinomialNB
# Train multinomial naive Bayes on the TF features.
clf = MultinomialNB().fit(X_train_tf, news_train.target)
docs = ["God is love.", "I study about Computer Science."]
# New documents must pass through the same count -> TF transforms as training data.
X_test_counts = count_vec.transform(docs)
X_test_tf = tf_transformer.transform(X_test_counts)
preds = clf.predict(X_test_tf)
for d, label_id in zip(docs, preds):
    print("{} -> {}".format(d, news_train.target_names[label_id]))
```
このように文に対して,categoriesのうちのどれに対応するかを出力する学習器を作ることができた.
この技術を応用することで,ある文がポジティブかネガティブか,スパムか否かなど自然言語の文に対する分類問題を解くことができる.
### 5-3-3. Pipelineによる結合
```
from sklearn.pipeline import Pipeline
# Chain vectoriser -> TF-IDF -> classifier into a single estimator.
text_clf = Pipeline([('countvec', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB())])
text_clf.fit(news_train.data, news_train.target)
for d, label_id in zip(docs, text_clf.predict(docs)):
    print("{} -> {}".format(d, news_train.target_names[label_id]))
```
## 5.4 scikit-learn 準拠コーディング
scikit-learn 準拠でコーディングするメリットは多数存在する.
1. scikit-learn の用意するgrid search や cross validation を使える.
2. 既存のscikit-learn の他手法と入れ替えが容易になる.
3. 他の人にみてもらいやすい。使ってもらいやすい.
4. <span style="color: gray; ">本家のコミッターになれるかも?</span>
詳しくは [Developer’s Guide][1] に書いてある.
[1]:https://scikit-learn.org/stable/developers/#rolling-your-own-estimator
scikit-learn ではモデルは以下の4つのタイプに分類されている.
- Classifier
- Naive Bayes Classifier などの分類モデル
- Clustering
- K-means 等のクラスタリングモデル
- Regressor
- Lasso, Ridge などの回帰モデル
- Transformer
- PCA などの変数の変換モデル
***準拠コーディングでやるべきことは、***
- sklearn.base.BaseEstimatorを継承する
- 上記タイプに応じたMixinを多重継承する
(予測モデルの場合)
- fitメソッドを実装する
- initでパラメータをいじる操作を入れるとgrid searchが動かなくなる(後述)
- predictメソッドを実装する
### 5-4-1. リッジ回帰のscikit-learn 準拠コーディング
試しに今までにコーディングした MyLinearRegression を改造し, scikit-learn 準拠にコーディングし直してみよう.
ついでにリッジ回帰の選択ができるようにもしてみよう.
```
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
```
回帰なので BaseEstimator と RegressorMixin の継承をする.
さらにリッジ回帰のオプションも追加するため, initにハイパーパラメータも追加する.
入力のshapeやdtypeを整えるために```check_X_y```や```check_array```を用いる(推奨).
```
class MyLinearRegression(BaseEstimator, RegressorMixin):
    """scikit-learn compatible single-feature linear / ridge regression.

    Minimises sum_n (y_n - a*x_n - b)^2 + lam * a^2; lam == 0 gives
    ordinary least squares.
    """

    def __init__(self, lam=0):
        """
        lam: ridge regularisation strength (0 disables regularisation).

        Per the scikit-learn contract, __init__ stores parameters verbatim,
        performs no validation, and defines no fitted (underscore) attributes
        -- those are created in fit().  (The original placeholder
        `self.a = / self.b =` lines were syntax errors and violated this rule.)
        """
        self.lam = lam

    def fit(self, X, y):
        """
        X: array-like, shape (n_samples, n_features)
        y: array, shape (n_samples,)
        Estimate a coefficient and an intercept from data.
        """
        X, y = check_X_y(X, y, y_numeric=True)
        x = X[:, 0]  # single-feature model: only the first column is used
        x_mean, y_mean = x.mean(), y.mean()
        xc, yc = x - x_mean, y - y_mean
        # Ridge closed form on centred data: a = <xc, yc> / (<xc, xc> + lam);
        # lam == 0 reduces to the OLS solution.
        self.a_ = xc.dot(yc) / (xc.dot(xc) + self.lam)
        self.b_ = y_mean - self.a_ * x_mean
        return self

    def predict(self, X):
        """
        Calc y from X
        """
        # Fitted-state check (recommended).  Fix: attributes must be passed as
        # one list -- the original two positional strings misused the API
        # (the second positional argument of check_is_fitted is `msg`).
        check_is_fitted(self, ["a_", "b_"])
        X = check_array(X)
        return self.a_ * X[:, 0] + self.b_
```
***制約***
- initで宣言する変数に全て初期値を定める
- また引数の変数名とクラス内の変数名は一致させる
- initにデータは与えない。データの加工なども(必要なら)fit内で行う
- データから推定された値はアンダースコアをつけて区別する. 今回なら、a_と b_をfit関数内で新しく定義する.
- アンダースコアで終わる変数をinit内では宣言しないこと.
- init内で引数の確認, 加工をしてはいけない. 例えば```self.lam=2*lam```などをするとgrid searchができなくなる. [参考][1]
> As model_selection.GridSearchCV uses set_params to apply parameter setting to estimators, it is essential that calling set_params has the same effect as setting parameters using the __init__ method. The easiest and recommended way to accomplish this is to not do any parameter validation in __init__. All logic behind estimator parameters, like translating string arguments into functions, should be done in fit.
[github][2]のコードをお手本にしてみるのもいいだろう.
[1]:https://scikit-learn.org/stable/developers/contributing.html#coding-guidelines
[2]:https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/linear_model/base.py#L367
### 5-4-2. scikit-learn 準拠かどうか確認
自作のコードがちゃんとscikit-learn準拠かどうか確かめるには以下を実行する.
```
from sklearn.utils.estimator_checks import check_estimator
# Run scikit-learn's estimator compliance test-suite against our class.
check_estimator(MyLinearRegression)
```
問題があれば指摘してくれるはずだ. なお上記を必ずパスする必要はない.
#### Grid Search
準拠モデルを作ったなら, ハイパーパラメータの決定をscikit-learnでやってみよう.
```
import numpy as np
from sklearn.model_selection import GridSearchCV
np.random.seed(0)
# Grid search: try lam over e^-30 .. e^0 with 5-fold cross-validation.
parameters = {'lam':np.exp([i for i in range(-30,1)])}
reg = GridSearchCV(MyLinearRegression(),parameters,cv=5)
reg.fit(X_train,T_train)
best = reg.best_estimator_
# coefficient of determination (R^2)
print("決定係数: ", best.score(X_train, T_train))  # score() comes from the inherited mixin
# best regularisation strength found by the search
print("lam: ", best.lam)
fig, axes = plt.subplots(ncols=2, figsize=(12, 4))
axes[0].scatter(X_train, T_train, marker=".")
axes[0].plot(X_train, best.predict(X_train), color="red")
axes[0].set_title("train")
axes[1].scatter(X_test, T_test, marker=".")
axes[1].plot(X_test, best.predict(X_test), color="red")
axes[1].set_title("test")
fig.show()
```
## [練習問題](./../exercise/questions.md#chapter-5)
| github_jupyter |
```
from IPython.display import Image
import sympy as sp
import math
import numpy as np
import datetime
# Show a picture of Beale's function (local path; only works on the author's machine).
Image(filename='/Users/wy/Desktop/beales_function.png')
class GoldSearch(object):
    """Golden-section search for the minimum of a unimodal 1-D function."""

    def __init__(self):
        # Stop once the bracketing interval is narrower than this.
        self.l = 10 ** -5
        # Golden-ratio conjugate, (sqrt(5) - 1) / 2 ~= 0.618.
        self.alpha = (math.sqrt(5) - 1) / 2.

    def g_lambda(self, a, b):
        """Lower interior probe point of the interval [a, b]."""
        return a + (1 - self.alpha) * (b - a)

    def g_mu(self, a, b):
        """Upper interior probe point of the interval [a, b]."""
        return a + self.alpha * (b - a)

    def goldSearch(self, a, b, lambda_k, mu_k, function, k=1):
        """Shrink [a, b] around the minimiser of `function`; return the midpoint.

        lambda_k / mu_k are the two current probe points (normally
        g_lambda(a, b) and g_mu(a, b)); k counts iterations.
        """
        # Iterative form of the classic recursive formulation: each pass
        # discards the sub-interval that cannot contain the minimum.
        while (b - a) >= self.l:
            if function(lambda_k) > function(mu_k):
                # Minimum lies in [lambda_k, b]: drop the left part.
                a = lambda_k
                lambda_k = mu_k
                mu_k = self.g_mu(a, b)
            else:
                # Minimum lies in [a, mu_k]: drop the right part.
                b = mu_k
                mu_k = lambda_k
                lambda_k = self.g_lambda(a, b)
            k += 1
        return (a + b) / 2.
# Rebind the name to a singleton instance.
# NOTE(review): this shadows the class itself; a distinct name would be clearer.
GoldSearch = GoldSearch()
def gradient(f):
    """Return [df/dx1, df/dx2] as numeric callables of (x1, x2) via sympy lambdify.

    Relies on the notebook globals x1, x2 (sympy symbols defined below).
    """
    return [sp.lambdify((x1,x2), f.diff(x, 1), 'numpy') for x in [x1,x2]]
```
# Fletcher_Reeves
初始點 (1,1)
GoldSearch interval -5 ~ 5
e = 10**-5
number of iterations : 24
run time : 0.91s
```
def Fletcher_Reeves(f,xj):
    """Minimise sympy expression f(x1, x2) with the Fletcher-Reeves conjugate-gradient method.

    xj: starting point (length-2 array).  Returns (minimiser, iteration count).
    Uses the notebook globals a, b (line-search interval), x1, x2 and the
    GoldSearch instance.  NOTE: Python 2 code -- np.array(map(...)) assumes
    map returns a list; under Python 3 this breaks.
    """
    lambda_j = sp.symbols('lambda_j')
    e = 10**-5  # stop when the gradient norm falls below this
    # Initial search direction: steepest descent (negative gradient).
    sj = np.array(map(lambda fun : fun( xj[0],xj[1] ),gradient(f)))*(-1)
    i = 1
    while np.linalg.norm(sj) > e:
        i = i+1
        # One-dimensional function of the step size along sj.
        tmp = xj+lambda_j*sj
        new_f = f.subs([(x1,tmp[0]),(x2,tmp[1])])
        # Golden-section line search for the best step size.
        lambdaJ = GoldSearch.goldSearch(a,b,GoldSearch.g_lambda(a,b),GoldSearch.g_mu(a,b),sp.lambdify(lambda_j , new_f))
        xj_1 = xj+lambdaJ*sj
        sj_1 = np.array(map(lambda fun : fun( xj_1[0],xj_1[1] ),gradient(f)))*(-1)
        # Fletcher-Reeves beta: ||g_{k+1}||^2 / ||g_k||^2.
        beta_j = np.dot(sj_1.T,sj_1)/np.dot(sj.T,sj)
        sj_1 = sj_1+beta_j*sj
        sj = sj_1
        xj = xj_1
    return xj_1,i
# Interval used by the golden-section line search.
a = -5
b = 5
x1,x2 = sp.symbols('x1,x2')
# Beale's function (global minimum at (3, 0.5)).
f = (1.5-x1*(1-x2))**2 + (2.25-x1*(1-x2**2))**2 + (2.625-x1*(1-x2**3))**2
# starting point
xj = np.array([1,1])
start = datetime.datetime.now()
xj_1,i = Fletcher_Reeves(f,xj)
end = datetime.datetime.now()
# Python 2 print statements: minimiser, iteration count, elapsed time.
print xj_1
print i
print end - start
```
# DFP
初始點 (1,1)
GoldSearch interval -5 ~ 5
e = 10**-5
number of iterations : 8
run time : 0.34s
```
def DFP(f,xi):
    """Minimise sympy expression f(x1, x2) with the Davidon-Fletcher-Powell quasi-Newton method.

    xi: starting point (length-2 array).  Returns (minimiser, iteration count).
    Uses the notebook globals a, b, x1, x2 and the GoldSearch instance.
    NOTE: Python 2 code -- np.array(map(...)) assumes map returns a list.
    """
    lambda_i = sp.symbols('lambda_i')
    e = 10**-3  # gradient-norm stopping tolerance (looser than the header's 10**-5)
    gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)
    Bi = np.identity(2)  # initial inverse-Hessian approximation
    i = 0
    while abs(np.linalg.norm(gradient_f)) > e:
        i = i+1
        # Quasi-Newton search direction: -B_i * grad.
        si = (np.dot(Bi,gradient_f)*(-1)).reshape(1,2)[0]
        tmp = xi+lambda_i*si
        new_f = f.subs([(x1,tmp[0]),(x2,tmp[1])])
        # Golden-section line search for the step size along si.
        lambdaI = GoldSearch.goldSearch(a,b,GoldSearch.g_lambda(a,b),GoldSearch.g_mu(a,b),sp.lambdify(lambda_i , new_f))
        xi_1 = xi+lambdaI*si
        gradient_f_1 = (np.array(map(lambda fun : fun( xi_1[0],xi_1[1] ),gradient(f)))).reshape(2,1)
        if abs(np.linalg.norm(gradient_f_1)) > e:
            # DFP rank-two update of the inverse-Hessian approximation.
            gi = (gradient_f_1 - gradient_f).reshape(1,2)[0]
            Mi = (np.dot(si.reshape(2,1),si.reshape(2,1).T))*lambdaI/np.dot(si.T,gi)
            Ni = np.dot(np.dot(Bi,gi).reshape(2,1),np.dot(Bi,gi).T.reshape(1,2))*(-1)/np.dot(np.dot(gi.T,Bi),gi)
            Bi = Bi+Mi+Ni
            xi = xi_1
            gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)
        else:
            return xi_1,i
# Interval used by the golden-section line search.
a = -5
b = 5
x1,x2 = sp.symbols('x1,x2')
# Beale's function again.
f = (1.5-x1*(1-x2))**2 + (2.25-x1*(1-x2**2))**2 + (2.625-x1*(1-x2**3))**2
xi = np.array([1,1])
start = datetime.datetime.now()
xi_1,i = DFP(f,xi)
end = datetime.datetime.now()
# Python 2 print statements: minimiser, iteration count, elapsed time.
print xi_1
print i
print end - start
```
# BFGS
初始點 (1,1)
GoldSearch interval -5 ~ 5
e = 10**-5
number of iterations : 8
run time : 0.38s
```
def BFGS(f,xi):
    """Minimise sympy expression f(x1, x2) with the BFGS quasi-Newton method.

    xi: starting point (length-2 array).  Returns (minimiser, iteration count).
    Uses the notebook globals a, b, x1, x2 and the GoldSearch instance.
    NOTE: Python 2 code -- np.array(map(...)) assumes map returns a list.
    """
    lambda_i = sp.symbols('lambda_i')
    e = 10**-3  # gradient-norm stopping tolerance
    gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)
    Bi = np.identity(2)  # initial inverse-Hessian approximation
    i = 0
    while abs(np.linalg.norm(gradient_f)) > e:
        i = i+1
        # Quasi-Newton search direction: -B_i * grad.
        si = (np.dot(Bi,gradient_f)*(-1)).reshape(1,2)[0]
        tmp = xi+lambda_i*si
        new_f = f.subs([(x1,tmp[0]),(x2,tmp[1])])
        # Golden-section line search for the step size along si.
        lambdaI = GoldSearch.goldSearch(a,b,GoldSearch.g_lambda(a,b),GoldSearch.g_mu(a,b),sp.lambdify(lambda_i , new_f))
        xi_1 = xi+lambdaI*si
        gradient_f_1 = (np.array(map(lambda fun : fun( xi_1[0],xi_1[1] ),gradient(f)))).reshape(2,1)
        if abs(np.linalg.norm(gradient_f_1)) > e:
            # BFGS rank-two update of the inverse-Hessian approximation.
            gi = (gradient_f_1 - gradient_f).reshape(1,2)[0]
            di = xi_1-xi  # step taken this iteration
            Mi = ((1 + np.dot(np.dot(gi.T,Bi),gi)/np.dot(di.T,gi))*np.dot(di.reshape(2,1),di.reshape(1,2)))/np.dot(di.T,gi)
            Ni = np.dot(np.dot(di.reshape(2,1),gi.reshape(1,2)),Bi)*(-1)/np.dot(di.T,gi)
            Qi = np.dot(np.dot(Bi,gi).reshape(2,1),di.reshape(1,2))*(-1)/np.dot(di.T,gi)
            Bi = Bi+Mi+Ni+Qi
            xi = xi_1
            gradient_f = (np.array(map(lambda fun : fun( xi[0],xi[1] ),gradient(f)))).reshape(2,1)
        else:
            return xi_1,i
# Interval used by the golden-section line search.
a = -5
b = 5
x1,x2 = sp.symbols('x1,x2')
f = (1.5-x1*(1-x2))**2 + (2.25-x1*(1-x2**2))**2 + (2.625-x1*(1-x2**3))**2
xi = np.array([1,1])
start = datetime.datetime.now()
xi_1,i = BFGS(f,xi)
end = datetime.datetime.now()
# Python 2 print statements: minimiser, iteration count, elapsed time.
print xi_1
print i
print end - start
# Cross-check with scipy's derivative-free Nelder-Mead simplex method.
from scipy.optimize import fmin
def fun(X):
    """Beale's function in plain-numpy form for scipy.optimize.fmin."""
    return (1.5-X[0]*(1-X[1]))**2 + (2.25-X[0]*(1-X[1]**2))**2 + (2.625-X[0]*(1-X[1]**3))**2
fmin(fun,np.array([1,1]))
```
# scipy python做科學計算的lib
出處 : http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html#scipy.optimize.fmin
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second derivatives.
| github_jupyter |
```
import json
import requests
import threading
import time
import os
from seleniumwire import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
def interceptor(request):
    """selenium-wire request hook: inflate the click count sent to the stats endpoint."""
    target = 'https://us-central1-popxi-f3a4d.cloudfunctions.net/stats?count='
    if not request.url.startswith(target):
        return
    updated = request.params
    updated['count'] = '5000'
    request.params = updated
    print('Popping...')
def initBrowser(proxy = None):
    """Launch Chrome on popxi.click with the stats interceptor installed.

    proxy: optional "host:port" string for Chrome's --proxy-server flag.
    Returns the ready selenium-wire webdriver.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('ignore-certificate-errors')
    #options.add_argument('headless')
    options.add_argument('window-size=1920x1080')
    #options.add_argument("disable-gpu")
    options.add_argument("--mute-audio")
    #options.add_argument("--disable-gpu")
    #seleniumwire_options = {
    #    'enable_har': True  # Capture HAR data, retrieve with driver.har
    #}
    #driver = webdriver.Chrome('chromedriver', options=options, seleniumwire_options=seleniumwire_options)
    if proxy is not None:
        # BUG FIX: previously referenced the undefined names chrome_options and
        # PROXY, which raised NameError whenever a proxy was supplied.
        options.add_argument('--proxy-server=%s' % proxy)
    driver = webdriver.Chrome('chromedriver', options=options)
    driver.request_interceptor = interceptor
    #driver.scopes = [
    #    '.*www.google.com/*',
    #    '.*us-central1-popxi-f3a4d.cloudfunctions.net/stats*.*'
    #]
    driver.get('https://popxi.click/')
    # Auto-clicker: dispatch Ctrl+G every 200 ms from inside the page.
    driver.execute_script('var event=new KeyboardEvent("keydown",{key:"g",ctrlKey:!0});setInterval(function(){for(i=0;i<1;i++)document.dispatchEvent(event)},200);')
    return driver
def getRequests(driver):
    """Poll captured requests; after each stats call, reset the session and restart the clicker.

    Runs forever: `get` is never set True, so the loop only ends if the
    driver raises.
    """
    get = False
    while not get:
        for req in driver.requests:
            if req.url.startswith('https://us-central1-popxi-f3a4d.cloudfunctions.net/stats?count='):
                try:
                    print('Response: ' + str(req.response.status_code))
                except:
                    # Response may not have arrived yet (req.response is None).
                    print('Response: None')
                print('Deleting cookies...')
                driver.delete_all_cookies()
                del driver.requests  # clear selenium-wire's captured-request buffer
                # Open a fresh tab on the site and close the old one.
                driver.execute_script("window.open('https://popxi.click/');")
                driver.switch_to.window(driver.window_handles[0])
                driver.close()
                driver.switch_to.window(driver.window_handles[0])
                print('Deleted.')
                time.sleep(2)
                # Re-install the Ctrl+G auto-clicker in the new tab.
                driver.execute_script('var event=new KeyboardEvent("keydown",{key:"g",ctrlKey:!0});setInterval(function(){for(i=0;i<1;i++)document.dispatchEvent(event)},200);')
        time.sleep(1)
def Run(proxy = None):
    """Entry point: launch the browser, then process captured requests forever."""
    print("Starting browser...")
    driver = initBrowser(proxy)
    print("Browser Started.")
    print("Fetching requests...")
    getRequests(driver)
# Start immediately on cell execution (no proxy).
Run()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/tonychang04/Sarcastic-Headlines-Detector/blob/main/Logistic_Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Logistic Regression Model
## Accuracy: 86%
```
from google.colab import files
from google.colab import drive
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import nltk
from nltk.corpus import stopwords
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
#drive.mount('/content/drive')
nltk.download('stopwords')  # stopword list used by the text cleaners below
#/content/Sarcasm_Headlines_Dataset.json
# Load both dataset versions (one JSON record per line) and concatenate them.
df1 = pd.read_json('/Sarcasm_Headlines_Dataset.json', lines = True)
df2 = pd.read_json('/Sarcasm_Headlines_Dataset_v2.json', lines = True)
frames = [df1, df2]
df = pd.concat(frames) # merged two json files into 1 dataframe file
df.head(10)
# Some data visualizations...
# Class balance: number of sarcastic (1) vs non-sarcastic (0) headlines.
ones = len(df[df['is_sarcastic'] == 1])
zeros = len(df[df['is_sarcastic'] == 0])
output = ['0','1']
plt.bar(output[0], [zeros])
plt.bar(output[1], [ones])
plt.legend(output)
plt.xlabel('Sarcastic(1) or Not Sarcastic(0)')
plt.ylabel('Number of Headlines')
plt.title('Number of Sarcastic/Non-Sarcastic headlines')
print(stopwords.words('english'))
# some other visualizations... (average length of the headlines - sarcastic && nonsarcastic)
# takes a while to get the values; just use the numbers below to make the graph
def find_average_length(df, len_sar, len_non_sar):
    """Average headline length (characters) for sarcastic and non-sarcastic rows.

    len_sar / len_non_sar: running totals to start from (normally 0).
    Returns (avg_sarcastic, avg_non_sarcastic).
    NOTE(review): relies on the notebook globals `ones` and `zeros` for the
    class counts, and iterates row-by-row, which is slow on large frames.
    """
    for i in range(len(df)):
        if int(df[['is_sarcastic']].iloc[i]) == 0:
            len_non_sar += int(df[['headline']].iloc[i].str.len())
        else:
            len_sar += int(df[['headline']].iloc[i].str.len())
    sarcastic = len_sar / ones
    non_sarcastic = len_non_sar / zeros
    return sarcastic, non_sarcastic
sarcastic, non_sarcastic = find_average_length(df, 0, 0)
#sarcastic = 64.08620553671425
#non_sarcastic = 59.55862529195863
# run it as needed
# Hard-coded results of the (slow) computation above, so the plot can be
# regenerated without re-running find_average_length.
sarcastic = 64.08620553671425
non_sarcastic = 59.55862529195863
fig = plt.figure()
ax = fig.add_subplot(111)
labels = ['sarcastic','non-sarcastic']
values = [sarcastic, non_sarcastic]
plt.bar(labels[0], values[0], color=(0.2, 0.4, 0.6, 0.6))
plt.bar(labels[1], values[1], color=(0.3, 0.8, 0.7, 0.6))
# Annotate each bar with its value.
for i, v in enumerate(values):
    ax.text(i, v+1, "%d" %v, ha="center")
plt.ylim(0, 75)
plt.legend(labels)
plt.xlabel('Sarcastic or Non-sarcastic')
plt.ylabel('Average number of characters')
plt.title('Average length of the headlines')
# Removing Stop words
def text_process_for_ML(mess):
    """Strip punctuation from `mess` and return its non-stopword tokens as a list."""
    stripped = ''.join(ch for ch in mess if ch not in string.punctuation)
    stop_set = stopwords.words('english')
    return [tok for tok in stripped.split() if tok.lower() not in stop_set]
def text_process(mess):
    """Strip punctuation and English stopwords from `mess`; return the cleaned string."""
    stripped = ''.join(ch for ch in mess if ch not in string.punctuation)
    stop_set = stopwords.words('english')
    kept = [tok for tok in stripped.split() if tok.lower() not in stop_set]
    return ' '.join(kept)
# Clean every headline once and keep the result in a new column.
df['processed_headline'] = df['headline'].apply(text_process)
df.head()
#logistic regression model
# https://www.kaggle.com/mrudhuhas/text-classification-spacy/execution
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
X_train_2, X_test_2, Y_train_2, Y_test_2 = train_test_split(df['processed_headline'], df['is_sarcastic'], test_size=0.33, random_state=42)
# Train the model: TF-IDF features feeding a saga-solver logistic regression.
classifier_lr = Pipeline([('tfidf',TfidfVectorizer()), ('clf',LogisticRegression(solver='saga'))])
classifier_lr.fit(X_train_2,Y_train_2)
# Predict on both splits to compare train vs test accuracy.
y_pred = classifier_lr.predict(X_test_2)
yt_pred = classifier_lr.predict(X_train_2)
# Analyze the results.
from sklearn.metrics import accuracy_score
cm = confusion_matrix(Y_test_2,y_pred)
print(f'Confusion Matrix :\n {cm}\n')
print(f'Test Set Accuracy Score :\n {accuracy_score(Y_test_2,y_pred)}\n')
print(f'Train Set Accuracy Score :\n {accuracy_score(Y_train_2,yt_pred)}\n')
print(f'Classification Report :\n {classification_report(Y_test_2,y_pred)}')
```
| github_jupyter |
# Predicting Student Admissions with Neural Networks
In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
- GRE Scores (Test)
- GPA Scores (Grades)
- Class rank (1-4)
The dataset originally came from here: http://www.ats.ucla.edu/
## Loading the data
To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
- https://pandas.pydata.org/pandas-docs/stable/
- https://docs.scipy.org/
```
# Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame
data = pd.read_csv('student_data.csv')
# Printing out the first 10 rows of our data (columns: admit, gre, gpa, rank)
data[:10]
```
## Plotting the data
First let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ingore the rank.
```
# Importing matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# Function to help us plot
def plot_points(data):
    """Scatter GRE vs GPA; cyan for admitted rows, red for rejected."""
    X = np.array(data[["gre","gpa"]])
    y = np.array(data["admit"])
    # argwhere yields index arrays, hence the s[0][0]/s[0][1] unwrapping below.
    admitted = X[np.argwhere(y==1)]
    rejected = X[np.argwhere(y==0)]
    plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k')
    plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
# Plotting the points
plot_points(data)
plt.show()
```
Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.
```
# Separating the ranks
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs: one GRE/GPA scatter per undergraduate rank
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
```
This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.
## TODO: One-hot encoding the rank
Use the `get_dummies` function in pandas in order to one-hot encode the data.
Hint: To drop a column, it's suggested that you use `one_hot_data`[.drop( )](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html).
```
# TODO: Make dummy variables for rank and concat existing columns
# get_dummies one-hot encodes `rank` into rank_1..rank_4 indicator columns.
one_hot_data = pd.get_dummies(data, columns=["rank"])
# Print the first 10 rows of our data
one_hot_data[:10]
```
## TODO: Scaling the data
The next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
```
# Making a copy of our data
processed_data = one_hot_data[:]
# TODO: Scale the columns
# Bring both features into [0, 1]: GRE max is 800, GPA max is 4.0.
processed_data["gre"] /= 800
processed_data["gpa"] /= 4
# Printing the first 10 rows of our processed data
processed_data[:10]
```
## Splitting the data into Training and Testing
In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
```
# 90/10 train/test split by sampling row indices without replacement.
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
```
## Splitting the data into features and targets (labels)
Now, as a final step before the training, we'll split the data into features (X) and targets (y).
```
# Split each set into features (X) and the `admit` target (y).
features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']
print(features[:10])
print(targets[:10])
```
## Training the 2-layer Neural Network
The following function trains the 2-layer neural network. First, we'll write some helper functions.
```
# Helper functions: logistic activation, its derivative, and binary cross-entropy.
def sigmoid(x):
    """Logistic activation: maps any real input into (0, 1)."""
    return 1 / (1 + np.exp(-x))

def sigmoid_prime(x):
    """Derivative of the sigmoid, expressed via the sigmoid itself."""
    s = sigmoid(x)
    return s * (1 - s)

def error_formula(y, output):
    """Binary cross-entropy loss for target y and prediction output."""
    return - y*np.log(output) - (1 - y) * np.log(1-output)
```
# TODO: Backpropagate the error
Now it's your turn to shine. Write the error term. Remember that this is given by the equation $$ (y-\hat{y}) \sigma'(x) $$
```
# TODO: Write the error term formula
def error_term_formula(x, y, output):
    """Backpropagation error term: (y - y_hat) * sigma'(x) for input vector x.

    NOTE(review): for the cross-entropy loss defined above, the analytic error
    term simplifies to (y - output); multiplying by sigmoid_prime(x) matches an
    MSE-style derivation instead -- confirm which is intended here.
    """
    return (y - output) * sigmoid_prime(x)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
# Training function
def train_nn(features, targets, epochs, learnrate):
    """Train a single sigmoid output unit with batch gradient descent.

    features: DataFrame of scaled inputs; targets: 0/1 labels.
    Prints the training MSE ten times over the run; returns the weight vector.
    """
    # Use to same seed to make debugging easier
    np.random.seed(42)
    n_records, n_features = features.shape
    last_loss = None
    # Initialize weights (scaled by 1/sqrt(n_features))
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
    for e in range(epochs):
        del_w = np.zeros(weights.shape)
        for x, y in zip(features.values, targets):
            # Loop through all records, x is the input, y is the target
            # Activation of the output unit
            #   Notice we multiply the inputs and the weights here
            #   rather than storing h as a separate variable
            output = sigmoid(np.dot(x, weights))
            # The error, the target minus the network output
            error = error_formula(y, output)
            # The error term
            error_term = error_term_formula(x, y, output)
            # The gradient descent step, the error times the gradient times the inputs
            del_w += error_term * x
        # Update the weights here. The learning rate times the
        # change in weights, divided by the number of records to average
        weights += learnrate * del_w / n_records
        # Printing out the mean square error on the training set
        if e % (epochs / 10) == 0:
            out = sigmoid(np.dot(features, weights))
            loss = np.mean((out - targets) ** 2)
            print("Epoch:", e)
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            print("=========")
    print("Finished training!")
    return weights
weights = train_nn(features, targets, epochs, learnrate)
```
## Calculating the Accuracy on the Test Data
```
# Calculate accuracy on test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5  # threshold the sigmoid output at 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/AureliaWambui19/Home-team-Away-team-scores-prediction/blob/main/wambui_aurelia_core_week_6_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 1.1 Introduction
* Football is a family of team sports that involve, to varying degrees, kicking a ball to score a goal.
* The men's FIFA World Ranking is a ranking system for men's national teams in association football.
* Currently led by Belgium
* A points system is used, with points being awarded based on the results of all FIFA-recognised full international matches.
## 1.1 Defining the Question
* I have been recruited as a football analyst in a company - Mchezopesa Ltd and tasked to accomplish the task below :
* Predict the result of a game between team 1 and team 2, based on who's home and who's away, and on whether or not the game is friendly (including rank of the respective team).
## 1.2 Metrics for Success
* Obtaining about 80% Accuracy score and above
* Correctly identifying status of results(win,loss,draw)
## 1.3 The Context
The new model for calculating the FIFA/Coca-Cola World Ranking (FWR) was developed over two years
during which time a large number of different algorithms was tested and extensively discussed.
Throughout this review and consultation process, the main aim was to identify an algorithm that is not
only intuitive, easy to understand and improves overall accuracy of the formula, but also addresses
feedback received about the previous model and provides fair and equal opportunities for all teams
across all confederations to ascend the FWR
The Elo method of calculation adds/subtracts points (as opposed to averaging points) for individual
matches to/from a team’s existing point total. The points which are added or subtracted are partially
determined by the relative strength of the two opponents, including the logical expectation that teams
higher in the ranking should fare better against teams lower in the ranking.
## 1.4 Experimental design taken
- Perform your EDA
- Perform any necessary feature engineering
- Check of multicollinearity
- Building a model
* Approach 1: Polynomial regression model
* Model 1: Predict how many goals the home team scores
* Model 2: Predict how many goals the away team scores
* Approach 2: Logistic regression model
* Figure out from the home team’s perspective if the game is a Win, Lose or Draw (W, L, D)
- Cross-validate the model
- Compute RMSE
- Create residual plots for the model
- Assess Heteroscedasticity using Bartlett’s test
## 1.5 Appropriateness of the available Data
This project has two datasets:
* Ranking dataset: contains the team ranks from 1993 to 2018
* Results dataset: contains matches and the team scores since 1892 to 2019
The link to the dataset is:
* https://drive.google.com/open?id=1BYUqaEEnFtAe5lvzJh9lpVpR2MAvERUc
The data is relevant for this project
# 2 Data Understanding
```
# Importing Libraries we use for our analysis
import pandas as pd
import numpy as np
import scipy as sp
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn import metrics
from sklearn.model_selection import KFold, LeaveOneOut
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
```
# 2.1 Viewing our Datasets
```
# Reading the datasets
FifaRank = pd.read_csv('fifa_ranking.csv')
FifaResult = pd.read_csv('results.csv')
# Viewing the top 3 observations in the fifa ranking dataset
FifaRank.head(3)
# Viewing the last 3 observations in the fifa ranking dataset
FifaRank.tail(3)
# Viewing the last 3 observations in the result dataset
FifaResult.tail(3)
```
# 2.2 Checking data
```
# Checking the size of the fifa ranking dataset
FifaRank.shape  # (rows, columns)
```
This dataset has 57993 rows and 16 columns
```
# checking the size of the results dataset
FifaResult.shape  # (rows, columns)
```
This dataset has 40839 rows and 9 columns
```
# Checking the ranking dataset information (dtypes, non-null counts)
FifaRank.info()
# Checking the result dataset information
FifaResult.info()
# Viewing the column names of the ranking dataset
FifaRank.columns
# Viewing the column names of the result dataset
FifaResult.columns
```
# 3 Feature Engineering
```
# finding the difference of scores and storing them in a new column called game_result
# (sign of home_score - away_score encodes the outcome from the home side)
FifaResult['game_results'] = FifaResult['home_score'] -FifaResult['away_score']
FifaResult.head(4)
# Creating a function to specify whether its a win , loss or a draw based on a home team perspective
# the results (its a win , loss or a draw ) are stored in a new column called status
def home_team(game_results):
    """Map a (home_score - away_score) goal difference to 'win'/'loss'/'draw'."""
    if game_results == 0:
        return 'draw'
    return 'win' if game_results > 0 else 'loss'
# Label every match from the home team's perspective.
FifaResult['status'] = FifaResult.game_results.apply(lambda w: home_team(w))
FifaResult.head(2)
# finding unique values in tournament column (before bucketing them below)
FifaResult.tournament.unique()
# Changing the tournament type into three categories
# Tournament type (World cup, Friendly, Other)
# The 3 respective category will be stored in a new column named tournament_type
def ton(tournament_type):
    """Collapse the raw tournament name into one of three categories.

    'Friendly' stays 'Friendly', 'FIFA World Cup' becomes 'World cup',
    and every other tournament is labelled 'Other'.
    """
    categories = {'Friendly': 'Friendly', 'FIFA World Cup': 'World cup'}
    return categories.get(tournament_type, 'Other')
FifaResult['tournament_type'] = FifaResult.tournament.apply(lambda t: ton(t))
FifaResult.head(2)
# Changing the dates column in both datasets into datetime format
FifaResult['date'] = pd.to_datetime(FifaResult['date'])
FifaRank['rank_date'] = pd.to_datetime(FifaRank['rank_date'])
# Confirming that we have changed the date columns into datetime datatypes
print(FifaRank.rank_date.dtypes)
print(' ')
print(FifaResult.date.dtypes)
# Extracting the year and month from the date column;
# Here we will create a new column for each
FifaResult['year'] = pd.DatetimeIndex(FifaResult['date']).year
FifaResult['month'] = pd.DatetimeIndex(FifaResult['date']).month
FifaRank['year'] = FifaRank['rank_date'].dt.year
FifaRank['month'] =FifaRank['rank_date'].dt.month
# confirming the changes
FifaResult.head(3)
# confirming changes
FifaRank.head(2)
# changing the full country column name in ranking dataset to home_team so as to ease manipulation of the datasets when merging them later
FifaRank= FifaRank.rename({'country_full': 'home_team'}, axis = 1)
# confirming changes
FifaRank.head(2)
# Dropping unnecessary columns in result dataset
FifaResult.drop(['date', 'game_results'], axis = 1, inplace = True)
FifaResult.columns
# Dropping unnecessary columns in rank dataset
FifaRank.drop(['country_abrv','rank_date', 'total_points', 'previous_points','cur_year_avg', 'cur_year_avg_weighted' ,'last_year_avg' , 'last_year_avg_weighted' , 'two_year_ago_avg', 'two_year_ago_weighted', 'three_year_ago_avg' ,'three_year_ago_weighted', 'confederation'], axis =1, inplace = True)
# Merging datasets
# Based on home_team, year, month
home_me= pd.merge(FifaResult,FifaRank, how="left", on = ['home_team', 'year', 'month'])
# viewing our merged dataset 4 top observations
home_me.head(4)
# viewing our merged dataset lastb 3 observations
home_me.tail(3)
# renaming the rank column name to home_rank so as to get the respective rank of the home team
home_me = home_me.rename({'rank': 'home_rank'}, axis = 1)
# Confirming changes
home_me.head(2)
# renaming the column home_team (originally called country full) as away team so that we get their individual ranks of away teams
FRankone= FifaRank.rename({'home_team': 'away_team'}, axis = 1)
FRankone.head(2)
# Merging the home_merged dataset with the
# Based on away_team, year, month
Fiifa = pd.merge(home_me,FRankone, how="left", on = ['away_team', 'year', 'month'])
# Checking the first two observations of the merged dataset
Fiifa.head(2)
# renaming the rank column as away rank in the new dataframe
Fiifa = Fiifa.rename({'rank': 'away_rank'}, axis = 1)
Fiifa.head()
```
# 4 Tidying the dataset
```
# checking for unique year rankings
FifaRank.year.unique()
```
Rankings run from 1993 to 2018. After merging, many missing values were noted, especially for years before 1993 and after 2018; therefore I will drop the rows where this was observed, as no rankings are available for them.
```
Fiifa.dropna(inplace = True)
# confirming that there are no null values
Fiifa.isnull().sum()
# checking for duplicates
Fiifa.duplicated().sum()
# dropping the duplicates
Fiifa.drop_duplicates(inplace = True)
# Checking that we have no duplicates in the data
Fiifa.duplicated().sum()
# checking columns of merged dataset
Fiifa.columns
# viewing our dataset after cleaning
Fiifa.head()
# checking the shape of the cleaned data
Fiifa.shape
```
This dataset has 16889 rows and 16 columns
```
# Encoding the categorical columns so as to manage perform operations such as correlation check
#
le = LabelEncoder()
# NOTE(review): DataFrame.apply re-fits the single LabelEncoder on every
# column independently, so numeric columns (scores, ranks) are re-coded as
# ordinal labels as well, and none of the per-column mappings are kept, so
# the encoding cannot be inverted later — confirm this is intended.
Fiifa= Fiifa.apply(le.fit_transform)
# Confirming the changes
Fiifa.head(5)
# checking for outliers in our dataset
# Using boxplots
# Labeling the title of our chart
# Displaying chart
plt.figure(dpi = 100)
ax = sns.boxplot(data = Fiifa,orient='h')
plt.title(' Outliers in Fifa dataset', color = 'red')
plt.xlabel(' Frequency')
plt.show()
```
# 5 Exploratory data analysis
```
h=Fiifa['home_score']
plt.hist(h, histtype='bar', rwidth=0.9)
plt.xlabel('No. of home scores')
plt.ylabel('Quantity')
plt.title('number of home scores',color='red')
plt.show()
```
home teams scored mostly one goal
```
# Histogram of the number of goals scored by away teams.
a=Fiifa['away_score']
# BUG FIX: the histogram previously plotted `h` (the home scores left over
# from the previous cell) instead of the away scores `a` defined just above.
plt.hist(a, histtype='bar', rwidth=0.9)
plt.xlabel('No. of away scores')
plt.ylabel('Quantity')
plt.title('number of away scores',color='red')
plt.show()
```
Most away teams score at least one goal.
Both histograms are positively skewed. This shape indicates that there are a number of data points, perhaps outliers, that are greater than the mode.
```
# status of game results in respect tothe home team(draw = 0, lose =1, win = 2)
# Using a countplot to visualize these results
# Using Seaborn
# Labeling the x and y axis
# Giving a title to our chart
# Displaying our chart
plt.figure(figsize = (6,6), dpi = 80)
sns.countplot(Fiifa['status'])
plt.xlabel('status (draw = 0, lose =1, win = 2)')
plt.ylabel('Count')
plt.title('status of games results', color = 'red')
plt.show()
```
The bar chart above shows that wins by the home teams are high compared to losses/draws.
# 6 Multicollinearity
* Checking for multicollinearity
* Solving multicollinearity
```
# Before we build a model we shall check if
# the independent variables are collinear/ correlated to each other
# Getting the pearson correation coefficient for each of the variables
correlation = Fiifa.corr()
correlation
```
The correlation matrix indicates that most variables are moderately or weakly correlated.*(both positively and negatively)
This is very beneficial when creating a model, as collinear variables reduce the power of the model to identify independent
variables that are statistically significant.
We will use the correlation matrix to calculate the vif (Variance Inflation Factor).
Variance inflation factor (VIF) is a measure of the amount of multicollinearity in a set of multiple regression variables. Mathematically, the VIF for a regression model variable is equal to the ratio of the overall model variance to the variance of a model that includes only that single independent variable. This ratio is calculated for each independent variable. A high VIF indicates that the associated independent variable is highly collinear with the other variables in the model.
```
# checking for multicollinearity
# Using the variance Inflation Factor (VIF)
#
# This is calculated using linear algebra inverse function
pd.DataFrame(np.linalg.inv(correlation.values), index = correlation.index, columns = correlation.columns)
# From the correlation matrix below there are no correlated independent variables as all have VIF below 5, which is the threshold
```
We check VIFs along the diagonal.
VIFs Values greater than 5 indicate that the presence of multicollinearity.
If present we remove the variable with the greatest VIF value.
Typically, a VIF value around 5 is a potential problem, and value around 10 is considered seriously problematic and suggests that the related variable should be dropped from the model.
From the correlation matrix there are no correlated independent variables as all have VIF values are below 5, which is the threshold and therefore no variable will be dropped in this project
# 8 Building a Model
## 8.1 Polynomial Regression Model
```
# Approach 1: Polynomial approach
# What to train given:
# Rank of home team
# Rank of away team
# Tournament type
# Model 1: Predict how many goals the home team scores
# Model 2: Predict how many goals the away team scores
```
### Model 1
Predict how many goals the home team scores
```
# Viewing our dataset before splitting
Fiifa.head(2)
# Model 1
# Predict how many goals the home team scores given home rank
X = Fiifa['home_rank'].values.reshape(-1, 1)
y = Fiifa['home_score'].values.reshape(-1, 1)
# showing relationship between home rank and home score
plt.scatter(X,y)
plt.title('Home team performance', color = 'red')
plt.xlabel('homerank')
plt.ylabel('home score')
plt.show()
```
There are more points on the lower side of the scatter plot.
home team scores are mostly between 0 and 5 goals.
```
X.shape
y.shape
# Split the dataset into train and test sets
# this means training data is 80% while test size is 20%
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=2)
# NOTE(review): the split above is never used — the polynomial model below is
# fitted on the full X/y, so there is no held-out test set; confirm whether
# fitting on X_train/y_train was intended.
# Fitting the polynomial features to the X the independent variable
poly_reg = PolynomialFeatures(degree =4)
X_poly = poly_reg.fit_transform(X)
# Fitting a polynomial Regression to the dataset.
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
# Visualizing the polynomial Regression results
plt.scatter(X, y, color='blue')
plt.plot(X, pol_reg.predict(X_poly),color='red')
plt.title('home score prediction')
plt.xlabel('home rank')
plt.ylabel('home score')
plt.show()
```
Using the polynomial regression model of degree 4,
most data points have been omitted
the visualization as appears makes it difficult to analyze and makes use of this model difficult to use for predictions
```
# Making predictions using our model
poly_pred = pol_reg.predict(poly_reg.fit_transform([[20]]))
print('Polynomial prediction when home rank is 20 the home team score is: %d' %poly_pred)
```
### Model 2
Predict how many goals the away team scores
```
# Model 2: Predict how many goals the away team scores given the away team rank
#
X = Fiifa['away_rank'].values.reshape(-1, 1)
y = Fiifa['away_score'].values.reshape(-1, 1)
# Visualizing the dependent vs independent variable using a scatter plot
plt.scatter(X,y)
plt.title('away team performance', color = 'red')
plt.xlabel('away rank')
plt.ylabel('away score')
plt.show()
```
In most cases, the away team scores between 0 and 4 goals.
```
## Split the dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=0)
# NOTE(review): the split above is never used — the polynomial model below is
# fitted on the full X/y, so there is no held-out test set; confirm whether
# fitting on X_train/y_train was intended.
# Fitting the polynomial features to the X
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
# Fitting a polynomial Regression to the dataset
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
# Visualizing the polynomial Regression results using a scatter plot
plt.scatter(X, y, color='blue')
plt.plot(X, pol_reg.predict(X_poly),color='red')
plt.title('away team prediction')
plt.xlabel('away rank')
plt.ylabel('away score')
plt.show()
```
Using the polynomial regression model of degree 4,
most data points have not been highlighted
This is underfitting.
The polynomial regression is not a good model to predict how many goals the away team scores given the away team rank.
```
# Making predictions using our model
poly_pred = pol_reg.predict(poly_reg.fit_transform([[58]]))
print('Polynomial prediction when home away rank is 58 the away team score is: %d' %poly_pred)
```
## 8.2 Logistic Regression Model
- Logistic regression is a predictive analysis.
- Logistic regression is used to describe data and to explain the relationship between one dependent binary variable and one or more nominal, ordinal, interval or ratio-level independent variables.
- Logistic Regression is used when the dependent variable(target) is categorical.
- In this model, we will be predicting whether the home team (Wins, Losses or Draws) in a match.
```
# Viewing the first two observations before splittig our dataset
Fiifa.head(2)
# Splitting our dataset
# X: independent variables
# y: dependent variable
# Splitting the data into train and test sets
X = Fiifa.drop(['status'], axis = 1)
y = Fiifa.status
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state=0)
# Fitting our model to our train sets
# Logistic Regression model in this case
#
LogReg = LogisticRegression()
LogReg.fit(X_train, y_train)
# Using our model to make a prediction
y_pred = LogReg.predict(X_test)
# Evalauting the model
print(accuracy_score(y_test, y_pred))
```
The model has an accuracy score of 62.72%
# 9 Cross-Validation
```
# Using KFolds
# Splitting our dataset
# independet variables as X
# dependent variable as y
X = Fiifa.drop(['status'], axis = 1).values
y = Fiifa.status.values
# specifying the number of folds
folds = KFold(n_splits = 10)
# We now create and assess 10 models based on the folds we created.
RMSES = [] # An array of RMSEs to keep track of the RSME of each model
count = 1 # starting point # helps to keep track of the model number in training
for train_index, test_index in folds.split(X):
# Setting up the train and test based on the split determined by KFold
# With 10 folds we split our data into training and test sets
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# fitting a Logistic regression model
LogReg = LogisticRegression()
LogReg.fit(X_train, y_train)
# Assess the accuracy of the model
y_pred = LogReg.predict(X_test)
# Calculating the RMSES of each model
# Appending each RMSE into the list earlier created
rmse_value = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
RMSES.append(rmse_value)
# printing each model RMSE
print('Model ' + str(count) + ' Root Mean Squared Error:',rmse_value)
count = count + 1
# Printing the mean of the RMSES in all the 10 models
print(np.mean(RMSES))
# Visualizing the 10-folds RMSES using a scatter plot
plt.plot(RMSES)
plt.ylabel('RMSE value')
plt.title("RMSE line plot", color = 'red')
plt.xlabel('model ID')
plt.show()
```
# 10. Heteroskedasticity
Heteroscedasticity means unequal scatter. In regression analysis, we talk about heteroscedasticity in the context of the residuals or error term. Specifically, heteroscedasticity is a systematic change in the spread of the residuals over the range of measured values.
```
# First: splitting our dataset
# Into the feature set and the target variable
X = Fiifa.drop(['status'], axis = 1)
y = Fiifa.status
# Split the dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state=0)
# Fitting a Logistic model
LogReg = LogisticRegression()
LogReg.fit(X_train, y_train)
# Using our model to make a prediction
y_pred = LogReg.predict(X_test)
# We now create the residual by subtracting the test value from the predicted
# value for each row in our dataset
residuals = np.subtract(y_pred, y_test)
# Creating a summary description of the residuals:
pd.DataFrame(residuals).describe()
residuals.mean()
```
## 10.1 Residual Plots
```
# Visualizing the residuals using a scatter plot
plt.scatter(y_pred, residuals, color='black')
plt.ylabel('residual')
plt.xlabel('predicted values')
plt.axhline(y= residuals.mean(), color='red', linewidth=1)
plt.show()
```
Residuals are centered around a mean of appx 0.43
…positive values for the residual (on the y-axis) mean the prediction was too low, and negative values mean the prediction was too high; 0 means the guess was exactly correct
## 10.2 Bartlett's test
```
# Carrying out Barlett's test
# It is a more thorough heteroskedasticity test.
test_result, p_value = sp.stats.bartlett(y_pred, residuals)
# To interpret the results we must also compute a critical value of the chi squared distribution
degree_of_freedom = len(y_pred)-1
probability = 1 - p_value
critical_value = sp.stats.chi2.ppf(probability, degree_of_freedom)
print(p_value)
# If the test_result is greater than the critical value, then we reject our null
# hypothesis. This would mean that there are patterns to the variance of the data
# Otherwise, we can identify no patterns, and we accept the null hypothesis that
# the variance is homogeneous across our data
if (test_result > critical_value):
print('the variances are unequal, and the model should be reassessed')
else:
print('The variances are Homogeneous!')
```
| github_jupyter |
##### Authors:
- Vikram Hanumanthrao Patil
- Prashantkumar Kulkarni
##### Date: 2/6/2019
##### Version: 3.0
##### Environment: Python 3.6.1 and Jupyter notebook
# Table of contents
### 1. [Importing libraries](#library)
### 2. [Initialization](#initialisation)
### 3. [Read training and label](#read_train)
### 4. [Data pre-processing](#preprocess)
### 5. [Feature generation](#feature)
- #### 5.1 [Dimension reduction technique (Chi-squared)](#dimension)
- #### 5-2 [Multinomial logistic regression](#model)
- #### 5-3 [Cross-validation](#cv)
### 6. [Predict on test data](#test)
## 1. Importing libraries <a name="library"></a>
```
import pandas as pd
import numpy as np
from tqdm import tqdm
from pattern.en import parse
from nltk.corpus import stopwords
import string
import re
import nltk
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn import svm
import swifter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn import metrics
import seaborn as sns
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from sklearn.feature_selection import SelectKBest, chi2
import warnings
warnings.filterwarnings("ignore")
```
## 2. Initialization<a name="initialisation"></a>
### Creating a custom dictionary to expand all the decontract words
```
#initialising the lemmatizer.
wn = nltk.WordNetLemmatizer()
# Creating a custom dictionary to expand all the decontract words
appos = {
"aren't" : "are not", "can't" : "cannot", "couldn't" : "could not", "didn't" : "did not", "doesn't" : "does not",
"don't" : "do not", "hadn't" : "had not", "hasn't" : "has not", "haven't" : "have not",
"he'd" : "he would", "he'll" : "he will", "he's" : "he is", "i'd" : "I would",
"i'd" : "I had", "i'll" : "I will", "i'm" : "I am", "isn't" : "is not",
"it's" : "it is", "it'll":"it will", "i've" : "I have", "let's" : "let us",
"mightn't" : "might not", "mustn't" : "must not", "shan't" : "shall not", "she'd" : "she would",
"she'll" : "she will", "she's" : "she is", "shouldn't" : "should not", "that's" : "that is",
"there's" : "there is", "they'd" : "they would", "they'll" : "they will", "they're" : "they are",
"they've" : "they have", "we'd" : "we would", "we're" : "we are", "weren't" : "were not",
"we've" : "we have", "what'll" : "what will", "what're" : "what are", "what's" : "what is",
"what've" : "what have", "where's" : "where is", "who'd" : "who would", "who'll" : "who will",
"who're" : "who are", "who's" : "who is", "who've" : "who have", "won't" : "will not",
"wouldn't" : "would not", "you'd" : "you would", "you'll" : "you will","you're" : "you are",
"you've" : "you have", "'re": " are", "wasn't": "was not", "we'll":" will","didn't": "did not"
}
#reference[1]
```
## 3. Reading the training data and labels <a name="read_train"></a>
### merging both of them
```
data = pd.read_csv("train_data.csv", sep=',') # read training data
data_labels = pd.read_csv("train_label.csv", sep=',') # read training labels
df=pd.merge(data,data_labels,on='trn_id',how='left') # merging both of them
```
## 4. Data pre-processing <a name="preprocess"></a>
```
#--------------------------
# Data pre-processing step
#--------------------------
def pre_process(text):
    """
    Clean a raw text string:
    1. lower-case everything
    2. split the sentence on spaces into tokens
    3. expand contractions via the ``appos`` dictionary ("won't" -> "will not")
    4. lemmatize each token with the WordNet lemmatizer
    5. return the cleaned tokens re-joined into a single sentence
    """
    lowered = "".join([ch.lower() for ch in text])
    tokens = lowered.split(" ")
    expanded = [appos.get(tok, tok) for tok in tokens]
    return " ".join([wn.lemmatize(tok) for tok in expanded])
#--------------------------
# execute pre-processing
#--------------------------
df['text']=df.swifter.apply(lambda x:pre_process(x['text']),axis=1)
```
## 5. Feature generation <a name="feature"></a>
### 5.1- Dimension reduction technique (Chi-square)<a name="dimension"></a>
```
#--------------------------------------
#dimension reduction using chi-square
#--------------------------------------
x_train, x_validation, y_train, y_validation = train_test_split(df['text'], df['label'], test_size=.02)
tvec = TfidfVectorizer(max_features=100000,ngram_range=(1, 3))
x_train_tfidf = tvec.fit_transform(x_train)
x_validation_tfidf = tvec.transform(x_validation)
#reference[2]
```
### 5-2 Multinomial logistic regression<a name="model"></a>
```
ch = SelectKBest(chi2, k=40000)
x_train_feature_selected=ch.fit_transform(x_train_tfidf, y_train)
x_test_chi_selected = ch.transform(x_validation_tfidf)
from sklearn import linear_model
clf = linear_model.LogisticRegression(multi_class='multinomial',solver = 'newton-cg')
clf.fit(x_train_feature_selected, y_train)
score = clf.score(x_test_chi_selected, y_validation)
score
```
### 5-3 Cross-validation <a name="cv"></a>
```
from sklearn.model_selection import KFold, cross_val_score
# 3-fold cross-validation of the fitted multinomial logistic-regression
# classifier on the chi-square-selected training features.
k_fold = KFold(n_splits=3)
# BUG FIX: the SelectKBest output above is stored in x_train_feature_selected;
# the previous name x_train_chi2_selected was never defined and raised a
# NameError when this cell ran.
cross_val_score(clf, x_train_feature_selected, y_train, cv=k_fold, scoring='accuracy', n_jobs=-1)
```
--------------------------------
# 6.Prediction on test data<a name="test"></a>
```
#--------------------------------------
## Reading the test file into dataframe
#--------------------------------------
test=pd.read_csv("test_data.csv", sep=',')
#--------------------------------------------------------------------
## Cleaning the test data as per the cleaning technique of train data
#--------------------------------------------------------------------
test['text']=test.swifter.apply(lambda x:pre_process(x['text']),axis=1)
#--------------------------------------------------------------------
## Transforming the text into vector tfidf vectorizer with chi-sqaure
#--------------------------------------------------------------------
test_matrix= tvec.transform(test['text'])
test_matrix = ch.transform(test_matrix)
#---------------------------------------------------------------------
## predicting the labels, storing it as label column in test dataframe
#---------------------------------------------------------------------
test['label'] = pd.DataFrame(clf.predict(test_matrix))
#-----------------------------------------------------------
## dropping all other columns keeping only test_id and label
#-----------------------------------------------------------
test=test[['test_id','label']]
############################################################
#--------------------------------
#Converting the dataframe to csv
#--------------------------------
test.to_csv('predict_label.csv',index=False)
```
# References
[1] https://drive.google.com/file/d/0B1yuv8YaUVlZZ1RzMFJmc1ZsQmM/view
[2] https://github.com/tthustla/twitter_sentiment_analysis_part8/blob/master/Capstone_part4-Copy6.ipynb
| github_jupyter |
```
import pandas as pd
import re
data = pd.read_csv('pesquisausuarios.csv')
df_oportunidade = pd.DataFrame()
for column in data.columns:
if "Satisfacao" in column:
m = re.search('Satisfacao (.*)', column)
new_col = m.group(1)
df_oportunidade["Oportunidade " + new_col] = data.apply(lambda row: row["Importancia " + new_col] + max(int(row["Importancia " + new_col] - row["Satisfacao " + new_col]),0), axis=1)
df_oportunidade
```
# Descobre os clusters
```
from sklearn.cluster import KMeans
cluster = KMeans(n_clusters=2)
data['Cluster'] = cluster.fit_predict(data.iloc[:,2:])
data
```
# Agrupa por Cluster
```
df = data.iloc[:,1:].groupby(['Cluster'], as_index = False).mean()
df
```
# Agrupa outcomes e cluster
```
# Transforma colunas de Outcome em linhas, agrupando por Outcome e Cluster
outcomes = pd.melt(df, id_vars=[('Cluster')])
outcomes
Importancia = outcomes[outcomes.variable.str.contains("Importancia.*")]
Satisfacao = outcomes[outcomes.variable.str.contains("Satisfacao.*")]
```
# Descobre Outcomes atrativos
```
new = {'Outcome': Importancia['variable']}
df_segmento = pd.DataFrame(data=new)
df_segmento['Cluster'] = Importancia['Cluster']
df_segmento['Satisfacao'] = Satisfacao['value'].values #ler https://stackoverflow.com/a/26221919
df_segmento['Importancia'] = Importancia['value']
df_segmento.tail()
```
# Calcular oportunidade e segmento de oportunidade
```
def calcular_oportunidade_segmento(row):
    """Compute the opportunity score of a row and classify its attractiveness.

    Oportunidade = Importancia + (Importancia - Satisfacao). Segments:
    > 15 -> 'Muito atrativo'; (10, 15] -> 'Atrativo'; otherwise 'Não atrativo'.

    Parameters
    ----------
    row : mapping (e.g. pandas Series) with 'Importancia' and 'Satisfacao' keys.

    Returns
    -------
    The same row with 'Oportunidade' and 'Segmento_oportunidade' added.
    """
    row['Oportunidade'] = row['Importancia'] + (row['Importancia'] - row['Satisfacao'])
    if row['Oportunidade'] > 15.0:
        row['Segmento_oportunidade'] = 'Muito atrativo'
    elif row['Oportunidade'] > 10.0:
        # BUG FIX: the old condition also required < 15.0, so a score of
        # exactly 15.0 fell through to 'Não atrativo'; the elif already
        # guarantees the score is <= 15.0 here.
        row['Segmento_oportunidade'] = 'Atrativo'
    else:
        row['Segmento_oportunidade'] = 'Não atrativo'
    return row
df_segmento = df_segmento.apply(calcular_oportunidade_segmento, axis=1)
df_segmento.tail()
from ggplot import *
import matplotlib.pyplot as plt
import seaborn as sns
ggplot(df_segmento, aes(x='Satisfacao', y='Importancia', color='Cluster')) + \
geom_point(size=75) + \
ggtitle("Customers Grouped by Cluster") + \
xlim(1, 10) + \
ylim(1, 10)
g = sns.FacetGrid(df_segmento, hue="Cluster", size=6)
g.map(plt.scatter, "Satisfacao", "Importancia", s=50, alpha=.7, linewidth=.5, edgecolor="white")
g.set(xlim=(1, 10), ylim=(1, 10));
g.add_legend();
import pandas as pd
import numpy as np
from factor_analyzer import FactorAnalyzer
fa = FactorAnalyzer()
fa.analyze(df_oportunidade.iloc[:,1:-2], 2, rotation='varimax', method='MINRES')
new_df = fa.loadings
#new_df.loc[new_df['Factor1'] < 0.1, 'Factor1'] = np.nan
#new_df.loc[new_df['Factor2'] < 0.1, 'Factor2'] = np.nan
#new_df.loc[new_df['Factor3'] < 0.1, 'Factor3'] = np.nan
#new_df.loc[new_df['Factor4'] < 0.1, 'Factor4'] = np.nan
new_df[(new_df.Factor1 > 0.1) | (new_df.Factor2 > 0.1)]
# Keep in mind that each of the identified factors should have at least three variables
# with high factor loadings, and that each variable should load highly on only one factor.
fa.get_factor_variance()
```
| github_jupyter |
```
import numpy as np
import matplotlib.pylab as plot
from astropy.io import ascii,fits
from scipy import interpolate
import grb_catalogs_copy
from BurstCube.LocSim.Detector import *
from BurstCube.LocSim.Spacecraft import *
from astropy.coordinates import SkyCoord
from astropy import units as u
from scipy.optimize import curve_fit
import math
from astropy.table import Table
import pandas as pd
## code to use when reading in GBM effective area in order to get data into the desired format
def getGBMdata(gbmfile=None):
    """Reads the GBM NaI effective area file and returns a numpy array
    with two columns ``energy`` and ``aeff``.

    Parameters
    ----------
    gbmfile : string
        Name of file that contains the GBM data.

    Returns
    ----------
    gbmdata : array
        numpy array with two columns ``energy`` and ``aeff``
    """
    column_names = ('energy', 'aeff')
    return np.genfromtxt(gbmfile, skip_header=2, names=column_names)
## bit of useful code for interpolating in log space
def loginterpol(x, y, x1):
    """Interpolate (x, y) linearly in log-log space and evaluate at x1.

    Out-of-range points are linearly extrapolated in log space. All inputs
    must be positive for log10 to be defined.
    """
    log_fit = interpolate.interp1d(np.log10(x), np.log10(y),
                                   bounds_error=False,
                                   fill_value="extrapolate", kind='linear')
    return 10 ** log_fit(np.log10(x1))
def loginterpol2d(x, y, z, x1, y1):
    """Interpolate z over the (x, y) grid in log space and evaluate at (x1, y1).

    Zeros in ``z`` are replaced by 1 before taking log10 so the logarithm is
    defined (log10(1) = 0).

    Parameters
    ----------
    x, y : 1-D arrays defining the grid axes.
    z : 2-D array of grid values, shape (len(y), len(x)).
    x1, y1 : coordinates at which to evaluate the interpolant.

    Returns
    -------
    z1 : array of interpolated values.
    """
    zz = np.array(z, dtype=float)  # BUG FIX: copy, so the caller's array is not mutated
    # BUG FIX: mask zeros element-wise; the old np.where(z==0)[0] indexing
    # blanked entire rows of a 2-D array instead of individual zero entries.
    zz[zz == 0] = 1.
    # NOTE(review): interp2d is removed in SciPy >= 1.14; migrate to
    # RegularGridInterpolator / RectBivariateSpline when upgrading SciPy.
    f = interpolate.interp2d(x, y, np.log10(zz), bounds_error=False,
                             fill_value="extrapolate", kind='linear')
    z1 = 10 ** f(x1, y1)
    return z1  # BUG FIX: the original computed z1 but never returned it
#read in GBM Trigger Catalog
trigfit=fits.open('gbmtrigcat.fits')
trig=trigfit[1].data
#read in GBM Burst Catalog
gbmfit=fits.open('gbmgrbcat_copy.fits')
gbm=gbmfit[1].data
## generate random positions on the sky with equal area probability
def random_sky(n=1):
    """Draw ``n`` random sky positions uniformly distributed on the sphere.

    Returns (ra, dec) in degrees, with ra in [0, 360] and dec in [-90, 90].
    """
    u_draw = np.random.rand(n)
    v_draw = np.random.rand(n)
    azimuth = 2. * np.pi * u_draw
    polar = np.arccos(2. * v_draw - 1.)
    dec = -np.degrees(polar - np.pi / 2.)
    ra = np.degrees(2. * np.pi - azimuth)
    return ra, dec
#function to match GRBs in the Trigger catalog to those in the grb catalog so that we can create an array of the grbs in both
#We will use the trigger timescale found in the trigger catalog
def match_catalogs_name(name1, name2):
    """Return index lists selecting the entries common to both name sequences.

    ``m1`` indexes ``name1`` and ``m2`` indexes ``name2``; each picks out the
    names present in both inputs. The match sizes are printed as a sanity
    check. Duplicate names keep only the last occurrence's index.
    """
    lookup1 = {name: idx for idx, name in enumerate(name1)}
    common1 = set(lookup1).intersection(name2)
    m1 = [lookup1[name] for name in common1]
    print(np.shape(m1))
    lookup2 = {name: idx for idx, name in enumerate(name2)}
    common2 = set(lookup2).intersection(name1)
    m2 = [lookup2[name] for name in common2]
    print(np.shape(m2))
    return m1, m2
#ordering the trig and gbm catalog so that they are in the same order
so=np.argsort(np.array(trig['NAME']))
trig=trig[so]
so=np.argsort(np.array(gbm['NAME']))
gbm=gbm[so]
#creating array of grbs that are found in both catalogs
m1, m2 = match_catalogs_name(trig['NAME'],gbm['NAME'])
#defining our two samples of bursts that are found in both catalogs so that we can utilize them further down
trigbursts = trig[m1]
gbmbursts = gbm[m2]
print(gbmbursts['NAME'])
## read in the GBM Aeff
aeff_gbm = getGBMdata('/home/alyson/NASA/Simulation/BurstCube/Users/ajoens/gbm_effective_area.dat')
## read in BurstCube Aeff for various BC configurations
file='/home/alyson/NASA/Simulation/BurstCube/Users/jracusin/BC_eff_area_curves.ecsv'
bcaeffs=ascii.read(file,format='ecsv')
## separate GBM short & long GRBs
w=np.where(gbmbursts['FLUX_1024']>0)
gbmbursts=gbmbursts[w]
s=np.where((gbmbursts['T90'] <= 2.)&((gbmbursts['PFLX_SPECTRUM_STOP']-gbmbursts['PFLX_SPECTRUM_START'])>0))[0]
l=np.where(gbmbursts['T90'] > 2.)[0]
m=np.where(gbmbursts['PFLX_BEST_FITTING_MODEL'][s] == ' ')
## grab short GRBs with peak spectral info & plot all of the Aeff curves
bceng=bcaeffs['keV']
bcengdiff=bceng[1:]-bceng[0:-1]
w=np.where(bcengdiff<0)[0]
nsims=len(w)
w=np.append(-1,w)#,len(eng))
for i in range(nsims):
plot.plot(bcaeffs['keV'][w[i]+1:w[i+1]+1],bcaeffs['aeff'][w[i]+1:w[i+1]+1])
plot.xscale('log')
plot.yscale('log')
plot.xlabel('Energy (keV)')
plot.ylabel(r'Effective Area (cm$^2$)')
plot.plot(aeff_gbm['energy'],aeff_gbm['aeff'])
i=0
gbmae=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],bceng[w[i]+1:w[i+1]+1])
plot.plot(bceng[w[i]+1:w[i+1]+1],gbmae)
plot.show()
## grab energies from those curves and create an array of the energies
E=np.array(bceng[w[i]+1:w[i+1]+1])
print(E)
#Integrating the best fit spectrum for each GRB in the energy range of 50-300 KeV to get max. observed photon flux.
#Doing the same but also folding in the effective area in order to get count rate.
#This will give us the photon flux in units of ph/cm^2/s.
mo=gbmbursts['PFLX_BEST_FITTING_MODEL'][s]
bcpf=np.zeros(len(s))
pf=np.zeros(len(s))
gbmcr=np.zeros(len(s))
bccr=np.zeros(len(s))
outE=np.logspace(np.log10(50),np.log10(300),100) # returns numbers spaced evenly on a log scale
for i in range(len(s)):
for j in range(nsims):
E=np.array(bceng[w[j]+1:w[j+1]+1])
AeffBC=loginterpol(E,bcaeffs['aeff'][w[j]+1:w[j+1]+1],outE)
AeffGBM=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],outE) #eng[w[j]+1:w[j+1]+1])
Aratio=(AeffBC/AeffGBM)
# this should give us an array of the maximum observed photon flux for GBM
if mo[i]=='PFLX_PLAW':
gbmcr[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]])*AeffGBM,outE)
pf[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]]),outE)
bccr[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]])*AeffGBM*Aratio,outE)
bcpf[i]=np.trapz(gbmbursts['PFLX_PLAW_AMPL'][s[i]]*grb_catalogs_copy.pl(outE,gbmbursts['PFLX_PLAW_INDEX'][s[i]])*Aratio,outE)
if mo[i]=='PFLX_COMP':
gbmcr[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]])*AeffGBM,outE)
pf[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]]),outE)
bccr[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]])*AeffGBM*Aratio,outE)
bcpf[i]=np.trapz(gbmbursts['PFLX_COMP_AMPL'][s[i]]*grb_catalogs_copy.comp(outE,gbmbursts['PFLX_COMP_INDEX'][s[i]],gbmbursts['PFLX_COMP_EPEAK'][s[i]])*Aratio,outE)
if mo[i]=='PFLX_BAND':
gbmcr[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]])*AeffGBM,outE)
pf[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]]),outE)
bccr[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]])*AeffGBM*Aratio,outE)
bcpf[i]=np.trapz(gbmbursts['PFLX_BAND_AMPL'][s[i]]*grb_catalogs_copy.band(outE,gbmbursts['PFLX_BAND_ALPHA'][s[i]],gbmbursts['PFLX_BAND_EPEAK'][s[i]],gbmbursts['PFLX_BAND_BETA'][s[i]])*Aratio,outE)
if mo[i]=='PFLX_SBPL':
gbmcr[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]])*AeffGBM,outE)
pf[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]]),outE)
bccr[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]])*AeffGBM*Aratio,outE)
bcpf[i]=np.trapz(gbmbursts['PFLX_SBPL_AMPL'][s[i]]*grb_catalogs_copy.sbpl(outE,gbmbursts['PFLX_SBPL_INDX1'][s[i]],gbmbursts['PFLX_SBPL_BRKEN'][s[i]],gbm['PFLX_SBPL_INDX2'][s[i]])*Aratio,outE)
# Plot BATSE 64 ms flux against pf to check that they agree.
flux=gbmbursts['FLUX_BATSE_64'][s]
# Define a rank-based sampling probability that falls off slowly, so faint
# (low peak flux) bursts are drawn more often than bright ones.
#p = np.array((np.arange(pf.shape[0])+1)**(-1.0))
p = np.array((np.arange(pf.shape[0])+1.05)**(-0.5))
p=p/sum(p)  # normalize to a proper probability distribution
# Randomly sample burst indices (with replacement) using the weights above
# to build our "intrinsic" sample of 1200 simulated bursts.
r=np.random.choice(pf.shape[0], 1200, replace=True, p=p)
simgbmpfsample = np.array(pf[r])    # simulated GBM peak photon fluxes
simgbmcr = np.array(gbmcr[r])       # simulated GBM count rates
simbcpfsample = np.array(bcpf[r])   # simulated BurstCube peak photon fluxes
simbccr = np.array(bccr[r])         # simulated BurstCube count rates
# Examine the probability distribution to be sure it behaves the way we intend.
print(min(p),max(p))
plot.hist(p)
## setup GBM
# Fermi/GBM detector pointings: detector id -> (azimuth, zenith), given as
# 'deg:min:sec' strings in the format the Spacecraft class expects.
gbm_pointings = {'01': ('45:54:0','20:36:0'),
                 '02': ('45:6:0','45:18:0'),
                 '03': ('58:24:0','90:12:0'),
                 '04': ('314:54:0','45:12:0'),
                 '05': ('303:12:0','90:18:0'),
                 '06': ('3:24:0','89:48:0'),
                 '07': ('224:54:0','20:24:0'),
                 '08': ('224:36:0','46:12:0'),
                 '09': ('236:36:0','90:0:0'),
                 '10': ('135:12:0','45:36:0'),
                 '11': ('123:42:0','90:24:0'),
                 '12': ('183:42:0','90:18:0')}
fermi = Spacecraft(gbm_pointings,window=0.1)
# Sky grid (res x res over RA in [0,360), Dec in [-90,90]) to sample exposure on.
res = 250
rr,dd = np.meshgrid(np.linspace(0,360,res,endpoint=False),np.linspace(-90,90,res))
exposure_positions = np.vstack([rr.ravel(),dd.ravel()])
# Exposure of every GBM detector at every grid position.
gbm_exposures = np.array([[ detector.exposure(position[0],position[1]) for position in exposure_positions.T]
                          for detector in fermi.detectors])
## setup BurstCube
# BurstCube's four detectors: 90-degree azimuth steps, all at 45-degree zenith.
pointings = {'01': ('0:0:0','45:0:0'),
             '02': ('90:0:0','45:0:0'),
             '03': ('180:0:0','45:0:0'),
             '04': ('270:0:0','45:0:0')}
burstcube = Spacecraft(pointings,window=0.1)
# Same sky grid as used for GBM above.
res = 250
rr,dd = np.meshgrid(np.linspace(0,360,res,endpoint=False),np.linspace(-90,90,res))
exposure_positions = np.vstack([rr.ravel(),dd.ravel()])
# Exposure of every BurstCube detector at every grid position.
exposures = np.array([[ detector.exposure(position[0],position[1]) for position in exposure_positions.T]
                      for detector in burstcube.detectors])
# Angular distances on the sky, computed with astropy's SkyCoord.
def separation(ra1,dec1,ra2,dec2):
    """Return the angular separation (degrees) between (ra1, dec1) and (ra2, dec2)."""
    point_a = SkyCoord(ra=ra1*u.deg, dec=dec1*u.deg)
    point_b = SkyCoord(ra=ra2*u.deg, dec=dec2*u.deg)
    return point_a.separation(point_b).value
# now that GBM and BurstCube's pointings are set up we will throw GRBs at it and determine the exposure for each GRB.
# generate GRBs and throw them at GBM
def throw_grbs(fermi,minflux,maxflux):
    """Throw random-sky GRBs at GBM and BurstCube and return their exposures.

    Parameters: fermi is the GBM Spacecraft; minflux/maxflux are currently
    unused but kept for interface compatibility with existing callers.
    Returns (gbmexposures, bcexposures, secondhighestgbm, secondhighestbc,
    randgbmexposures, randbcexposures); the "secondhighest" arrays hold the
    second-largest detector exposure per burst, used later for the >4.5 sigma cut.
    Uses the globals simgbmpfsample, burstcube and random_sky.
    """
    nsims=int(np.round(len(simgbmpfsample)))
    ra,dec=random_sky(nsims)
    ra=np.array(ra)-180
    dec=np.array(dec)
    # GBM and BurstCube exposures for each random GRB (detectors x bursts).
    randgbmexposures = np.array([[detector.exposure(ra[i],dec[i]) for i in range(nsims)] for detector in fermi.detectors])
    randbcexposures = np.array([[detector.exposure(ra[i],dec[i]) for i in range(nsims)] for detector in burstcube.detectors])
    # Sort detectors into descending exposure order for each burst.
    # BUG FIX: the original looped over every column and redid this identical
    # full-array sort once per column; a single sort gives the same result.
    gbmexposures = np.transpose(-np.sort(-randgbmexposures.T))
    bcexposures = np.transpose(-np.sort(-randbcexposures.T))
    # Select the second highest exposure value; used to require that the
    # second-best detector reaches sig > 4.5.
    secondhighestgbm = gbmexposures[1,:]
    secondhighestbc = bcexposures[1,:]
    return gbmexposures, bcexposures, secondhighestgbm, secondhighestbc, randgbmexposures, randbcexposures
# Define the peak flux interval (seconds) using the trigger catalog.
msinterval = trigbursts['Trigger_Timescale'][s]
interval = msinterval/1000
# flux = simpf; this is in ph/sec
flux=simgbmpfsample
minflux=min(flux)
maxflux=max(flux)
gbmexposures, bcexposures, secondhighestgbm, secondhighestbc, randgbmexposures, randbcexposures = throw_grbs(fermi,minflux,maxflux)
# Detected source counts = count rate x second-best detector exposure x interval.
sourcegbm = simgbmcr*secondhighestgbm*interval[r]
sourcebc = simbccr*secondhighestbc*interval[r]
# Assumed background count rate (cts/s), scaled the same way.
bckgrd=300
scaledgbmbckgrd = bckgrd*secondhighestgbm*interval[r]
scaledbcbckgrd = bckgrd*secondhighestbc*interval[r]
# Detection flags (1.0 = detected) for each simulated burst.
detectgbm = np.zeros(len(simgbmpfsample))
detectbc = np.zeros(len(simbcpfsample))
# Significance of the second-highest-exposure detector; a burst is
# detectable when sig > 4.5.
# BUG FIX: the original loop variable was `u`, which shadowed the
# astropy.units alias `u` used by separation(); renamed to `k`.
for k in range(len(simgbmpfsample)):
    sig = sourcegbm[k] / (math.sqrt(sourcegbm[k] + scaledgbmbckgrd[k]))
    if sig > 4.5:
        detectgbm[k] = 1.0
    else:
        detectgbm[k] = 0.0
for j in range(len(simbcpfsample)):
    sig = sourcebc[j] / (math.sqrt(sourcebc[j] + scaledbcbckgrd[j]))
    if sig > 4.5:
        detectbc[j] = 1.0
    else:
        detectbc[j] = 0.0
# Plot count rates and peak fluxes for the real and simulated samples.
w=np.where(pf>0)[0]                      # real GBM bursts with positive peak flux
wg = np.where(simgbmcr*detectgbm>0)[0]   # simulated bursts detected by GBM
wbc = np.where(simbccr*detectbc>0)[0]    # simulated bursts detected by BurstCube
fig=plot.figure(figsize=(20,5))
plot.subplot(1,2,1)
plot.hist(gbmcr[w],label='real GBM',bins=np.logspace(1,6,40),color='orange')
plot.hist(simgbmcr[wg],label='Simulated GBM',bins=np.logspace(1,6,40),alpha=0.7,color='blue')
plot.hist(simbccr[wbc],label='Simulated BurstCube',bins=np.logspace(1,6,40),alpha=0.7,color='green')
plot.xscale('log')
plot.legend()
plot.subplot(1,2,2)
#plot.hist(flux,label='All',bins=np.logspace(-1,2,40),color='green')
# pf has been gathered from the GBM catalog
plot.hist(pf[w],label='real GBM',bins=np.logspace(-1,4,40),color='orange')
# this is the simulated GBM
plot.hist(simgbmpfsample[wg],label='Simulated GBM',bins=np.logspace(-1,4,40),alpha=0.7,color='blue')
plot.hist(simbcpfsample[wbc],label='Simulated BurstCube',bins=np.logspace(-1,4,40),alpha=0.7,color='green')
#plot.hist(flux[w],label='BC',bins=np.logspace(-1,2,40),alpha=0.7,color='red')
plot.xscale('log')
plot.legend()
plot.show()
# Detection fraction of simulated GBM and of BurstCube.
detgbm = np.where(detectgbm == 1)[0]
ratiogbm = len(detgbm) / len(detectgbm)
print(ratiogbm)
detbc = np.where(detectbc == 1)[0]
ratiobc = len(detbc) / len(detectbc)
print(ratiobc)
# Bursts BurstCube would see per year: detection-fraction ratio times 40
# (presumably GBM's yearly burst rate — confirm the 40).
bcbursts = ratiobc/ratiogbm *40
print(bcbursts)
```
| github_jupyter |
# Part1
```
from __future__ import unicode_literals
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import font_manager
from matplotlib.font_manager import FontProperties
# Chinese-capable font for matplotlib labels/legends.
font = FontProperties(fname=r"/root/anaconda2/envs/python3/lib/python3.6/site-packages/matplotlib/mpl-data/fonts/ttf/msyh.ttf")
import numpy as np
from sksurv.nonparametric import kaplan_meier_estimator
from sksurv.preprocessing import OneHotEncoder
from sksurv.linear_model import CoxnetSurvivalAnalysis#CoxPHSurvivalAnalysis
from sksurv.linear_model import CoxPHSurvivalAnalysis
from sksurv.metrics import concordance_index_censored
from sksurv.metrics import concordance_index_ipcw
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Component-removal records; the CSV is GB2312-encoded (Chinese headers).
data1 = pd.read_csv("398908-3.csv", encoding = "GB2312")
#data1 = data1[data1["部件装上使用小时数"]!="00:00"]
# "部件本次装机使用小时" (hours on wing this installation) comes as "HH:MM";
# keep the integer hour part and drop zero-hour records.
data1["部件本次装机使用小时"] = data1["部件本次装机使用小时"].str.split(':').str[0].astype(int)
data1 = data1[data1["部件本次装机使用小时"]>0]
# Event indicator: True when "非计划" (unplanned) == "X".
# NOTE(review): the name "IsPlanned" is misleading — it flags UNplanned removals.
data1["IsPlanned"] = data1["非计划"]=="X"
print(data1["IsPlanned"].value_counts())
# Survival target: (event indicator, observed time in hours).
data_y = data1[["IsPlanned", "部件本次装机使用小时"]]
data_y["部件本次装机使用小时"].hist(bins=12, range=(0,60000))
# Zero flight/usage hours at installation means the plane/part was new.
data1["IsPlaneNew"] = data1["部件装上飞行小时数"]=="00:00"
data1["IsPartNew"] = data1["部件装上使用小时数"]=="00:00"
def CheckNew(p1,p2):
    """Label a record by whether the plane (p1) and the part (p2) are new."""
    plane_state = "PlaneNew" if p1 else "PlaneOld"
    part_state = "PartNew" if p2 else "PartOld"
    return plane_state + "-" + part_state
#print([CheckNew(row["IsPlaneNew"], row["IsPartNew"]) for idx, row in data1.iterrows()])
# Combined plane/part newness category for each record.
data1["PlanePartType"] = [CheckNew(row["IsPlaneNew"], row["IsPartNew"]) for idx, row in data1.iterrows()]
# Installation year derived from the installation date ("安装日期").
data1["安装日期"] = pd.to_datetime(data1["安装日期"])
data1["安装年度"] = data1["安装日期"].dt.year
# Normalize one repair-company name to English and fill missing entries.
di = {"霍尼韦尔": "HONEYWELL"}
data1.replace({"最近送修公司": di}, inplace=True)
data1["最近送修公司"].fillna("Unknown", inplace=True)
data1["FH TSN"].fillna("00:00", inplace=True)
# Convert "HH:MM" duration strings to integer hours.
data1["部件装上飞行小时数"] = data1["部件装上飞行小时数"].str.split(':').str[0].astype(int)
data1["部件装上使用小时数"] = data1["部件装上使用小时数"].str.split(':').str[0].astype(int)
# Bin the continuous usage columns into 8 equal-width ranges.
data1["部件装上飞行小时数-Range"] = pd.cut(data1['部件装上飞行小时数'], 8)
#data1["部件装上飞行循环数-Range"] = pd.cut(data1['部件装上飞行循环数'], 8)
data1["部件装上使用小时数-Range"] = pd.cut(data1['部件装上使用小时数'], 8)
#data1["部件装上使用循环数-Range"] = pd.cut(data1['部件装上使用循环数'], 8)
data1["CY TSN-Range"] = pd.cut(data1['CY TSN'], 8)
# NOTE(review): "FH TSN" was filled with the string "00:00" above and is
# binned here without conversion to a number — confirm its dtype.
data1["FH TSN-Range"] = pd.cut(data1['FH TSN'], 8)
#data_x = data1[["机型","制造序列号","机号","参考类型","指令类型","序号","拆换原因","部件装上飞行循环数","部件装上使用循环数",
#                "部件拆下飞行循环数","部件拆下使用循环数","装上序号","最近送修公司","CY TSN","FH TSN"]]
#data_x = data1[["机型","参考类型","指令类型","拆换原因","部件装上飞行循环数","部件装上使用循环数",
#                "部件拆下飞行循环数","部件拆下使用循环数","CY TSN","FH TSN"]]
data_x = data1[["机型","安装年度","部件装上飞行小时数-Range","部件装上使用小时数-Range","FH TSN-Range", "最近送修公司","PlanePartType"]]
# Overall Kaplan-Meier survival curve.
time, survival_prob = kaplan_meier_estimator(data_y["IsPlanned"], data_y["部件本次装机使用小时"])
plt.step(time, survival_prob, where="post")
plt.ylabel("est. probability of survival $\hat{S}(t)$")
plt.xlabel("time $t$")
# Candidate grouping columns:
# "机型","拆换年度","部件装上飞行小时数-Range","部件装上飞行循环数-Range","部件装上使用小时数-Range","部件装上使用循环数-Range","CY TSN-Range","FH TSN-Range", "最近送修公司"
#col = "机型"
#col = "参考类型"
col = "PlanePartType"
#col = "安装年度"
#col = "机型"
#print((data_x["最近送修公司"]!="上海航新") & (data_x["最近送修公司"]!="PP"))
y = data_y
x = data_x
# Kaplan-Meier curve for each category of `col`, labeled with group size.
for value in x[col].unique():
    mask = x[col] == value
    time_cell, survival_prob_cell = kaplan_meier_estimator(y["IsPlanned"][mask],
                                                           y["部件本次装机使用小时"][mask])
    plt.step(time_cell, survival_prob_cell, where="post", label="%s (n = %d)" % (value, mask.sum()))
plt.ylabel("est. probability of survival $\hat{S}(t)$")
plt.xlabel("time $t$")
plt.legend(loc="upper right", prop=font)
# Same per-group KM plot, now grouped by repair company with three
# companies filtered out.
#col = "机型"
#col = "参考类型"
col = "最近送修公司"
#col = "安装年度"
#col = "机型"
#print((data_x["最近送修公司"]!="上海航新") & (data_x["最近送修公司"]!="PP"))
filter1 = (data_x["最近送修公司"]!="上海航新") & (data_x["最近送修公司"]!="PP") & (data_x["最近送修公司"]!="海航技术")
y = data_y[filter1]
x = data_x[filter1]
for value in x[col].unique():
    mask = x[col] == value
    time_cell, survival_prob_cell = kaplan_meier_estimator(y["IsPlanned"][mask],
                                                           y["部件本次装机使用小时"][mask])
    plt.step(time_cell, survival_prob_cell, where="post", label="%s (n = %d)" % (value, mask.sum()))
plt.ylabel("est. probability of survival $\hat{S}(t)$")
plt.xlabel("time $t$")
plt.legend(loc="upper right", prop=font)
#data_x.select_dtypes(exclude=['int','int64' 'float']).columns
data_x.describe()
#"部件装上飞行小时数-Range","部件装上飞行循环数-Range","部件装上使用小时数-Range","部件装上使用循环数-Range","CY TSN-Range","FH TSN-Range",
#
# One-hot encode the categorical features for the Cox model.
x = data_x.copy()
cat_features = ["机型", "安装年度","部件装上飞行小时数-Range","部件装上使用小时数-Range","FH TSN-Range", "最近送修公司","PlanePartType"]
for col in cat_features:
    x[col] = x[col].astype('category')
data_x_numeric = OneHotEncoder().fit_transform(x[cat_features])
data_x_numeric.head()
# Columns that still contain missing values.
null_columns=data1.columns[data1.isnull().any()]
data1[null_columns].isnull().sum()
#data_y = data_y.as_matrix()
# Structured (event, time) records as required by scikit-survival.
y = data_y.to_records(index=False)
estimator = CoxPHSurvivalAnalysis() #CoxnetSurvivalAnalysis()
estimator.fit(data_x_numeric, y)
#pd.Series(estimator.coef_, index=data_x_numeric.columns)
prediction = estimator.predict(data_x_numeric)
# Concordance on the training data: censored and IPCW variants.
result = concordance_index_censored(y["IsPlanned"], y["部件本次装机使用小时"], prediction)
print(result[0])
result = concordance_index_ipcw(y, y, prediction)
print(result[0])
def fit_and_score_features(X, y):
    """Fit a univariate Coxnet model on each column of X and return the scores."""
    num_features = X.shape[1]
    scores = np.empty(num_features)
    model = CoxnetSurvivalAnalysis()
    for idx in range(num_features):
        single_feature = X[:, idx:idx + 1]
        model.fit(single_feature, y)
        scores[idx] = model.score(single_feature, y)
    return scores
# Univariate c-index score of every encoded feature, best first.
scores = fit_and_score_features(data_x_numeric.values, y)
pd.Series(scores, index=data_x_numeric.columns).sort_values(ascending=False)
# Inspect four sample rows and their predicted survival curves.
x_new = data_x_numeric.loc[[46,77,200,593]]
#print(x_new)
data_x.loc[[46,77,200,593]]
y[[46,77,200,593]]
pred_surv = estimator.predict_survival_function(x_new)
for i, c in enumerate(pred_surv):
    plt.step(c.x, c.y, where="post", label="Sample %d" % (i + 1))
plt.ylabel("est. probability of survival $\hat{S}(t)$")
plt.xlabel("time $t$")
plt.legend(loc="best")
# Pipeline: encode -> keep k best features (by univariate c-index) -> Cox PH,
# with k chosen by 3-fold grid search.
pipe = Pipeline([('encode', OneHotEncoder()),
                 ('select', SelectKBest(fit_and_score_features, k=3)),
                 ('model', CoxPHSurvivalAnalysis())])
param_grid = {'select__k': np.arange(1, data_x_numeric.shape[1] -3)}
# NOTE(review): the `iid` argument was removed from scikit-learn's
# GridSearchCV in 0.24 — this needs an older sklearn; confirm version.
gcv = GridSearchCV(pipe, param_grid=param_grid, return_train_score=True, cv=3, iid=True)
gcv.fit(x, y)
pd.DataFrame(gcv.cv_results_).sort_values(by='mean_test_score', ascending=False)
# Refit with the best k and show the coefficients of the selected columns.
pipe.set_params(**gcv.best_params_)
pipe.fit(x, y)
encoder, transformer, final_estimator = [s[1] for s in pipe.steps]
pd.Series(final_estimator.coef_, index=encoder.encoded_columns_[transformer.get_support()])
```
# Part2
```
from sklearn.model_selection import train_test_split
from sksurv.metrics import (concordance_index_censored,
concordance_index_ipcw,
cumulative_dynamic_auc)
# Numeric-only feature set for the AUC analysis below.
data_x = data1[["安装年度","部件装上飞行小时数","部件装上使用小时数","FH TSN"]]
def df_to_sarray(df):
    """
    Convert a pandas DataFrame object to a numpy structured array.
    This is functionally equivalent to but more efficient than
    np.array(df.to_array())
    :param df: the data frame to convert
    :return: a numpy structured array representation of df
    """
    v = df.values
    cols = df.columns
    if False:  # python 2 needs .encode() but 3 does not (dead branch kept as a py2 toggle)
        types = [(cols[i].encode(), df[k].dtype.type) for (i, k) in enumerate(cols)]
    else:
        types = [(cols[i], df[k].dtype.type) for (i, k) in enumerate(cols)]
    dtype = np.dtype(types)
    # NOTE(review): z is allocated with the full 2-D shape, so every cell is a
    # complete record; a 1-D array of length len(df) may have been intended — confirm.
    z = np.zeros(v.shape, dtype)
    for (i, k) in enumerate(z.dtype.names):
        z[:,i] = v[:, i]
    return z
y = data_y.to_records(index=False)
# 80/20 split (random_state left unset, so the split changes per run).
x_train, x_test, y_train, y_test = train_test_split(data_x, y, test_size=0.2)#, random_state=1)
x_train = x_train.values
x_test = x_test.values
# IPCW metrics require the test event times to lie strictly inside the
# range of training event times; verify that here.
y_events_train = y_train[y_train['IsPlanned']==False]
train_min, train_max = y_events_train["部件本次装机使用小时"].min(), y_events_train["部件本次装机使用小时"].max()
y_events_test = y_test[y_test['IsPlanned']==False]
test_min, test_max = y_events_test["部件本次装机使用小时"].min(), y_events_test["部件本次装机使用小时"].max()
assert train_min <= test_min < test_max < train_max, \
    "time range or test data is not within time range of training data."
# Evaluation time grid: 15 points between the 5th and 95th percentiles.
times = np.percentile(data_y["部件本次装机使用小时"], np.linspace(5, 95, 15))
print(times)
import matplotlib
matplotlib.matplotlib_fname()
num_columns = ["安装年度","部件装上飞行小时数","部件装上使用小时数","FH TSN"]
def plot_cumulative_dynamic_auc(risk_score, label, color=None):
    """Plot time-dependent AUC for one risk score; dashed line marks its mean."""
    auc, mean_auc = cumulative_dynamic_auc(y_train, y_test, risk_score, times)
    plt.plot(times, auc, marker="o", color=color, label=label)
    plt.legend(prop = font)
    plt.xlabel("time时间",fontproperties=font)
    plt.ylabel("time-dependent AUC")
    plt.axhline(mean_auc, color=color, linestyle="--")
# Use each raw feature as its own risk score and compare their AUCs.
for i, col in enumerate(num_columns):
    plot_cumulative_dynamic_auc(x_test[:, i], col, color="C{}".format(i))
    ret = concordance_index_ipcw(y_train, y_test, x_test[:, i], tau=times[-1])
```
# Part3
```
# Full feature set (categorical + numeric) for the pipeline model.
data_x = data1[["机型","安装年度","部件装上飞行小时数","部件装上使用小时数","FH TSN", "最近送修公司","PlanePartType"]]
cat_features = ["机型", "安装年度", "最近送修公司","PlanePartType"]
for col in cat_features:
    data_x[col] =data_x[col].astype('category')
times = np.percentile(data_y["部件本次装机使用小时"], np.linspace(5, 95, 15))
print(times)
estimator = CoxPHSurvivalAnalysis() #CoxnetSurvivalAnalysis()
estimator.fit(data_x_numeric, y)
from sklearn.pipeline import make_pipeline
y = data_y.to_records(index=False)
x_train, x_test, y_train, y_test = train_test_split(data_x, y, test_size=0.2)#, random_state=1)
# One-hot encoding + Cox PH in one pipeline, fit on the training split.
cph = make_pipeline(OneHotEncoder(), CoxPHSurvivalAnalysis())
cph.fit(x_train, y_train)
# Concordance on the held-out test split.
result = concordance_index_censored(y_test["IsPlanned"], y_test["部件本次装机使用小时"], cph.predict(x_test))
print(result[0])
# Time-dependent AUC of the pipeline's test-set risk scores.
va_auc, va_mean_auc = cumulative_dynamic_auc(y_train, y_test, cph.predict(x_test), times)
plt.plot(times, va_auc, marker="o")
plt.axhline(va_mean_auc, linestyle="--")
plt.xlabel("time from enrollment")
plt.ylabel("time-dependent AUC")
plt.grid(True)
print(y_test["部件本次装机使用小时"])
print(cph.predict_survival_function(x_test))
# NOTE(review): cph.predict returns a risk score, not a time, so subtracting
# it from the observed hours is not a residual — confirm intent.
print(y_test["部件本次装机使用小时"] - cph.predict(x_test))
```
# Part4
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas
import seaborn as sns
from sklearn.model_selection import ShuffleSplit, GridSearchCV
from sksurv.datasets import load_veterans_lung_cancer
from sksurv.column import encode_categorical
from sksurv.metrics import concordance_index_censored
from sksurv.svm import FastSurvivalSVM
sns.set_style("whitegrid")
data_x = data1[["机型","安装年度","部件装上飞行小时数","部件装上使用小时数","FH TSN", "最近送修公司","PlanePartType"]]
cat_features = ["机型", "安装年度", "最近送修公司","PlanePartType"]
for col in cat_features:
    data_x[col] = data_x[col].astype('category')
# One-hot encode and split 70/30 (unseeded split).
x = OneHotEncoder().fit_transform(data_x)#encode_categorical(data_x)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)#, random_state=1)
# Survival SVM with rank_ratio=0.0 (regression objective per sksurv docs — confirm).
estimator = FastSurvivalSVM(optimizer="rbtree",rank_ratio=0.0, max_iter=1000, tol=1e-6, random_state=0, alpha=2.**-6)
estimator.fit(x_train, y_train)
prediction = estimator.predict(x_test)
result = concordance_index_censored(y_test["IsPlanned"], y_test["部件本次装机使用小时"], prediction)
print(result[0])
estimator.predict(x_train)
# Fresh (default-objective) SVM for the alpha grid search below.
estimator = FastSurvivalSVM(optimizer="rbtree", max_iter=1000, tol=1e-6, random_state=0)
def score_survival_model(model, X, y):
    """Scorer for GridSearchCV: censored concordance index of the model's predictions."""
    prediction = model.predict(X)
    result = concordance_index_censored(y['IsPlanned'], y['部件本次装机使用小时'], prediction)
    return result[0]
param_grid = {'alpha': 2. ** np.arange(-12, 13, 2)}
cv = ShuffleSplit(n_splits=20, test_size=0.4, random_state=0)
# NOTE(review): `iid` was removed from GridSearchCV in scikit-learn 0.24 — confirm version.
gcv = GridSearchCV(estimator, param_grid, scoring=score_survival_model,
                   n_jobs=12, iid=False, refit=False,
                   cv=cv)
param_grid
import warnings
y = data_y.to_records(index=False)
warnings.filterwarnings("ignore", category=UserWarning)
gcv = gcv.fit(x, y)
gcv.best_score_, gcv.best_params_
def plot_performance(gcv):
    """Box-plot the per-split CV test scores for every alpha tried in the grid search."""
    n_splits = gcv.cv.n_splits
    records = {"alpha": [], "test_score": [], "split": []}
    alpha_labels = []
    for idx, params in enumerate(gcv.cv_results_["params"]):
        label = "%.5f" % params["alpha"]
        alpha_labels.append(label)
        for split in range(n_splits):
            score = gcv.cv_results_["split%d_test_score" % split][idx]
            records["alpha"].append(label)
            records["test_score"].append(score)
            records["split"].append(split)
    frame = pandas.DataFrame.from_dict(records)
    _, ax = plt.subplots(figsize=(11, 6))
    sns.boxplot(x="alpha", y="test_score", data=frame, order=alpha_labels, ax=ax)
    _, tick_labels = plt.xticks()
    for tick in tick_labels:
        tick.set_rotation("vertical")
plot_performance(gcv)
from sksurv.svm import FastKernelSurvivalSVM
from sksurv.kernels import clinical_kernel
# 50/50 split on the raw (un-encoded) features for the kernel SVM.
x_train, x_test, y_train, y_test = train_test_split(data_x, y, test_size=0.5)#, random_state=1)
kernel_matrix = clinical_kernel(x_train)
kssvm = FastKernelSurvivalSVM(optimizer="rbtree", kernel="precomputed", random_state=0, alpha=2.**-6)
kssvm.fit(kernel_matrix, y_train)
x_test.shape
# NOTE(review): this kernel is computed among test rows only; a precomputed-
# kernel predict normally needs the test-vs-train cross kernel — confirm.
kernel_matrix = clinical_kernel(x_test[0:552])
prediction = kssvm.predict(kernel_matrix)
result = concordance_index_censored(y_test[0:552]["IsPlanned"], y_test[0:552]["部件本次装机使用小时"], prediction)
print(result[0])
# Grid search over alpha with a kernel computed on the full dataset.
kernel_matrix = clinical_kernel(data_x)
kssvm = FastKernelSurvivalSVM(optimizer="rbtree", kernel="precomputed", random_state=0, alpha=2.**-12)
kgcv = GridSearchCV(kssvm, param_grid, score_survival_model,
                    n_jobs=12, iid=False, refit=False,
                    cv=cv)
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
kgcv = kgcv.fit(kernel_matrix, y)
kgcv.best_score_, kgcv.best_params_
plot_performance(kgcv)
```
| github_jupyter |
```
!pip install torch
!pip3 install torchvision
import torch
from torch import nn
import torch.nn.functional as F
import os
from torch.utils.data import Dataset
import cv2
from tqdm import tqdm
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Load the Drive helper and mount Google Drive (dataset lives there).
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
# Verify that the Colab runtime has a GPU attached (TensorFlow probe only;
# the model below is PyTorch).
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
class UnetModel(nn.Module):
    """U-Net: a 4-stage encoder, a bottleneck, and a 4-stage decoder with
    skip connections, finished by a 1x1 conv and a sigmoid."""

    def conv(self, in_channels, out_channels):
        """The U-Net 'double conv': two (3x3 conv -> BatchNorm -> ReLU) stages."""
        block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=(1,1)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3,padding=(1,1)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
        return block

    def up_conv(self, in_channels, out_channels):
        """Upsample by 2 then (3x3 conv -> BatchNorm -> ReLU)."""
        block = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, kernel_size=3,padding=(1,1)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )
        return block

    def __init__(self, in_channel, out_channel):
        super(UnetModel, self).__init__()
        # Encoder: double-conv blocks separated by 2x2 max-pool downsampling.
        self.conv1 = self.conv(in_channel,64)
        self.conv1_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = self.conv(64, 128)
        self.conv2_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = self.conv(128, 256)
        self.conv3_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv4 = self.conv(256, 512)
        self.conv4_maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Bottleneck.
        self.conv5 = self.conv(512, 1024)
        # Decoder: upsample, concatenate the matching encoder output
        # (skip connection, doubling the channels), then double-conv.
        self.up_conv4 = self.up_conv(1024, 512)
        self.up4_conv =self.conv(1024,512)
        self.up_conv3 = self.up_conv(512, 256)
        self.up3_conv = self.conv(512,256)
        self.up_conv2 = self.up_conv(256,128)
        self.up2_conv = self.conv(256,128)
        self.up_conv1 = self.up_conv(128,64)
        self.up1_conv = self.conv(128,64)
        # Final 1x1 conv to the requested output channels, squashed to [0, 1].
        self.conv_1x1 = nn.Conv2d(64,out_channel,kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Encoder path (keep each stage's output for the skip connections).
        out1 = self.conv1(x)
        out2 = self.conv1_maxpool(out1)
        out2 = self.conv2(out2)
        out3 = self.conv2_maxpool(out2)
        out3 = self.conv3(out3)
        out4 = self.conv3_maxpool(out3)
        out4 = self.conv4(out4)
        out5 = self.conv4_maxpool(out4)
        out5 = self.conv5(out5)
        # Decoder path: upsample, concat the skip along the channel dim, double-conv.
        exp5 = self.up_conv4(out5)
        exp5 = torch.cat((out4, exp5), dim=1)
        exp5 = self.up4_conv(exp5)
        exp4 = self.up_conv3(exp5)
        exp4 = torch.cat((out3, exp4), dim=1)
        exp4 = self.up3_conv(exp4)
        exp3 = self.up_conv2(exp4)
        exp3 = torch.cat((out2, exp3), dim=1)
        exp3 = self.up2_conv(exp3)
        exp2 = self.up_conv1(exp3)
        exp2 = torch.cat((out1, exp2), dim=1)
        exp2 = self.up1_conv(exp2)
        # 1x1 projection + sigmoid for a per-pixel output in [0, 1].
        exp1 = self.conv_1x1(exp2)
        exp1 = self.sigmoid(exp1)
        return exp1
class MyDataset(Dataset):
    """Image-segmentation dataset: grayscale 128x128 inputs (X) and masks (Y)
    loaded from <home_directory>/<mode>/{input,mask}."""

    def __init__(self, len, home_directory, noise=2, mode="Train"):
        # NOTE(review): `len` shadows the builtin, and `noise` is never used.
        self.len = len
        self.examples = []
        self.iter_index = 0
        self.X = torch.empty((len, 128,128))
        self.Y = torch.empty((len,128,128), dtype=torch.long)
        self.input_directory = os.path.join(home_directory, mode, 'input')
        self.mask_directory = os.path.join(home_directory, mode, 'mask')
        print("dataset input path {}".format(self.input_directory))
        print("dataset mask path {}".format(self.mask_directory))
        # Sort both listings so input i pairs with mask i by filename order.
        input_names = os.listdir(self.input_directory)
        input_names.sort()
        mask_names = os.listdir(self.mask_directory)
        mask_names.sort()
        self.set_dataset(self.input_directory, input_names, True)
        self.set_dataset(self.mask_directory, mask_names, False)

    def set_dataset(self, directory, names, input_na = True):
        """Read each image, grayscale it, scale to [0,1], resize to 128x128,
        and store into self.X (input_na=True) or self.Y (input_na=False)."""
        # print(self.len)
        # print(len(names))
        # print(names)
        index = 0
        for name in names:
            img_path = directory + '/' + name
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img =img/255
            resize_img = cv2.resize(img, (128,128))
            if input_na:
                # print(index)
                self.X[index] = torch.tensor(resize_img)
            else:
                # NOTE(review): self.Y is torch.long, so these [0,1] floats
                # truncate to 0 except exactly-1.0 pixels — confirm intended.
                resize_img = torch.from_numpy(resize_img).float()
                self.Y[index] = resize_img
            index += 1

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        # Returns the (input, mask) pair at position idx.
        return (self.X[idx], self.Y[idx])
# Build the training set (60 images) and its loader.
dataset_train = MyDataset(60,'/content/drive/My Drive/A3/cat_data/cat_data')
trainloader = torch.utils.data.DataLoader(dataset_train, batch_size=20, shuffle=True)
# dataset_test = MyDataset(20,'/content/drive/My Drive/A3/cat_data/cat_data', 'Test')
# 1-channel in, 1-channel out U-Net trained with per-pixel MSE.
model = UnetModel(1, 1)
criterion = nn.MSELoss()
# NOTE(review): lr=0.7 is extremely high for Adam (typical ~1e-3) — confirm intent.
optimizer = torch.optim.Adam(model.parameters(), lr=0.7)
epochs = 10
model.train()
a = True  # (unused)
for e in range(epochs):
    running_loss = 0
    for images, labels in tqdm(trainloader):
        optimizer.zero_grad()
        images = images.unsqueeze(1)   # add channel dim: (N, 1, 128, 128)
        labels = labels.unsqueeze(1)
        labels = labels.float()        # MSELoss requires float targets
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: runs once per epoch after the loader is exhausted.
        # BUG FIX: output message typo "Traning" -> "Training".
        print(f"Training loss: {running_loss/len(trainloader)}")
# Build the test set (21 images) and evaluate the trained model on it.
dataset_test = MyDataset(21,'/content/drive/My Drive/A3/cat_data/cat_data', mode='Test')
testloader = torch.utils.data.DataLoader(dataset_test, batch_size=20, shuffle=True)
# NOTE(review): consider model.eval() here so BatchNorm uses running stats.
# BUG FIX: running_loss was not reset, so the last training epoch's sum was
# carried into the test total.
running_loss = 0
with torch.no_grad():
    for images, labels in tqdm(testloader):
        images = images.unsqueeze(1)   # add channel dim: (N, 1, 128, 128)
        labels = labels.unsqueeze(1)
        labels = labels.float()        # BUG FIX: long targets crash MSELoss; match the train loop
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        running_loss += loss.item()
print(f"Test loss: {running_loss/len(testloader)}")
```
| github_jupyter |
## Conditional Probability
- Conditional probability has many applications, we learn it by mentioning its application in text analysis
- Assume this small dataset is given:
<img src="spam_ham_data_set.png" width="600" height="600">
## Question: What is the probability that an email is spam? What is the probability that an email is ham?
- $P(spam) = ?$
- $P(ham) = ?$
## Question: We know an email is spam, what is the probability that "password" is a word in it? (What is the frequency of "password" in a spam email?)
- Hint: Create a dictionary for spam whose keys are the unique words in spam emails and whose values show the occurrence of each word
```
# Word -> total occurrence count over all spam emails in the toy dataset.
spam = {
    "password": 2,
    "review": 1,
    "send": 3,
    "us": 3,
    "your": 3,
    "account": 1
}
```
$P(password \mid spam) = 2/(2+1+3+3+3+1) = 2/13$
```
# or equivalently, computed from the word-count dictionary:
p_password_given_spam = spam['password']/sum(spam.values())
print(p_password_given_spam)
```
## Question: We know an email is ham, what is the probability that "password" is a word in it? (What is the frequency of "password" in a ham email?)
- Hint: Create a dictionary for ham whose keys are the unique words in ham emails and whose values show the occurrence of each word
```
# Word -> total occurrence count over all ham emails in the toy dataset.
ham = {
    "password": 1,
    "review": 2,
    "send": 1,
    "us": 1,
    "your": 2,
    "account": 0
}
```
$P(password \mid ham) = 1/(1+2+1+1+1+0) = 1/6$
```
# or equivalently, computed from the word-count dictionary:
p_password_given_ham = ham['password']/sum(ham.values())
print(p_password_given_ham)
```
## Question: Assume we have seen password in an email, what is the probability that the email be spam?
- $P(spam \mid password) = ?$
- Hint: Use Bayes' rule:
$P(spam \mid password) = (P(password \mid spam) P(spam))/ P(password)$
$P(password) = P(password \mid spam) P(spam) + P(password \mid ham) P(ham)$
```
# Priors from the dataset: 4 of the 6 emails are spam, 2 are ham.
p_spam = 4/6
p_ham = 2/6
# Law of total probability: P(password).
p_password = p_password_given_spam*p_spam + p_password_given_ham*p_ham
print(p_password)
# Bayes' rule: P(spam | password).
p_spam_given_password = p_password_given_spam*p_spam/p_password
print(p_spam_given_password)
```
## Activity: Do the above computation for each word by writing code
```
# Exercise scaffold: the "# TODO" placeholders below are intentionally
# incomplete (this cell will not run until the student fills them in).
p_spam = 4/6
p_ham = 2/6
ls1 = []
ls2 = []
for i in spam:
    print(i)
    p_word_given_spam = # TODO
    p_word_given_ham = # TODO
    # obtain the probability of each word by assuming the email is spam
    # obtain the probability of each word by assuming the email is ham
    #TODO
    # obtain the probability that for a seen word it belongs to spam email
    # obtain the probability that for a seen word it belongs to ham email
    #TODO
```
## Quiz: Compute the expected value of a fair dice
By Definition, the expected value of random events (a random variable) like rolling a dice is computed as:
$E(X) = \sum_{i=1}^{6}i * P(dice = i)$
<img src="dice.jpg" width="100" height="100">
1- For a fair die,
compute the probability that when you roll the die, 1 appears (P(dice = 1)),
compute the probability that when you roll the die, 2 appears (P(dice = 2)),
.
.
.
compute the probability that when you roll the die, 6 appears (P(dice = 6))
2- Compute $E(X)$ from the above steps.
### Answer:
The expected value for a fair dice is:
$E(X) = (1*1/6) + (2*1/6) + (3*1/6)+ (4*1/6) + (5*1/6) + (6*1/6)$
$E(X) = 3.5$
```
# We can show that E(X) is (approximately) the sample mean of the random variable.
import numpy as np
# Roll the dice 1000 times; randint's `high` bound is exclusive, so this
# draws uniformly from {1, ..., 6}.
# BUG FIX: pass integer bounds (the original passed floats 1.0/7.0, which
# numpy's randint only tolerates by implicit truncation).
dice = np.random.randint(low=1, high=7, size=1000)
print(dice)
# Compute the mean of the rolls two equivalent ways.
print(np.mean(dice))
print(sum(dice)/len(dice))
```
| github_jupyter |
# Chapter 12 - Principal Components Analysis with scikit-learn
This notebook contains code accompanying Chapter 12 Principal Components Analysis with scikit-learn in *Practical Discrete Mathematics* by Ryan T. White and Archana Tikayat Ray.
## Eigenvalues and eigenvectors, orthogonal bases
### Example: Pizza nutrition
```
import pandas as pd

# Nutrition facts for pizzas from several brands (one row per pizza).
dataset = pd.read_csv('pizza.csv')
dataset.head()
```
### Example: Computing eigenvalues and eigenvectors
```
import numpy as np

# Worked example: eigen-decomposition of a 2x2 symmetric matrix.
A = np.array([[3,1], [1,3]])
eigenvalues, eigenvectors = np.linalg.eig(A)
print("The eigenvalues are:\n ",eigenvalues)
print("The eigenvectors are:\n ", eigenvectors)
```
## The scikit-learn implementation of PCA
We will start by importing the dataset and then dropping the brand column from it. This is done to make sure that all our feature variables are numbers and hence can be scaled/normalized. We will then create another variable called target which will contain the names of the brands of pizzas.
```
import pandas as pd

dataset = pd.read_csv('pizza.csv')
# Drop the brand name column so every remaining feature is numeric and
# can be standardized.
df_num = dataset.drop(["brand"], axis=1)
# Keep the brand name column as the target variable.
target = dataset['brand']
```
Now that we have the dataset in order, we will then normalize the columns of the dataset to make sure that the mean for a variable is 0 and the variance is 1 and then we will run PCA on the dataset.
```
# Standardize each feature to mean 0, variance 1.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df_num)
scaled_data = scaler.transform(df_num)
# Apply PCA to the scaled data.
from sklearn.decomposition import PCA
# Reduce to 2 components so that we can have a 2D visualization.
pca = PCA(n_components = 2)
pca.fit(scaled_data)
# Project the scaled dataset onto the two principal components.
scaled_data_pca = pca.transform(scaled_data)
# Compare the shapes of the original and reduced datasets.
print("The dimensions of the original dataset is: ", scaled_data.shape)
print("The dimensions of the dataset after performing PCA is: ", scaled_data_pca.shape)
```
Now we have reduced our 7-dimensional dataset to its 2 principal components as can be seen from the dimensions shown above. We will move forward with plotting the principal components to check whether 2 principal components were enough to capture the variability in the dataset – the different nutritional content of pizzas produced by different companies.
```
# Scatter the two principal components, colored by brand.
# NOTE(review): positional arguments to sns.scatterplot were deprecated in
# newer seaborn; confirm the installed version accepts this call.
import matplotlib.pyplot as plt
import seaborn as sns
sns.scatterplot(scaled_data_pca[:,0], scaled_data_pca[:,1], target)
plt.legend(loc="best")
plt.gca().set_aspect("equal")
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.show()
```
Now, we will move on to perform PCA in a way where we do not choose the number of desired principal components, rather we choose the number of principal components that add up to a certain desired variance. The Python implementation of this is very similar to the previous way with very slight changes to the code as shown below.
```
import pandas as pd

dataset = pd.read_csv('pizza.csv')
# Drop the brand name column before standardizing the data.
df_num = dataset.drop(["brand"], axis=1)
# Keep the brand name column as the target variable.
target = dataset['brand']
# Scaling the data (Step 1): standardize features to mean 0, variance 1.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df_num)
scaled_data = scaler.transform(df_num)
# Apply PCA to the scaled data.
from sklearn.decomposition import PCA
# n_components < 1 is interpreted as a target fraction of explained
# variance: keep as many components as needed to reach 95%.
pca = PCA(n_components = 0.95)
pca.fit(scaled_data)
# Project the scaled dataset onto the selected components.
scaled_data_pca = pca.transform(scaled_data)
# Compare the shapes of the original and reduced datasets.
print("The dimensions of the original dataset are: ", scaled_data.shape)
print("The dimensions of the dataset after performing PCA is: ", scaled_data_pca.shape)
```
As we can see from the above output, 3 principal components are required to capture 95% of the variance in the dataset. This means that by choosing 2 principal directions previously, we were capturing < 95% of the variance in the dataset. Despite capturing < 95% of the variance, we were able to visualize the fact that the pizzas produced by different companies have different nutritional contents.
## An application to real-world data
The first step is to import the data as shown below. It is going to take some time since it is a big dataset, hence hang tight. The dataset contains images of 70000 digits (0-9) where each image has 784 features.
```
# Importing the dataset (MNIST: 70000 digit images with 784 pixel features each;
# the download can take a while)
from sklearn.datasets import fetch_openml
mnist_data = fetch_openml('mnist_784', version = 1)
# Choosing the independent (X) and dependent variables (y)
X,y = mnist_data["data"], mnist_data["target"]
```
Now that we have the dataset imported, we will move on to visualize the image of a digit to get familiar with the dataset. For visualization, we will use the `matplotlib` library. We will visualize the 50000th digit image. Feel free to check out other digit images of your choice – make sure to use an index between 0 and 69999. We will set colormap to "binary" to output a grayscale image.
```
# Plotting one of the digits
import matplotlib.pyplot as plt
import numpy as np
plt.figure(1)
# Plotting the 50000th digit. fetch_openml returns a pandas DataFrame on
# recent scikit-learn versions, where X[50000] would be a (missing) column
# lookup; converting to an array makes positional row indexing work for
# both the array and DataFrame return types.
digit = np.asarray(X)[50000]
# Reshaping the 784 features into a 28x28 matrix
digit_image = digit.reshape(28, 28)
plt.imshow(digit_image, cmap='binary')
plt.show()
```
Next, we will apply PCA to this dataset to reduce its dimension from $28*28=784$ to a lower number. We will plot the proportion of the variation that is reflected by PCA-reduced dimensional data of different dimensions.
```
# Scaling the data so every pixel feature has zero mean and unit variance
from sklearn.preprocessing import StandardScaler
scaled_mnist_data = StandardScaler().fit_transform(X)
print(scaled_mnist_data.shape)

# Applying PCA to our dataset, keeping all 784 components so we can inspect
# how much variance each one explains
from sklearn.decomposition import PCA
pca = PCA(n_components=784)
mnist_data_pca = pca.fit_transform(scaled_mnist_data)

# Per-component fraction of explained variance. PCA already exposes this as
# explained_variance_ratio_; with all components kept it equals the manual
# explained_variance_ / sum(explained_variance_) computation it replaces.
import numpy as np
variance_percentage = pca.explained_variance_ratio_
# Calculating cumulative variance
cumulative_variance = np.cumsum(variance_percentage)

# Plotting cumulative variance
import matplotlib.pyplot as plt
plt.figure(2)
plt.plot(cumulative_variance)
plt.xlabel('Number of principal components')
plt.ylabel('Cumulative variance explained by PCs')
plt.grid()
plt.show()
```
| github_jupyter |
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
$ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
<font style="font-size:28px;" align="left"><b> <font color="blue"> Solutions for </font>Phase Kickback </b></font>
<br>
_prepared by Abuzer Yakaryilmaz_
<br><br>
<a id="task1"></a>
<h3> Task 1</h3>
Create a quantum circuit with two qubits, say $ q[1] $ and $ q[0] $ in the reading order of Qiskit.
We start in quantum state $ \ket{01} $:
- set the state of $ q[1] $ to $ \ket{0} $, and
- set the state of $ q[0] $ to $ \ket{1} $.
Apply Hadamard to both qubits.
Apply CNOT operator, where the controller qubit is $ q[1] $ and the target qubit is $ q[0] $.
Apply Hadamard to both qubits.
Measure the outcomes.
<h3> Solution </h3>
```
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

q = QuantumRegister(2, "q")  # quantum register with 2 qubits
c = ClassicalRegister(2, "c")  # classical register with 2 bits
qc = QuantumCircuit(q, c)  # quantum circuit with quantum and classical registers

# the up qubit is in |0>
# set the down qubit to |1>
qc.x(q[0])  # apply x-gate (NOT operator)
qc.barrier()

# apply Hadamard to both qubits.
qc.h(q[0])
qc.h(q[1])

# apply CNOT operator, where the controller qubit is the up qubit and the
# target qubit is the down qubit. Addressed via the register handles for
# consistency with every other gate in this circuit (the original used the
# bare integer indices qc.cx(1, 0), which target the same qubits).
qc.cx(q[1], q[0])

# apply Hadamard to both qubits.
qc.h(q[0])
qc.h(q[1])

# measure both qubits
qc.measure(q, c)

# draw the circuit in Qiskit reading order
display(qc.draw(output='mpl', reverse_bits=True))

# execute the circuit 100 times in the local simulator
job = execute(qc, Aer.get_backend('qasm_simulator'), shots=100)
counts = job.result().get_counts(qc)
print(counts)
```
<a id="task2"></a>
<h3> Task 2 </h3>
Create a circuit with 7 qubits, say $ q[6],\ldots,q[0] $ in the reading order of Qiskit.
Set the states of the top six qubits to $ \ket{0} $.
Set the state of the bottom qubit to $ \ket{1} $.
Apply Hadamard operators to all qubits.
Apply CNOT operator ($q[1]$,$q[0]$)
<br>
Apply CNOT operator ($q[4]$,$q[0]$)
<br>
Apply CNOT operator ($q[5]$,$q[0]$)
Apply Hadamard operators to all qubits.
Measure all qubits.
For each CNOT operator, is there a phase-kickback effect?
<h3> Solution </h3>
```
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# Build a 7-qubit circuit with a matching 7-bit classical register.
q = QuantumRegister(7, "q")
c = ClassicalRegister(7)
qc = QuantumCircuit(q, c)

# All qubits start in |0>; flip only the bottom qubit q[0] to |1>.
qc.x(0)
qc.barrier()

# Hadamard on every qubit.
for qubit in q:
    qc.h(qubit)
qc.barrier()

# Three CNOTs, all targeting q[0]. Because q[0] is in |->, each CNOT kicks
# a phase back onto its controller (q[1], q[4], q[5]).
for ctrl in (1, 4, 5):
    qc.cx(q[ctrl], q[0])
qc.barrier()

# Hadamard on every qubit again.
for qubit in q:
    qc.h(qubit)
qc.barrier()

# measure all qubits
qc.measure(q, c)

# draw the circuit in Qiskit reading order
display(qc.draw(output='mpl', reverse_bits=True))

# execute the circuit 100 times in the local simulator
job = execute(qc, Aer.get_backend('qasm_simulator'), shots=100)
counts = job.result().get_counts(qc)
print(counts)
```
| github_jupyter |
```
import numpy as np
import torch
# Pick the GPU when available; models and tensors should be moved to `device`.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
device

import torchvision
from torchvision import models
from torchvision import transforms
import os
import glob
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from torchvision import models
from random import randint

# tensor -> PIL image
unloader = transforms.ToPILImage()
# flip = transforms.RandomHorizontalFlip(p=1)
class ToyDataset(Dataset):
    """Paired dark/light image dataset; pairs are matched by filename.

    Both directories are expected to contain files with identical names;
    a random resize (shorter side 600-700 px) followed by a fixed 512 px
    center crop provides mild scale augmentation on every access.
    """

    def __init__(self, dark_img_dir, light_img_dir):
        self.dark_img_dir = dark_img_dir
        self.light_img_dir = light_img_dir
        # Cache the sorted filename list once: sorting makes indexing
        # deterministic (os.listdir order is arbitrary) and caching avoids
        # re-listing the directory on every __getitem__ call, which the
        # original did.
        self.filenames = sorted(os.listdir(self.light_img_dir))
        self.n_dark = len(os.listdir(self.dark_img_dir))
        self.n_light = len(self.filenames)

    def __len__(self):
        # Only complete (dark, light) pairs can be served.
        return min(self.n_dark, self.n_light)

    def __getitem__(self, idx):
        filename = self.filenames[idx]
        # Build both paths from the shared filename (the original f-strings
        # had been corrupted and never interpolated `filename`).
        light_img_path = f"{self.light_img_dir}{filename}"
        light = Image.open(light_img_path).convert("RGB")
        dark_img_path = f"{self.dark_img_dir}{filename}"
        dark = Image.open(dark_img_path).convert("RGB")
        # Random scale, fixed crop, then tensor conversion; the same
        # transform instance is applied to both images so they stay aligned.
        s = randint(600, 700)
        transform = transforms.Compose([
            transforms.Resize(s),
            transforms.CenterCrop(512),
            transforms.ToTensor(),
        ])
        light = transform(light)
        dark = transform(dark)
        return dark, light
batch_size = 1

train_dark_dir = f"./data/train/dark/"
train_light_dir = f"./data/train/light/"
training_set = ToyDataset(train_dark_dir,train_light_dir)
training_generator = DataLoader(training_set, batch_size=batch_size, shuffle=True)

val_dark_dir = f"./data/test/dark/"
val_light_dir = f"./data/test/light/"
validation_set = ToyDataset(val_dark_dir, val_light_dir)
validation_generator = DataLoader(validation_set, batch_size=batch_size, shuffle=True)

# generate training images
# Each pass over the loader re-samples the random resize in __getitem__,
# so `cycle` passes write `cycle` augmented variants of every pair.
n = 1
cycle = 5
dark_save_path = "./data_augment/train/dark/"
light_save_path = "./data_augment/train/light/"
for i in range(cycle):
    for item in training_generator:
        dark, light = item
        # Drop the batch dimension (batch_size == 1) and convert tensors
        # back to PIL images for saving.
        dark = unloader(dark[0,])
        light = unloader(light[0,])
        dark.save(dark_save_path+f"{n}.jpg")
        light.save(light_save_path+f"{n}.jpg")
        n += 1

# generate testing images (a single un-repeated pass)
n = 1
cycle = 1
dark_save_path = "./data_augment/test/dark/"
light_save_path = "./data_augment/test/light/"
for i in range(cycle):
    for item in validation_generator:
        dark, light = item
        dark = unloader(dark[0,])
        light = unloader(light[0,])
        dark.save(dark_save_path+f"{n}.jpg")
        light.save(light_save_path+f"{n}.jpg")
        n += 1
```
| github_jupyter |
## Exercise 2 - Running a power flow calculation and adding scenario data for electric vehicles to the grid
**The goals for this exercise are:**
- load the grid model from exercise 1
- run a power flow calculation
- display transformer, line and bus results
- determine maximum line loading and minimum bus voltage
- create 65 loads with random power demands between 0 and 11 kW
- each load represents an 11 kW charging point for electric vehicles
- connect these loads to random buses to model a future scenario for the example grid
- run a power flow calculation again and compare the results before and after connecting the charging points to the grid
**Helpful resources for this exercise:**
- https://github.com/e2nIEE/pandapower/blob/master/tutorials/minimal_example.ipynb
- https://github.com/e2nIEE/pandapower/blob/develop/tutorials/create_simple.ipynb
- https://github.com/e2nIEE/pandapower/blob/develop/tutorials/powerflow.ipynb
### Step 1 - load the grid model of exercise 1 from the json file
hint: use pp.from_json(FILENAME.json). You need the import the pandapower module again.
### Step 2 - run a power flow calculation
### Step 3 - display the transformer results
### Step 4 - display the line results
### Step 5 - display the bus results
### Step 6 - display the maximum line loading
hint: you can determine the maximum value of a column by running net.TABLE_NAME.COLUMN_NAME.max()
### Step 7 - display the minimum bus voltage
hint: you can determine the minimum value of a column by running net.TABLE_NAME.COLUMN_NAME.min()
### Step 8 - create 65 loads with random power demands between 0 and 11 kW and connect them to random buses
hint: you just need to fill in the "create load" command in the for loop.
```
# just run this cell to create the list of 65 random power demand values
import numpy as np
np.random.seed(0)
# Demands in MW: integers 0..11 kW scaled down by 1000.
p_mw_values = list(np.random.randint(0, 12, 65)/1000)
print(p_mw_values)

for p_mw in p_mw_values:
    bus = np.random.randint(2,7,1)[0] # chooses a bus index between 2 and 6
    # Each load models an 11 kW charging point attached to a random bus.
    # (This line was left as a fill-in placeholder, which is a syntax error;
    # pp.create_load with bus, p_mw and name is the intended call.)
    load = pp.create_load(net, bus=bus, p_mw=p_mw, name="charging_point")
net.load
```
### Step 9 - run a power flow calculation again, to get the new results for the grid with charging points
### Step 10 - determine the transformer loading, maximum line loading and minimum bus voltage and compare them to the results without charging points
### Step 11 - save the grid model as a json file with a new name
hint: use the method pp.to_json(net, "FILENAME.json").
| github_jupyter |
# Global Imports
```
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.pyplot import subplots
```
### External Package Imports
```
import os as os
import pickle as pickle
import pandas as pd
```
### Module Imports
Here I am using a few of my own packages; they are available on GitHub under [__theandygross__](https://github.com/theandygross) and should all be installable by <code>python setup.py</code>.
```
from Stats.Scipy import *
from Stats.Survival import *
from Helpers.Pandas import *
from Helpers.LinAlg import *
from Figures.FigureHelpers import *
from Figures.Pandas import *
from Figures.Boxplots import *
from Figures.Regression import *
#from Figures.Survival import draw_survival_curve, survival_and_stats
#from Figures.Survival import draw_survival_curves
#from Figures.Survival import survival_stat_plot
import Data.Firehose as FH
from Data.Containers import get_run
```
### Import Global Parameters
* These need to be changed before you will be able to successfully run this code
```
import NotebookImport
from Global_Parameters import *
```
### Tweaking Display Parameters
```
# The bare 'precision' alias was removed in pandas 2.x; the fully-qualified
# key works on both old and new versions.
pd.set_option('display.precision', 3)
pd.set_option('display.width', 300)

plt.rcParams['font.size'] = 12

'''Color schemes for paper taken from http://colorbrewer2.org/'''
# 'axes.color_cycle' was removed in matplotlib 2.0; read the default color
# list out of its replacement, 'axes.prop_cycle'.
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
colors_st = ['#CA0020', '#F4A582', '#92C5DE', '#0571B0']
colors_th = ['#E66101', '#FDB863', '#B2ABD2', '#5E3C99']

import seaborn as sns
sns.set_context('paper', font_scale=1.5)
sns.set_style('white')
```
### Read in All of the Expression Data
This reads in data that was pre-processed in the [./Preprocessing/init_RNA](../Notebooks/init_RNA.ipynb) notebook.
```
codes = pd.read_hdf(RNA_SUBREAD_STORE, 'codes')
matched_tn = pd.read_hdf(RNA_SUBREAD_STORE, 'matched_tn')
rna_df = pd.read_hdf(RNA_SUBREAD_STORE, 'all_rna')

data_portal = pd.read_hdf(RNA_STORE, 'matched_tn')

# Restrict both sources to their shared genes and patients.
genes = data_portal.index.intersection(matched_tn.index)
pts = data_portal.columns.intersection(matched_tn.columns)

# .ix was removed in pandas 1.0. reindex reproduces its tolerant label-based
# lookup for rna_df (genes come from the other two frames and are not
# guaranteed present there); matched_tn is subset with .loc since `genes`
# is an intersection of its own index.
rna_df = rna_df.reindex(genes)
matched_tn = matched_tn.loc[genes, pts]
```
### Read in Gene-Sets for GSEA
```
from Data.Annotations import unstack_geneset_csv
gene_sets = unstack_geneset_csv(GENE_SETS)
# .ix was removed in pandas 1.0; reindex reproduces its label-based lookup,
# inserting NaN rows for genes missing from the gene-set matrix, which the
# fillna(0) then zeroes out.
gene_sets = gene_sets.reindex(rna_df.index).fillna(0)
```
Initialize function for calling model-based gene set enrichment
```
from rpy2 import robjects
from rpy2.robjects import pandas2ri
pandas2ri.activate()

# Load the R 'mgsa' package (model-based gene set analysis).
mgsa = robjects.packages.importr('mgsa')

# Convert each gene set (a 0/1 column) into an R character vector of its
# member genes. NOTE(review): ti() presumably returns the index labels where
# the mask is true -- confirm against Helpers.Pandas.
gs_r = robjects.ListVector({i: robjects.StrVector(list(ti(g>0))) for i,g in
                            gene_sets.iteritems()})

def run_mgsa(vec):
    """Run MGSA on a boolean hit vector and return the per-set results frame."""
    # Hit names as an R character vector.
    v = robjects.r.c(*ti(vec))
    r = mgsa.mgsa(v, gs_r)
    res = pandas2ri.ri2pandas(mgsa.setsResults(r))
    return res
```
### Function Tweaks
Running the binomial test across 450k probes in the same test space, we rerun the same test a lot. Here I memoize the function to cache results and not recompute them. This eats up a couple GB of memory but should be reasonable.
```
# scipy.stats.binom_test was removed in SciPy 1.12; fall back to its
# binomtest replacement while keeping the old signature and p-value return.
try:
    from scipy.stats import binom_test
except ImportError:
    from scipy.stats import binomtest

    def binom_test(x, n=None, p=0.5):
        """Compatibility shim: two-sided p-value, like the removed binom_test."""
        return binomtest(x, n, p).pvalue


def memoize(f):
    """Cache f's results keyed on its positional arguments.

    Generalized from the original fixed 3-argument helper: any hashable
    positional arguments work, so the existing binom_test_mem(x, y, z)
    calls behave exactly as before while the decorator is reusable.
    """
    memo = {}

    def helper(*args):
        # Compute once per distinct argument tuple; subsequent identical
        # calls hit the cache (trades a couple GB of memory for speed when
        # screening 450k probes).
        if args not in memo:
            memo[args] = f(*args)
        return memo[args]
    return helper

binom_test_mem = memoize(binom_test)
def binomial_test_screen(df, fc=1.5, p=.5):
    """
    Run a binomial test on a DataFrame.

    df:
        DataFrame of measurements. Should have a multi-index with
        subjects on the first level and tissue type ('01' or '11')
        on the second level.
    fc:
        Fold-change cutoff to use
    p:
        Null-hypothesis success probability for the binomial test.

    Returns a DataFrame with columns num_ox (up counts), num_dx (tested
    counts), frac (num_ox / num_dx) and p (binomial p-value), one row per
    feature that passed the fold-change filter at least once.
    """
    # Tumor ('01') vs. matched normal ('11') measurements. Positional
    # axis/level arguments to xs/concat were deprecated and then removed
    # in pandas 2.x, so everything below uses keywords.
    a, b = df.xs('01', axis=1, level=1), df.xs('11', axis=1, level=1)
    dx = a - b
    # Keep only paired differences exceeding the fold-change cutoff
    # (data are in log2 space).
    dx = dx[dx.abs() > np.log2(fc)]
    n = dx.count(axis=1)
    counts = (dx > 0).sum(axis=1)
    cn = pd.concat([counts, n], axis=1)
    # Drop features with no informative pairs at all.
    cn = cn[cn.sum(axis=1) > 0]
    b_test = cn.apply(lambda s: binom_test_mem(s[0], s[1], p), axis=1)
    dist = (1. * cn[0] / cn[1])
    tab = pd.concat([cn[0], cn[1], dist, b_test],
                    keys=['num_ox', 'num_dx', 'frac', 'p'],
                    axis=1)
    return tab
```
Added linewidth and number of bins arguments. This should get pushed eventually.
```
def draw_dist(vec, split=None, ax=None, legend=True, colors=None, lw=2, bins=300):
    """
    Draw a smooth distribution from data with an optional splitting factor.
    """
    _, ax = init_ax(ax)
    if split is None:
        # No grouping requested: treat the whole vector as a single group 's'
        # and, if a single color was given, key it under that group.
        split = pd.Series('s', index=vec.index)
        colors = {'s': colors} if colors is not None else None
    for label, values in vec.groupby(split):
        density = smooth_dist(values, bins=bins)
        if colors is None:
            density.plot(label=label, lw=lw, ax=ax)
        else:
            density.plot(label=label, lw=lw, ax=ax, color=colors[label])
    # Only show a legend when there is more than one group to distinguish.
    if legend and len(split.unique()) > 1:
        ax.legend(loc='upper left', frameon=False)
```
Some helper functions for fast calculation of odds ratios on matricies.
```
def odds_ratio_df(a, b):
    """Row-wise 2x2 contingency counts and odds ratios for two binary
    DataFrames of identical shape.

    Returns (odds_ratio, counts) where counts has columns
    '00', '01', '10', '11' giving the co-occurrence cell counts.
    """
    a = a.astype(int)
    b = b.astype(int)
    # 1 where the value is 0, 0 elsewhere.
    negate = lambda v: (v == 0).astype(int)
    both = (a.add(b) == 2).sum(axis=1)                     # a=1, b=1
    only_a = (a.add(negate(b)) == 2).sum(axis=1)           # a=1, b=0
    only_b = (negate(a).add(b) == 2).sum(axis=1)           # a=0, b=1
    neither = (negate(a).add(negate(b)) == 2).sum(axis=1)  # a=0, b=0
    odds_ratio = (1. * both * neither) / (1. * only_a * only_b)
    counts = pd.concat([neither, only_b, only_a, both], axis=1,
                       keys=['00', '01', '10', '11'])
    return odds_ratio, counts
def fet(s):
    """Fisher exact test p-value from a row of 2x2 contingency counts
    keyed '00', '01', '10', '11' (as produced by odds_ratio_df)."""
    table = [[s['00'], s['01']],
             [s['10'], s['11']]]
    _, p_value = stats.fisher_exact(table)
    return p_value
```
#### filter_pathway_hits
```
def filter_pathway_hits(hits, gs, cutoff=.00001):
    '''
    Takes a vector of p-values and a DataFrame of binary defined gene-sets.
    Uses the ordering defined by hits to do a greedy filtering on the gene
    sets: a set is kept only if it does not overlap significantly (chi2
    p < cutoff) with any previously kept set.
    '''
    kept = [hits.index[0]]
    for gene_set in hits.index:
        # Membership in `kept` is loop-invariant w.r.t. the inner scan, so
        # check it once up front (the original re-tested it per comparison).
        if gene_set in kept:
            continue
        redundant = any(chi2_cont_test(gs[gene_set], gs[kept_set])['p'] < cutoff
                        for kept_set in kept)
        if not redundant:
            kept.append(gene_set)
    # .ix was removed in pandas 1.0; every label in `kept` comes from
    # hits.index, so label-based .loc is an exact replacement.
    hits_filtered = hits.loc[kept]
    return hits_filtered
```
| github_jupyter |
## Taxi Cab Classification (prior to TF2)
This notebook presents a simplified version of Kubeflow's *taxi cab clasification* pipeline, built upon TFX components.
Here all the pipeline components are stripped down to their core to showcase how to run it in a self-contained local Juyter Noteobok.
Additionally, the pipeline has been upgraded to work with Python3 and all major libraries (Tensorflow, Tensorflow Transform, Tensorflow Model Analysis, Tensorflow Data Validation, Apache Beam) have been bumped to their latests versions.
```
!pip install tensorflow==1.15.0 --user
!pip install apache_beam tensorflow_transform tensorflow_model_analysis tensorflow_data_validation --user
```
You may have to restart the workbook after installing these packages
```
import os
import shutil
import logging
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_model_analysis as tfma
import tensorflow_data_validation as tfdv
from apache_beam.io import textio
from apache_beam.io import tfrecordio
from tensorflow_transform.beam import impl as beam_impl
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.coders.csv_coder import CsvCoder
from tensorflow_transform.coders.example_proto_coder import ExampleProtoCoder
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import metadata_io
DATA_DIR = 'data/'
TRAIN_DATA = os.path.join(DATA_DIR, 'taxi-cab-classification/train.csv')
EVALUATION_DATA = os.path.join(DATA_DIR, 'taxi-cab-classification/eval.csv')

# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = ['trip_start_hour', 'trip_start_day', 'trip_start_month']

DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']

# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']

# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = ['pickup_census_tract', 'dropoff_census_tract', 'payment_type', 'company',
                      'pickup_community_area', 'dropoff_community_area']

# allow nan values in these features.
OPTIONAL_FEATURES = ['dropoff_latitude', 'dropoff_longitude', 'pickup_census_tract', 'dropoff_census_tract',
                     'company', 'trip_seconds', 'dropoff_community_area']

LABEL_KEY = 'tips'
FARE_KEY = 'fare'

# training parameters
EPOCHS = 1
STEPS = 3
BATCH_SIZE = 32
HIDDEN_LAYER_SIZE = '1500'  # comma-separated layer sizes, parsed before training
LEARNING_RATE = 0.1

# Verbose TF logging so training progress is visible in the notebook.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# tf.get_logger().setLevel(logging.ERROR)
```
#### Data Validation
For an overview of the TFDV functions: https://www.tensorflow.org/tfx/tutorials/data_validation/chicago_taxi
```
vldn_output = os.path.join(DATA_DIR, 'validation')

# TODO: Understand why this was used in the conversion to the output json
# key columns: list of the names for columns that should be treated as unique keys.
key_columns = ['trip_start_timestamp']

# read the first line of the csv to have an ordered list of column names
# (the Schema will scramble the features)
with open(TRAIN_DATA) as f:
    column_names = f.readline().strip().split(',')

stats = tfdv.generate_statistics_from_csv(data_location=TRAIN_DATA)
schema = tfdv.infer_schema(stats)

eval_stats = tfdv.generate_statistics_from_csv(data_location=EVALUATION_DATA)
# Compare the eval statistics against the schema inferred from training data.
anomalies = tfdv.validate_statistics(eval_stats, schema)

# Log anomalies
for feature_name, anomaly_info in anomalies.anomaly_info.items():
    logging.getLogger().error(
        'Anomaly in feature "{}": {}'.format(
            feature_name, anomaly_info.description))

# show inferred schema
tfdv.display_schema(schema=schema)

# Resolve anomalies
# Relax 'company': accept values covering at least 90% of the domain mass.
company = tfdv.get_feature(schema, 'company')
company.distribution_constraints.min_domain_mass = 0.9

# Add new value to the domain of feature payment_type.
payment_type_domain = tfdv.get_domain(schema, 'payment_type')
payment_type_domain.value.append('Prcard')

# Validate eval stats after updating the schema
updated_anomalies = tfdv.validate_statistics(eval_stats, schema)
tfdv.display_anomalies(updated_anomalies)
```
#### Data Transformation
For an overview of the TFT functions: https://www.tensorflow.org/tfx/tutorials/transform/simple
```
def to_dense(tensor):
    """Takes as input a SparseTensor and return a Tensor with correct default value

    Args:
      tensor: tf.SparseTensor

    Returns:
      tf.Tensor with default value
    """
    # Pass dense tensors through untouched.
    if not isinstance(tensor, tf.sparse.SparseTensor):
        return tensor
    # Pick a type-appropriate default for missing sparse entries.
    if tensor.dtype == tf.string:
        default_value = ''
    elif tensor.dtype == tf.float32:
        default_value = 0.0
    elif tensor.dtype == tf.int32:
        default_value = 0
    else:
        raise ValueError(f"Tensor type not recognized: {tensor.dtype}")

    # Resolves the in-code TODO: tf.sparse.to_dense replaces the deprecated
    # tf.sparse_to_dense. It densifies to the tensor's own dense_shape,
    # which for the [batch, 1] sparse features this pipeline produces is
    # identical to the explicit [dense_shape[0], 1] the old call built.
    return tf.squeeze(tf.sparse.to_dense(tensor, default_value=default_value), axis=1)
def preprocess_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in DENSE_FLOAT_FEATURE_KEYS:
        # Preserve this feature as a dense float, setting nan's to the mean.
        outputs[key] = tft.scale_to_z_score(to_dense(inputs[key]))

    for key in VOCAB_FEATURE_KEYS:
        # Build a vocabulary for this feature; non-string features are
        # stringified first so they can be vocabulary-encoded.
        if inputs[key].dtype == tf.string:
            vocab_tensor = to_dense(inputs[key])
        else:
            vocab_tensor = tf.as_string(to_dense(inputs[key]))
        outputs[key] = tft.compute_and_apply_vocabulary(
            vocab_tensor, vocab_filename='vocab_' + key,
            top_k=VOCAB_SIZE, num_oov_buckets=OOV_SIZE)

    for key in BUCKET_FEATURE_KEYS:
        # Quantile-bucketize the continuous location features.
        outputs[key] = tft.bucketize(to_dense(inputs[key]), FEATURE_BUCKET_COUNT)

    for key in CATEGORICAL_FEATURE_KEYS:
        outputs[key] = tf.cast(to_dense(inputs[key]), tf.int64)

    taxi_fare = to_dense(inputs[FARE_KEY])
    taxi_tip = to_dense(inputs[LABEL_KEY])
    # Test if the tip was > 20% of the fare.
    tip_threshold = tf.multiply(taxi_fare, tf.constant(0.2))
    outputs[LABEL_KEY] = tf.logical_and(
        tf.logical_not(tf.math.is_nan(taxi_fare)),
        tf.greater(taxi_tip, tip_threshold))

    for key in outputs:
        # Integerize the boolean label via a vocabulary so the estimator
        # receives class ids.
        if outputs[key].dtype == tf.bool:
            outputs[key] = tft.compute_and_apply_vocabulary(tf.as_string(outputs[key]),
                                                            vocab_filename='vocab_' + key)

    return outputs
trns_output = os.path.join(DATA_DIR, "transformed")
# Start from a clean output directory on every run.
if os.path.exists(trns_output):
    shutil.rmtree(trns_output)

tft_input_metadata = dataset_metadata.DatasetMetadata(schema)

runner = 'DirectRunner'
with beam.Pipeline(runner, options=None) as p:
    with beam_impl.Context(temp_dir=os.path.join(trns_output, 'tmp')):
        converter = CsvCoder(column_names, tft_input_metadata.schema)

        # READ TRAIN DATA
        train_data = (
            p
            | 'ReadTrainData' >> textio.ReadFromText(TRAIN_DATA, skip_header_lines=1)
            | 'DecodeTrainData' >> beam.Map(converter.decode))

        # TRANSFORM TRAIN DATA (and get transform_fn function)
        transformed_dataset, transform_fn = (
            (train_data, tft_input_metadata) | beam_impl.AnalyzeAndTransformDataset(preprocess_fn))
        transformed_data, transformed_metadata = transformed_dataset

        # SAVE TRANSFORMED TRAIN DATA
        _ = transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(
            os.path.join(trns_output, 'train'),
            coder=ExampleProtoCoder(transformed_metadata.schema))

        # READ EVAL DATA
        eval_data = (
            p
            | 'ReadEvalData' >> textio.ReadFromText(EVALUATION_DATA, skip_header_lines=1)
            | 'DecodeEvalData' >> beam.Map(converter.decode))

        # TRANSFORM EVAL DATA (using previously created transform_fn function,
        # so eval uses the statistics analyzed from the training data)
        eval_dataset = (eval_data, tft_input_metadata)
        transformed_eval_data, transformed_metadata = (
            (eval_dataset, transform_fn) | beam_impl.TransformDataset())

        # SAVE EVAL DATA
        _ = transformed_eval_data | 'WriteEvalData' >> tfrecordio.WriteToTFRecord(
            os.path.join(trns_output, 'eval'),
            coder=ExampleProtoCoder(transformed_metadata.schema))

        # SAVE transform_fn FUNCTION FOR LATER USE
        # TODO: check out what is the transform function (transform_fn) that came from previous step
        _ = (transform_fn | 'WriteTransformFn' >> transform_fn_io.WriteTransformFn(trns_output))

        # SAVE TRANSFORMED METADATA
        metadata_io.write_metadata(
            metadata=tft_input_metadata,
            path=os.path.join(trns_output, 'metadata'))
```
#### Train
Estimator API: https://www.tensorflow.org/guide/premade_estimators
```
def training_input_fn(transformed_output, transformed_examples, batch_size, target_name):
    """
    Args:
      transformed_output: tft.TFTransformOutput
      transformed_examples: Base filename of examples
      batch_size: Batch size.
      target_name: name of the target column.

    Returns:
      The input function for training or eval.
    """
    # Stream shuffled batches of already-transformed TFRecord examples,
    # parsed with the feature spec saved by the transform step.
    dataset = tf.data.experimental.make_batched_features_dataset(
        file_pattern=transformed_examples,
        batch_size=batch_size,
        features=transformed_output.transformed_feature_spec(),
        reader=tf.data.TFRecordDataset,
        shuffle=True)
    # TF1-style one-shot iterator: yields one (features dict) per call.
    transformed_features = dataset.make_one_shot_iterator().get_next()
    # Split the label tensor out of the feature dict.
    transformed_labels = transformed_features.pop(target_name)
    return transformed_features, transformed_labels
def get_feature_columns():
    """Callback that returns a list of feature columns for building a tf.estimator.

    Returns:
      A list of tf.feature_column.
    """
    return (
        # Z-scored floats pass straight through as numeric columns.
        [tf.feature_column.numeric_column(key, shape=()) for key in DENSE_FLOAT_FEATURE_KEYS] +
        # Vocab features were already integerized by tf.transform, so identity
        # columns (one-hot via indicator) over vocab + OOV buckets suffice.
        [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key, num_buckets=VOCAB_SIZE + OOV_SIZE)) for key in VOCAB_FEATURE_KEYS] +
        # Bucketized location features: one bucket id per example.
        [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key, num_buckets=FEATURE_BUCKET_COUNT, default_value=0)) for key in BUCKET_FEATURE_KEYS] +
        # Calendar categoricals, each with its known maximum cardinality.
        [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key, num_buckets=num_buckets, default_value=0)) for key, num_buckets in zip(CATEGORICAL_FEATURE_KEYS, MAX_CATEGORICAL_FEATURE_VALUES)]
    )
training_output = os.path.join(DATA_DIR, "training")
# Start from a clean model directory on every run.
if os.path.exists(training_output):
    shutil.rmtree(training_output)

# Parse the comma-separated layer-size string into a list of ints.
hidden_layer_size = [int(x.strip()) for x in HIDDEN_LAYER_SIZE.split(',')]

tf_transform_output = tft.TFTransformOutput(trns_output)

# Set how often to run checkpointing in terms of steps.
config = tf.estimator.RunConfig(save_checkpoints_steps=1000)

# The label was vocabulary-encoded during transform, so its vocabulary size
# gives the number of classes.
n_classes = tf_transform_output.vocabulary_size_by_name("vocab_" + LABEL_KEY)

# Create estimator
estimator = tf.estimator.DNNClassifier(
    feature_columns=get_feature_columns(),
    hidden_units=hidden_layer_size,
    n_classes=n_classes,
    config=config,
    model_dir=training_output)

# TODO: Simplify all this: https://www.tensorflow.org/guide/premade_estimators
estimator.train(input_fn=lambda: training_input_fn(
    tf_transform_output,
    os.path.join(trns_output, 'train' + '*'),
    BATCH_SIZE,
    "tips"),
    steps=STEPS)

eval_result = estimator.evaluate(input_fn=lambda: training_input_fn(
    tf_transform_output,
    os.path.join(trns_output, 'eval' + '*'),
    BATCH_SIZE,
    "tips"),
    steps=50)
print(eval_result)
```
#### Model Analysis
TF Model Analysis docs: https://www.tensorflow.org/tfx/model_analysis/get_started
```
# TODO: Implement model load and params analysis
def eval_input_receiver_fn(transformed_output):
    """Build everything needed for the tf-model-analysis to run the model.

    Args:
      transformed_output: tft.TFTransformOutput

    Returns:
      EvalInputReceiver function, which contains:
        - Tensorflow graph which parses raw untranformed features, applies the
          tf-transform preprocessing operators.
        - Set of raw, untransformed features.
        - Label against which predictions will be compared.
    """
    # Placeholder for serialized tf.Example protos fed in at analysis time.
    serialized_tf_example = tf.compat.v1.placeholder(
        dtype=tf.string, shape=[None], name='input_example_tensor')
    features = tf.io.parse_example(serialized_tf_example, transformed_output.raw_feature_spec())
    # Apply the saved tf.transform graph so evaluation sees exactly the
    # same features as training did.
    transformed_features = transformed_output.transform_raw_features(features)
    receiver_tensors = {'examples': serialized_tf_example}
    return tfma.export.EvalInputReceiver(
        features=transformed_features,
        receiver_tensors=receiver_tensors,
        labels=transformed_features[LABEL_KEY])
# EXPORT MODEL
# Export a TFMA-specific eval SavedModel alongside the trained estimator.
eval_model_dir = os.path.join(training_output, 'tfma_eval_model_dir')
tfma.export.export_eval_savedmodel(
    estimator=estimator,
    export_dir_base=eval_model_dir,
    eval_input_receiver_fn=(lambda: eval_input_receiver_fn(tf_transform_output)))
```
| github_jupyter |
# Advanced usage
This notebook shows some more advanced features of `skorch`. More examples will be added with time.
<table align="left"><td>
<a target="_blank" href="https://colab.research.google.com/github/skorch-dev/skorch/blob/master/notebooks/Advanced_Usage.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/skorch-dev/skorch/blob/master/notebooks/Advanced_Usage.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
### Table of contents
* [Setup](#Setup)
* [Callbacks](#Callbacks)
* [Writing your own callback](#Writing-a-custom-callback)
* [Accessing callback parameters](#Accessing-callback-parameters)
* [Working with different data types](#Working-with-different-data-types)
* [Working with datasets](#Working-with-Datasets)
* [Working with dicts](#Working-with-dicts)
* [Multiple return values](#Multiple-return-values-from-forward)
* [Implementing a simple autoencoder](#Implementing-a-simple-autoencoder)
* [Training the autoencoder](#Training-the-autoencoder)
* [Extracting the decoder and the encoder output](#Extracting-the-decoder-and-the-encoder-output)
```
! [ ! -z "$COLAB_GPU" ] && pip install torch skorch
import torch
from torch import nn
import torch.nn.functional as F
# Seed both the CPU and CUDA RNGs so the notebook runs are reproducible
torch.manual_seed(0)
torch.cuda.manual_seed(0)
```
## Setup
### A toy binary classification task
We load a toy classification task from `sklearn`.
```
import numpy as np
from sklearn.datasets import make_classification
# Seed numpy's global RNG as well (sklearn falls back to it when no
# random_state is passed)
np.random.seed(0)
# 1000 samples, 20 features (10 of them informative), binary target
X, y = make_classification(1000, 20, n_informative=10, random_state=0)
# skorch/pytorch expect float32 inputs and int64 class labels
X, y = X.astype(np.float32), y.astype(np.int64)
X.shape, y.shape, y.mean()
```
### Definition of the `pytorch` classification `module`
We define a vanilla neural network with two hidden layers. The output layer should have 2 output units since there are two classes. In addition, it should have a softmax nonlinearity, because later, when calling `predict_proba`, the output from the `forward` call will be used.
```
from skorch import NeuralNetClassifier
class ClassifierModule(nn.Module):
    """Feed-forward binary classifier: 20 -> num_units -> 10 -> 2.

    The final softmax is part of ``forward`` because skorch's
    ``predict_proba`` uses the raw output of the forward call.

    Parameters
    ----------
    num_units : int
        Width of the first hidden layer.
    nonlin : callable
        Nonlinearity applied after the first dense layer.
    dropout : float
        Dropout probability applied after the first nonlinearity.
    """
    def __init__(
        self,
        num_units=10,
        nonlin=F.relu,
        dropout=0.5,
    ):
        super(ClassifierModule, self).__init__()
        # NOTE: the original assigned self.nonlin and self.dropout twice;
        # the first assignments were immediately overwritten and are removed.
        self.num_units = num_units
        self.nonlin = nonlin
        self.dense0 = nn.Linear(20, num_units)
        self.dropout = nn.Dropout(dropout)
        self.dense1 = nn.Linear(num_units, 10)
        self.output = nn.Linear(10, 2)

    def forward(self, X, **kwargs):
        X = self.nonlin(self.dense0(X))
        X = self.dropout(X)
        X = F.relu(self.dense1(X))
        # softmax so that forward() yields class probabilities directly
        X = F.softmax(self.output(X), dim=-1)
        return X
```
## Callbacks
Callbacks are a powerful and flexible way to customize the behavior of your neural network. They are all called at specific points during the model training, e.g. when training starts, or after each batch. Have a look at the `skorch.callbacks` module to see the callbacks that are already implemented.
### Writing a custom callback
Although `skorch` comes with a handful of useful callbacks, you may find that you would like to write your own callbacks. Doing so is straightforward, just remember these rules:
* They should inherit from `skorch.callbacks.Callback`.
* They should implement at least one of the `on_`-methods provided by the parent class (e.g. `on_batch_begin` or `on_epoch_end`).
* As argument, the `on_`-methods first get the `NeuralNet` instance, and, where appropriate, the local data (e.g. the data from the current batch). The method should also have `**kwargs` in the signature for potentially unused arguments.
* *Optional*: If you have attributes that should be reset when the model is re-initialized, those attributes should be set in the `initialize` method.
Here is an example of a callback that remembers at which epoch the validation accuracy reached a certain value. Then, when training is finished, it calls a mock Twitter API and tweets that epoch. We proceed as follows:
* We set the desired minimum accuracy during `__init__`.
* We set the critical epoch during `initialize`.
* After each epoch, if the critical accuracy has not yet been reached, we check if it was reached.
* When training finishes, we send a tweet informing us whether our training was successful or not.
```
from skorch.callbacks import Callback
def tweet(msg):
    """Print *msg* to stdout framed by tilde rules, mimicking a tweet."""
    separator = "~" * 60
    print(separator)
    print("*tweet*", msg, "#skorch #pytorch")
    print(separator)
class AccuracyTweet(Callback):
    """Record the first epoch whose validation accuracy reaches
    ``min_accuracy`` and tweet the outcome once training ends."""

    def __init__(self, min_accuracy):
        self.min_accuracy = min_accuracy

    def initialize(self):
        # -1 means the accuracy threshold has not been reached yet
        self.critical_epoch_ = -1

    def on_epoch_end(self, net, **kwargs):
        if self.critical_epoch_ == -1:
            # inspect the validation accuracy of the most recent epoch
            last_acc = net.history[-1, 'valid_acc']
            if last_acc >= self.min_accuracy:
                self.critical_epoch_ = len(net.history)

    def on_train_end(self, net, **kwargs):
        if self.critical_epoch_ >= 0:
            msg = "Accuracy reached {} at epoch {}!!!".format(
                self.min_accuracy, self.critical_epoch_)
        else:
            msg = "Accuracy never reached {} :(".format(self.min_accuracy)
        tweet(msg)
```
Now we initialize a `NeuralNetClassifier` and pass your new callback in a list to the `callbacks` argument. After that, we train the model and see what happens.
```
net = NeuralNetClassifier(
ClassifierModule,
max_epochs=15,
lr=0.02,
warm_start=True,
callbacks=[AccuracyTweet(min_accuracy=0.7)],
)
net.fit(X, y)
```
Oh no, our model never reached a validation accuracy of 0.7. Let's train some more (this is possible because we set `warm_start=True`):
```
net.fit(X, y)
assert net.history[-1, 'valid_acc'] >= 0.7
```
Finally, the validation score exceeded 0.7. Hooray!
### Accessing callback parameters
Say you would like to use a learning rate schedule with your neural net, but you don't know what parameters are best for that schedule. Wouldn't it be nice if you could find those parameters with a grid search? With `skorch`, this is possible. Below, we show how to access the parameters of your callbacks.
To simplify the access to your callback parameters, it is best if you give your callback a name. This is achieved by passing the `callbacks` parameter a list of *name*, *callback* tuples, such as:
callbacks=[
('scheduler', LearningRateScheduler),
...
],
This way, you can access your callbacks using the double underscore semantics (as, for instance, in an `sklearn` `Pipeline`):
callbacks__scheduler__epoch=50,
So if you would like to perform a grid search on, say, the number of units in the hidden layer and the learning rate schedule, it could look something like this:
param_grid = {
'module__num_units': [50, 100, 150],
'callbacks__scheduler__epoch': [10, 50, 100],
}
*Note*: If you would like to refresh your knowledge on grid search, look [here](http://scikit-learn.org/stable/modules/grid_search.html#grid-search), [here](http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_text_feature_extraction.html), or in the *Basic_Usage* notebook.
Below, we show how accessing the callback parameters works with our `AccuracyTweet` callback:
```
net = NeuralNetClassifier(
ClassifierModule,
max_epochs=10,
lr=0.1,
warm_start=True,
callbacks=[
('tweet', AccuracyTweet(min_accuracy=0.7)),
],
callbacks__tweet__min_accuracy=0.6,
)
net.fit(X, y)
```
As you can see, by passing `callbacks__tweet__min_accuracy=0.6`, we changed that parameter. The same can be achieved by calling the `set_params` method with the corresponding arguments:
```
net.set_params(callbacks__tweet__min_accuracy=0.75)
net.fit(X, y)
```
## Working with different data types
### Working with `Dataset`s
We encourage you to not pass `Dataset`s to `net.fit` but to let skorch handle `Dataset`s internally. Nonetheless, there are situations where passing `Dataset`s to `net.fit` is hard to avoid (e.g. if you want to load the data lazily during the training). This is supported by skorch but may have some unwanted side-effects relating to sklearn. For instance, `Dataset`s cannot split into train and validation in a stratified fashion without explicit knowledge of the classification targets.
Below we show what happens when you try to fit with `Dataset` and the stratified split fails:
```
class MyDataset(torch.utils.data.Dataset):
    """Minimal map-style dataset over two parallel sequences.

    Yields ``(X[i], y[i])`` pairs.

    Parameters
    ----------
    X, y : sequences (e.g. numpy arrays) of equal length.

    Raises
    ------
    ValueError
        If ``X`` and ``y`` differ in length.
    """

    def __init__(self, X, y):
        # raise instead of assert so the check survives `python -O`
        if len(X) != len(y):
            raise ValueError(
                "X and y must have the same length, got {} and {}".format(
                    len(X), len(y)))
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, i):
        return self.X[i], self.y[i]
X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X, y = X.astype(np.float32), y.astype(np.int64)
dataset = MyDataset(X, y)
net = NeuralNetClassifier(ClassifierModule)
try:
net.fit(dataset, y=None)
except ValueError as e:
print("Error:", e)
net.train_split.stratified
```
As you can see, the stratified split fails since `y` is not known. There are two solutions to this:
* turn off stratified splitting ( `net.train_split.stratified=False`)
* pass `y` explicitly (if possible), even if it is implicitly contained in the `Dataset`
The second solution is shown below:
```
net.fit(dataset, y=y)
```
### Working with dicts
#### The standard case
skorch has built-in support for dictionaries as data containers. Here we show a somewhat contrived example of how to use dicts, but it should get the point across. First we create data and put it into a dictionary `X_dict` with two keys `X0` and `X1`:
```
X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X, y = X.astype(np.float32), y.astype(np.int64)
X0, X1 = X[:, :10], X[:, 10:]
X_dict = {'X0': X0, 'X1': X1}
```
When skorch passes the dict to the pytorch module, it will pass the data as keyword arguments to the forward call. That means that we should accept the two keys `X0` and `X1` in the forward method, as shown below:
```
class ClassifierWithDict(nn.Module):
    """Classifier taking two 10-feature inputs supplied as dict keys X0/X1.

    Each input passes through its own dense layer; the hidden activations
    are concatenated and mapped to 2 softmax outputs.

    Parameters
    ----------
    num_units0, num_units1 : int
        Hidden widths for the X0 and X1 branches respectively.
    nonlin : callable
        Nonlinearity applied after each branch's dense layer.
    dropout : float
        Dropout probability applied after each branch's nonlinearity.
    """
    def __init__(
        self,
        num_units0=50,
        num_units1=50,
        nonlin=F.relu,
        dropout=0.5,
    ):
        super(ClassifierWithDict, self).__init__()
        # NOTE: the original assigned self.nonlin and self.dropout twice;
        # the first assignments were immediately overwritten and are removed.
        self.num_units0 = num_units0
        self.num_units1 = num_units1
        self.nonlin = nonlin
        self.dense0 = nn.Linear(10, num_units0)
        self.dense1 = nn.Linear(10, num_units1)
        self.dropout = nn.Dropout(dropout)
        self.output = nn.Linear(num_units0 + num_units1, 2)

    # NOTE: We accept X0 and X1, the keys from the dict, as arguments
    def forward(self, X0, X1, **kwargs):
        X0 = self.nonlin(self.dense0(X0))
        X0 = self.dropout(X0)
        X1 = self.nonlin(self.dense1(X1))
        X1 = self.dropout(X1)
        X = torch.cat((X0, X1), dim=1)
        X = F.relu(X)
        X = F.softmax(self.output(X), dim=-1)
        return X
```
As long as we keep this in mind, we are good to go.
```
net = NeuralNetClassifier(ClassifierWithDict, verbose=0)
net.fit(X_dict, y)
```
#### Working with sklearn `Pipeline` and `GridSearchCV`
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import GridSearchCV
```
sklearn makes the assumption that incoming data should be numpy/sparse arrays or something similar. This clashes with the use of dictionaries. Unfortunately, it is sometimes impossible to work around that for now (for instance using skorch with `BaggingClassifier`). Other times, there are possibilities.
When we have a preprocessing pipeline that involves `FunctionTransformer`, we have to pass the parameter `validate=False` (which is the default value now) so that sklearn allows the dictionary to pass through. Everything else works:
```
pipe = Pipeline([
('do-nothing', FunctionTransformer(validate=False)),
('net', net),
])
pipe.fit(X_dict, y)
```
When trying a grid or randomized search, it is not that easy to pass a dict. If we try, we will get an error:
```
param_grid = {
'net__module__num_units0': [10, 25, 50],
'net__module__num_units1': [10, 25, 50],
'net__lr': [0.01, 0.1],
}
grid_search = GridSearchCV(pipe, param_grid, scoring='accuracy', verbose=1, cv=3)
try:
grid_search.fit(X_dict, y)
except Exception as e:
print(e)
```
The error above occurs because sklearn gets the length of the input data, which is 2 for the dict, and believes that is inconsistent with the length of the target (1000).
To get around that, skorch provides a helper class called `SliceDict`. It allows us to wrap our dictionaries so that they also behave like a numpy array:
```
from skorch.helper import SliceDict
X_slice_dict = SliceDict(X0=X0, X1=X1) # X_slice_dict = SliceDict(**X_dict) would also work
```
The SliceDict shows the correct length, shape, and is sliceable across values:
```
print("Length of dict: {}, length of SliceDict: {}".format(len(X_dict), len(X_slice_dict)))
print("Shape of SliceDict: {}".format(X_slice_dict.shape))
print("Slicing the SliceDict slices across values: {}".format(X_slice_dict[:2]))
```
With this, we can call `GridSearchCV` just as expected:
```
grid_search.fit(X_slice_dict, y)
grid_search.best_score_, grid_search.best_params_
```
## Multiple return values from `forward`
Often, we want our `Module.forward` method to return more than just one value. There can be several reasons for this. Maybe, the criterion requires not one but several outputs. Or perhaps we want to inspect intermediate values to learn more about our model (say inspecting attention in a sequence-to-sequence model). Fortunately, `skorch` makes it easy to achieve this. In the following, we demonstrate how to handle multiple outputs from the `Module`.
To demonstrate this, we implement a very simple autoencoder. It consists of an encoder that reduces our input of 20 units to 5 units using two linear layers, and a decoder that tries to reconstruct the original input, again using two linear layers.
### Implementing a simple autoencoder
```
from skorch import NeuralNetRegressor
class Encoder(nn.Module):
    """Compress a 20-dim input down to ``num_units`` dims via two layers."""

    def __init__(self, num_units=5):
        super().__init__()
        self.num_units = num_units
        layers = [
            nn.Linear(20, 10),
            nn.ReLU(),
            nn.Linear(10, num_units),
            nn.ReLU(),
        ]
        self.encode = nn.Sequential(*layers)

    def forward(self, X):
        return self.encode(X)
class Decoder(nn.Module):
    """Reconstruct a 20-dim output from a ``num_units``-dim code."""

    def __init__(self, num_units):
        super().__init__()
        self.num_units = num_units
        hidden = 10
        self.decode = nn.Sequential(
            nn.Linear(num_units, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 20),
        )

    def forward(self, X):
        return self.decode(X)
```
The autoencoder module below actually returns a tuple of two values, the decoded input and the encoded input. This way, we cannot only use the decoded input to calculate the normal loss but also have access to the encoded state.
```
class AutoEncoder(nn.Module):
    """Encoder/decoder pair whose forward returns ``(decoded, encoded)``.

    Returning both values lets the loss use the reconstruction while
    still exposing the latent code (e.g. for a sparsity penalty).
    """

    def __init__(self, num_units):
        super().__init__()
        self.num_units = num_units
        self.encoder = Encoder(num_units=num_units)
        self.decoder = Decoder(num_units=num_units)

    def forward(self, X):
        code = self.encoder(X)
        reconstruction = self.decoder(code)
        # tuple of two values: skorch passes this through to get_loss
        return reconstruction, code
```
Since the module's `forward` method returns two values, we have to adjust our objective to do the right thing with those values. If we don't do this, the criterion wouldn't know what to do with the two values and would raise an error.
One strategy would be to only use the decoded state for the loss and discard the encoded state. For this demonstration, we have a different plan: We would like the encoded state to be sparse. Therefore, we add an L1 loss of the encoded state to the reconstruction loss. This way, the net will try to reconstruct the input as accurately as possible while keeping the encoded state as sparse as possible.
To implement this, the right method to override is called `get_loss`, which is where `skorch` computes and returns the loss. It gets the prediction (our tuple) and the target as input, as well as other arguments and keywords that we pass through. We create a subclass of `NeuralNetRegressor` that overrides said method and implements our idea for the loss.
```
class AutoEncoderNet(NeuralNetRegressor):
    """Regressor whose loss adds an L1 sparsity penalty on the encoded
    state (second element of the tuple returned by ``forward``)."""

    def get_loss(self, y_pred, y_true, *args, **kwargs):
        # `forward` returns (decoded, encoded); unpack before scoring
        decoded, encoded = y_pred
        reconstruction_loss = super().get_loss(decoded, y_true, *args, **kwargs)
        sparsity_penalty = 1e-3 * torch.abs(encoded).sum()
        return reconstruction_loss + sparsity_penalty
```
*Note*: Alternatively, we could have used an unaltered `NeuralNetRegressor` but implement a custom criterion that is responsible for unpacking the tuple and computing the loss.
### Training the autoencoder
Now that everything is ready, we train the model as usual. We initialize our net subclass with the `AutoEncoder` module and call the `fit` method with `X` both as input and as target (since we want to reconstruct the original data):
```
net = AutoEncoderNet(
AutoEncoder,
module__num_units=5,
lr=0.3,
)
net.fit(X, X)
```
Voilà, the model was trained using our custom loss function that makes use of both predicted values.
### Extracting the decoder and the encoder output
Sometimes, we may wish to inspect all the values returned by the `forward` method of the module. There are several ways to achieve this. In theory, we can always access the module directly by using the `net.module_` attribute. However, this is unwieldy, since this completely shortcuts the prediction loop, which takes care of important steps like casting `numpy` arrays to `pytorch` tensors and batching.
Also, we cannot use the `predict` method on the net. This method will only return the first output from the forward method, in this case the decoded state. The reason for this is that `predict` is part of the `sklearn` API, which requires there to be only one output. This is shown below:
```
y_pred = net.predict(X)
y_pred.shape # only the decoded state is returned
```
However, the net itself provides two methods to retrieve all outputs. The first one is the `net.forward` method, which retrieves *all* the predicted batches from the `Module.forward` and concatenates them. Use this to retrieve the complete decoded and encoded state:
```
decoded_pred, encoded_pred = net.forward(X)
decoded_pred.shape, encoded_pred.shape
```
The other method is called `net.forward_iter`. It is similar to `net.forward` but instead of collecting all the batches, this method is lazy and only yields one batch at a time. This can be especially useful if the output doesn't fit into memory:
```
for decoded_pred, encoded_pred in net.forward_iter(X):
# do something with each batch
break
decoded_pred.shape, encoded_pred.shape
```
Finally, let's make sure that our initial goal of having a sparse encoded state was met. We check how many activities are close to zero:
```
torch.isclose(encoded_pred, torch.zeros_like(encoded_pred)).float().mean()
```
As we had hoped, the encoded state is quite sparse, with the majority of outputs being 0.
| github_jupyter |
### Introduction to Nilearn and image manipulation
The goal of this notebook is to help get you comfortable with manipulating functional and anatomical images using nilearn. We'll be using the techniques we learned here in our final analysis...
#### Content:
1. Basic Image Operations and Masking
2. Resampling data to work across modalities (T1/FUNC)
```
import os
import matplotlib.pyplot as plt
from nilearn import image as img
from nilearn import plotting as plot
from bids import BIDSLayout
#for inline visualization in jupyter notebook
%matplotlib inline
```
As we've done in the past we've imported <code>image as img</code>. However, we've also imported <code>plotting as plot</code> from <code>nilearn</code>. This will allow us to easily visualize our neuroimaging data!
First let’s grab some data from where we downloaded our FMRIPREP outputs using PyBIDS:
```
#Base directory for fmriprep output
fmriprep_dir = '../data/ds000030/derivatives/fmriprep/'
# validate=False: fmriprep derivatives are not a strictly valid raw BIDS
# dataset, so validation would otherwise fail
layout= BIDSLayout(fmriprep_dir, validate=False)
# Preprocessed T1-weighted images and matching brain masks for one subject
T1w_files = layout.get(subject='10788', datatype='anat', suffix='preproc')
brainmask_files = layout.get(subject='10788', datatype='anat', suffix='brainmask')
#Display preprocessed files inside of anatomy folder
for f in T1w_files:
    print(f.path)
```
## Basic Image Operations
In this section we're going to deal with the following files:
1. <code>sub-10171_T1w_preproc.nii.gz</code> - the T1 image in native space
2. <code>sub-10171_T1w_brainmask.nii.gz</code> - a mask with 1's representing the brain and 0's elsewhere.
```
t1 = T1w_files[0].path
bm = brainmask_files[0].path
t1_img = img.load_img(t1)
bm_img = img.load_img(bm)
```
First we'll do what you've been waiting to do - plot our MR image! This can be easily achieved using Nilearn's <code>plotting</code> module as follows:
```
plot.plot_anat(t1_img)
```
Try viewing the mask as well!
```
#View the mask image
plot.plot_anat(bm_img)
```
### Arithmetic Operations
Let’s start performing some image operations. The simplest operations we can perform is element-wise, what this means is that we want to perform some sort of mathematical operation on each voxel of the MR image. Since voxels are represented in a 3D array, this is equivalent to performing an operation on each element (i,j,k) of a 3D array. Let’s try inverting the image, that is, flip the colour scale such that all blacks appear white and vice-versa. To do this, we’ll use the method
<code>img.math_img(formula, **imgs)</code> Where:
- <code>formula</code> is a mathematical expression such as 'a+1'
- <code>**imgs</code> is a set of key-value pairs linking variable names to images. For example a=T1
In order to invert the image, we can simply flip the sign which will set the most positive elements (white) to the most negative elements (black), and the least positives elements (black) to the least negative elements (white). This effectively flips the colour-scale:
```
invert_img = img.math_img('-a', a=t1_img)
plot.plot_anat(invert_img)
```
Alternatively we don't need to first load in our <code>t1_img</code> using <code>img.load_img</code>. Instead we can feed in a path to <code>img.math_img</code>:
~~~
invert_img = img.math_img('-a', a=t1)
plot.plot_anat(invert_img)
~~~
This will yield the same result!
### Applying a Mask
Let’s extend this idea of applying operations to each element of an image to multiple images. Instead of specifying just one image like the following:
<code>img.math_img('a+1',a=img_a)</code>
We can specify multiple images by tacking on additional variables:
<code>img.math_img('a+b', a=img_a, b=img_b)</code>
The key requirement here is that when dealing with multiple images, the size of the images must be the same. The reason is that we're dealing with element-wise operations. That means that some voxel (i,j,k) in img_a is being paired with some voxel (i,j,k) in <code>img_b</code> when performing operations. So every voxel in <code>img_a</code> must have some pair with a voxel in <code>img_b</code>; sizes must be the same.
We can take advantage of this property when masking our data using multiplication. Masking works by multiplying a raw image (our <code>T1</code>), with some mask image (our <code>bm</code>). Whichever voxel (i,j,k) has a value of 0 in the mask multiplies with voxel (i,j,k) in the raw image resulting in a product of 0. Conversely, any voxel (i,j,k) in the mask with a value of 1 multiplies with voxel (i,j,k) in the raw image resulting in the same value. Let’s try this out in practice and see what the result is:
```
masked_t1 = img.math_img('a*b', a=t1, b=bm)
plot.plot_anat(masked_t1)
```
#### Exercise!
Try applying the mask such that the brain is removed, but the rest of the head is intact!
*Hint*:
Remember that a mask is composed of 0's and 1's, where parts of the data labelled 1 are regions to keep, and parts of the data that are 0, are to throw away.
You can do this in 2 steps:
1. Switch the 0's and 1's using an equation (simple addition/subtraction) or condition (like x == 0).
2. Apply the mask
```
inverted_mask = img.math_img('1-x', x=bm)
plot.plot_anat(inverted_mask)
inverted_mask_t1 = img.math_img('a*b', a=t1, b=inverted_mask)
plot.plot_anat(inverted_mask_t1)
```
### Slicing
Recall that our data matrix is organized in the following manner:
<img src="./static/images/numpy_arrays.png" alt="Drawing" align="middle" width="500px"/>
Slicing does exactly what it seems to imply. Given our 3D volume, we can pull out a 2D subset (called a "slice"). Here's an example of slicing moving from left to right via an animation:
<img src="https://upload.wikimedia.org/wikipedia/commons/5/56/Parasagittal_MRI_of_human_head_in_patient_with_benign_familial_macrocephaly_prior_to_brain_injury_%28ANIMATED%29.gif"/>
What you see here is a series of 2D images that start from the left, and move toward the right. Each frame of this GIF is a slice - a 2D subset of a 3D volume. Slicing can be useful for cases in which you'd want to loop through each MR slice and perform a computation; importantly in functional imaging data slicing is useful for pulling out timepoints as we'll see later!
***
Sourced from: https://en.wikipedia.org/wiki/Neuroimaging#/media/File:Parasagittal_MRI_of_human_head_in_patient_with_benign_familial_macrocephaly_prior_to_brain_injury_(ANIMATED).gif
***
Slicing is done easily on an image file using the attribute <code>.slicer</code> of a Nilearn <code>image</code> object. For example we can grab the $10^{\text{th}}$ slice along the x axis as follows:
```
x_slice = t1_img.slicer[10:11,:,:]
```
The statement $10:11$ is intentional and is required by <code>.slicer</code>. Alternatively we can slice along the x-axis using the data matrix itself:
```
t1_data = t1_img.get_data()
x_slice = t1_data[10,:,:]
```
This will yield the same result as above. Notice that when using the <code>t1_data</code> array we can just specify which slice to grab instead of using <code>:</code>. We can use slicing in order to modify visualizations. For example, when viewing the T1 image, we may want to specify at which slice we'd like to view the image. This can be done by specifying which coordinates to *cut* the image at:
```
plot.plot_anat(t1_img,cut_coords=(50,30,70))
```
The <code>cut_coords</code> option specifies 3 numbers:
- The first number says cut the X coordinate at slice 50 and display (sagittal view in this case!)
- The second number says cut the Y coordinate at slice 30 and display (coronal view)
- The third number says cut the Z coordinate at slice 70 and display (axial view)
Remember <code>plot.plot_anat</code> yields 3 images, therefore <code>cut_coords</code> allows you to display where to take cross-sections of the brain from different perspectives (axial, sagittal, coronal)
***
This covers the basics of image manipulation using T1 images. To review in this section we covered:
- Basic image arithmetic
- Visualization
- Slicing
In the next section we will cover how to integrate additional modalities (functional data) to what we've done so far using <code>Nilearn</code>. Then we can start using what we've learned in order to perform analysis and visualization!
| github_jupyter |
# Transfer Learning Template
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from torch.utils.data import DataLoader
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
```
# Allowed Parameters
These are allowed parameters, not defaults
Each of these values need to be present in the injected parameters (the notebook will raise an exception if they are not present)
Papermill uses the cell tag "parameters" to inject the real parameters below this cell.
Enable tags to see what I mean
```
# Parameter names that MUST be present in the injected (papermill) or
# standalone parameters; the notebook raises if any are missing.
required_parameters = {
    "experiment_name",
    "lr",
    "device",
    "seed",
    "dataset_seed",
    "n_shot",
    "n_query",
    "n_way",
    "train_k_factor",
    "val_k_factor",
    "test_k_factor",
    "n_epoch",
    "patience",
    "criteria_for_best",
    "x_net",
    "datasets",
    "torch_default_dtype",
    "NUM_LOGS_PER_EPOCH",
    "BEST_MODEL_PATH",
    "x_shape",
}
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
from steves_utils.ORACLE.utils_v2 import (
ALL_DISTANCES_FEET_NARROWED,
ALL_RUNS,
ALL_SERIAL_NUMBERS,
)
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["n_way"] = 8
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 50
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "source_loss"
standalone_parameters["datasets"] = [
{
"labels": ALL_SERIAL_NUMBERS,
"domains": ALL_DISTANCES_FEET_NARROWED,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"),
"source_or_target_dataset": "source",
"x_transforms": ["unit_mag", "minus_two"],
"episode_transforms": [],
"domain_prefix": "ORACLE_"
},
{
"labels": ALL_NODES,
"domains": ALL_DAYS,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
"source_or_target_dataset": "target",
"x_transforms": ["unit_power", "times_zero"],
"episode_transforms": [],
"domain_prefix": "CORES_"
}
]
standalone_parameters["torch_default_dtype"] = "torch.float32"
standalone_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# Parameters
parameters = {
"experiment_name": "tl_3-jitter1:oracle.run1.framed -> cores+wisig",
"device": "cuda",
"lr": 0.001,
"seed": 1337,
"dataset_seed": 1337,
"n_shot": 3,
"n_query": 2,
"train_k_factor": 3,
"val_k_factor": 2,
"test_k_factor": 2,
"torch_default_dtype": "torch.float32",
"n_epoch": 50,
"patience": 3,
"criteria_for_best": "target_loss",
"x_net": [
{"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 1,
"out_channels": 256,
"kernel_size": [1, 7],
"bias": False,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 256}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 256,
"out_channels": 80,
"kernel_size": [2, 7],
"bias": True,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features": 256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
],
"NUM_LOGS_PER_EPOCH": 10,
"BEST_MODEL_PATH": "./best_model.pth",
"n_way": 16,
"datasets": [
{
"labels": [
"1-10.",
"1-11.",
"1-15.",
"1-16.",
"1-17.",
"1-18.",
"1-19.",
"10-4.",
"10-7.",
"11-1.",
"11-14.",
"11-17.",
"11-20.",
"11-7.",
"13-20.",
"13-8.",
"14-10.",
"14-11.",
"14-14.",
"14-7.",
"15-1.",
"15-20.",
"16-1.",
"16-16.",
"17-10.",
"17-11.",
"17-2.",
"19-1.",
"19-16.",
"19-19.",
"19-20.",
"19-3.",
"2-10.",
"2-11.",
"2-17.",
"2-18.",
"2-20.",
"2-3.",
"2-4.",
"2-5.",
"2-6.",
"2-7.",
"2-8.",
"3-13.",
"3-18.",
"3-3.",
"4-1.",
"4-10.",
"4-11.",
"4-19.",
"5-5.",
"6-15.",
"7-10.",
"7-14.",
"8-18.",
"8-20.",
"8-3.",
"8-8.",
],
"domains": [1, 2, 3, 4, 5],
"num_examples_per_domain_per_label": 100,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl",
"source_or_target_dataset": "target",
"x_transforms": ["jitter_256_1", "take_200"],
"episode_transforms": [],
"domain_prefix": "C_A_",
},
{
"labels": [
"1-10",
"1-12",
"1-14",
"1-16",
"1-18",
"1-19",
"1-8",
"10-11",
"10-17",
"10-4",
"10-7",
"11-1",
"11-10",
"11-19",
"11-20",
"11-4",
"11-7",
"12-19",
"12-20",
"12-7",
"13-14",
"13-18",
"13-19",
"13-20",
"13-3",
"13-7",
"14-10",
"14-11",
"14-12",
"14-13",
"14-14",
"14-19",
"14-20",
"14-7",
"14-8",
"14-9",
"15-1",
"15-19",
"15-6",
"16-1",
"16-16",
"16-19",
"16-20",
"17-10",
"17-11",
"18-1",
"18-10",
"18-11",
"18-12",
"18-13",
"18-14",
"18-15",
"18-16",
"18-17",
"18-19",
"18-2",
"18-20",
"18-4",
"18-5",
"18-7",
"18-8",
"18-9",
"19-1",
"19-10",
"19-11",
"19-12",
"19-13",
"19-14",
"19-15",
"19-19",
"19-2",
"19-20",
"19-3",
"19-4",
"19-6",
"19-7",
"19-8",
"19-9",
"2-1",
"2-13",
"2-15",
"2-3",
"2-4",
"2-5",
"2-6",
"2-7",
"2-8",
"20-1",
"20-12",
"20-14",
"20-15",
"20-16",
"20-18",
"20-19",
"20-20",
"20-3",
"20-4",
"20-5",
"20-7",
"20-8",
"3-1",
"3-13",
"3-18",
"3-2",
"3-8",
"4-1",
"4-10",
"4-11",
"5-1",
"5-5",
"6-1",
"6-15",
"6-6",
"7-10",
"7-11",
"7-12",
"7-13",
"7-14",
"7-7",
"7-8",
"7-9",
"8-1",
"8-13",
"8-14",
"8-18",
"8-20",
"8-3",
"8-8",
"9-1",
"9-7",
],
"domains": [1, 2, 3, 4],
"num_examples_per_domain_per_label": 100,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/wisig.node3-19.stratified_ds.2022A.pkl",
"source_or_target_dataset": "target",
"x_transforms": ["jitter_256_1", "take_200"],
"episode_transforms": [],
"domain_prefix": "W_A_",
},
{
"labels": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"domains": [32, 38, 8, 44, 14, 50, 20, 26],
"num_examples_per_domain_per_label": 2000,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl",
"source_or_target_dataset": "source",
"x_transforms": ["jitter_256_1", "take_200", "resample_20Msps_to_25Msps"],
"episode_transforms": [],
"domain_prefix": "ORACLE.run1_",
},
],
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
    print("parameters not injected, running with standalone_parameters")
    parameters = standalone_parameters
# When not standalone, `parameters` must already exist — presumably injected
# by the experiment driver (e.g. papermill); TODO confirm the injection path.
if not 'parameters' in locals() and not 'parameters' in globals():
    raise Exception("Parameter injection failed")
# Use an EasyDict so all parameters are accessible as attributes (p.foo).
p = EasyDict(parameters)
if "x_shape" not in p:
    p.x_shape = [2,256] # Default to this if we dont supply x_shape
# Validate that exactly the required parameter keys were supplied:
# report extras and missing keys separately, then abort.
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
    print("Parameters are incorrect")
    if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters))
    if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys))
    raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
# p.torch_default_dtype is a string such as "torch.float32"; eval() turns it
# into the actual dtype object. Acceptable only because parameters are trusted.
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
# Prefixed domain names, filled in by add_dataset() as datasets are registered.
p.domains_source = []
p.domains_target = []
# Per-split iterables for each registered dataset (source and target sides);
# add_dataset() appends to these, and they are aggregated further below.
train_original_source = []
val_original_source = []
test_original_source = []
train_original_target = []
val_original_target = []
test_original_target = []
# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag
# global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag
def add_dataset(
    labels,
    domains,
    pickle_path,
    x_transforms,
    episode_transforms,
    domain_prefix,
    num_examples_per_domain_per_label,
    source_or_target_dataset: str,
    iterator_seed=p.seed,
    dataset_seed=p.dataset_seed,
    n_shot=p.n_shot,
    n_way=p.n_way,
    n_query=p.n_query,
    train_val_test_k_factors=(p.train_k_factor, p.val_k_factor, p.test_k_factor),
):
    """Build episodic train/val/test iterables for one dataset and register them.

    The three iterables are wrapped so every episode's domain id is prefixed
    with ``domain_prefix``, then appended to the module-level source or target
    lists (depending on ``source_or_target_dataset``). The prefixed domain
    names are also recorded on ``p.domains_source`` / ``p.domains_target``.

    Raises:
        Exception: if ``episode_transforms`` is non-empty (unsupported) or
            ``source_or_target_dataset`` is not "source" or "target".
    """
    # Chain the configured per-example transforms into a single callable.
    if x_transforms == []:
        x_transform = None
    else:
        x_transform = get_chained_transform(x_transforms)

    # BUGFIX: the original assigned `episode_transform = None` here and then
    # unconditionally overwrote it below; the dead assignment is removed and
    # only the (live) guard against unsupported transforms is kept.
    if episode_transforms != []:
        raise Exception("episode_transforms not implemented")
    # Prefix each episode's domain id so domains from different datasets
    # cannot collide once aggregated.
    episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])

    eaf = Episodic_Accessor_Factory(
        labels=labels,
        domains=domains,
        num_examples_per_domain_per_label=num_examples_per_domain_per_label,
        iterator_seed=iterator_seed,
        dataset_seed=dataset_seed,
        n_shot=n_shot,
        n_way=n_way,
        n_query=n_query,
        train_val_test_k_factors=train_val_test_k_factors,
        pickle_path=pickle_path,
        x_transform_func=x_transform,
    )
    train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()
    train = Lazy_Iterable_Wrapper(train, episode_transform)
    val = Lazy_Iterable_Wrapper(val, episode_transform)
    test = Lazy_Iterable_Wrapper(test, episode_transform)

    if source_or_target_dataset == "source":
        train_original_source.append(train)
        val_original_source.append(val)
        test_original_source.append(test)
        p.domains_source.extend(
            [domain_prefix + str(u) for u in domains]
        )
    elif source_or_target_dataset == "target":
        train_original_target.append(train)
        val_original_target.append(val)
        test_original_target.append(test)
        p.domains_target.extend(
            [domain_prefix + str(u) for u in domains]
        )
    else:
        raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}")
# Register every dataset declared in the parameters (each entry supplies the
# keyword arguments of add_dataset).
for ds in p.datasets:
    add_dataset(**ds)
# from steves_utils.CORES.utils import (
# ALL_NODES,
# ALL_NODES_MINIMUM_1000_EXAMPLES,
# ALL_DAYS
# )
# add_dataset(
# labels=ALL_NODES,
# domains = ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"cores_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle1_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle2_{u}"
# )
# add_dataset(
# labels=list(range(19)),
# domains = [0,1,2],
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"met_{u}"
# )
# # from steves_utils.wisig.utils import (
# # ALL_NODES_MINIMUM_100_EXAMPLES,
# # ALL_NODES_MINIMUM_500_EXAMPLES,
# # ALL_NODES_MINIMUM_1000_EXAMPLES,
# # ALL_DAYS
# # )
# import steves_utils.wisig.utils as wisig
# add_dataset(
# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,
# domains = wisig.ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"wisig_{u}"
# )
###################################
# Build the dataset
###################################
# Merge the per-dataset iterables into one seeded stream per split.
train_original_source = Iterable_Aggregator(train_original_source, p.seed)
val_original_source = Iterable_Aggregator(val_original_source, p.seed)
test_original_source = Iterable_Aggregator(test_original_source, p.seed)
train_original_target = Iterable_Aggregator(train_original_target, p.seed)
val_original_target = Iterable_Aggregator(val_original_target, p.seed)
test_original_target = Iterable_Aggregator(test_original_target, p.seed)
# For CNN We only use X and Y. And we only train on the source.
# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper. Finally wrap them in a dataloader
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
# Nested attribute access: datasets.<side>.<form>.<split>
datasets = EasyDict({
    "source": {
        "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
        "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
    },
    "target": {
        "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
        "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
    },
})
from steves_utils.transforms import get_average_magnitude, get_average_power
# Sanity check: show which (prefixed) domains appear in each validation stream.
# NOTE(review): this iterates the aggregators fully — assumes they are
# re-iterable; confirm before relying on them again below.
print(set([u for u,_ in val_original_source]))
print(set([u for u,_ in val_original_target]))
# Peek at one processed episode: (support_x, support_y, query_x, query_y, _).
s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))
print(s_x)
# for ds in [
# train_processed_source,
# val_processed_source,
# test_processed_source,
# train_processed_target,
# val_processed_target,
# test_processed_target
# ]:
# for s_x, s_y, q_x, q_y, _ in ds:
# for X in (s_x, q_x):
# for x in X:
# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)
# assert np.isclose(get_average_power(x.numpy()), 1.0)
###################################
# Build the model
###################################
# easfsl only wants a tuple for the shape
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=tuple(p.x_shape))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
# The jig runs the train/eval loop and checkpoints the best model to disk.
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
    train_iterable=datasets.source.processed.train,
    source_val_iterable=datasets.source.processed.val,
    target_val_iterable=datasets.target.processed.val,
    num_epochs=p.n_epoch,
    num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
    patience=p.patience,  # early-stopping patience (epochs)
    optimizer=optimizer,
    criteria_for_best=p.criteria_for_best,  # e.g. "target_loss"
)
# Wall-clock time for the whole experiment (dataset build + training).
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
# Label accuracy/loss on the held-out test and validation splits,
# for both source and target domains.
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
# Per-domain confusion / accuracy over the combined source + target val data.
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
    per_domain_accuracy[domain] = {
        "accuracy": accuracy,
        "source?": domain in p.domains_source
    }
# Do an independent accuracy assesment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
# Bundle parameters, results, and training history into one record so the
# whole experiment can be serialized and compared against other runs.
experiment = {
    "experiment_name": p.experiment_name,
    "parameters": dict(p),
    "results": {
        "source_test_label_accuracy": source_test_label_accuracy,
        "source_test_label_loss": source_test_label_loss,
        "target_test_label_accuracy": target_test_label_accuracy,
        "target_test_label_loss": target_test_label_loss,
        "source_val_label_accuracy": source_val_label_accuracy,
        "source_val_label_loss": source_val_label_loss,
        "target_val_label_accuracy": target_val_label_accuracy,
        "target_val_label_loss": target_val_label_loss,
        "total_epochs_trained": total_epochs_trained,
        "total_experiment_time_secs": total_experiment_time_secs,
        "confusion": confusion,
        "per_domain_accuracy": per_domain_accuracy,
    },
    "history": history,
    "dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
# Render the standard report artifacts for this experiment.
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
# Serialize (also acts as a check that everything in `experiment` is JSON-safe).
json.dumps(experiment)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/lakshit2808/Machine-Learning-Notes/blob/master/ML_Models/Classification/KNearestNeighbor/KNN_first_try.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# K-Nearest Neighbor
**K-Nearest Neighbors** is an algorithm for supervised learning, where the data is 'trained' with data points corresponding to their classification. Once a point is to be predicted, the algorithm takes into account the 'K' nearest points to it to determine its classification.
### Here's a visualization of the K-Nearest Neighbors algorithm.
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/images/KNN_Diagram.png">
In this case, we have data points of Class A and B. We want to predict what the star (test data point) is. If we consider a k value of 3 (3 nearest data points) we will obtain a prediction of Class B. Yet if we consider a k value of 6, we will obtain a prediction of Class A.<br><br>
In this sense, it is important to consider the value of k. But hopefully from this diagram, you should get a sense of what the K-Nearest Neighbors algorithm is. It considers the 'K' Nearest Neighbors (points) when it predicts the classification of the test point.
## 1. Importing Libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
```
## 2. Reading Data
```
# Load the telecom customer dataset and preview the first rows.
df = pd.read_csv('teleCust.csv')
df.head()
```
## 3. Data Visualization and Analysis
#### Let’s see how many of each class is in our data set
```
# Class balance of the target: example counts per customer category.
df['custcat'].value_counts()
```
The target field, called **custcat**, has four possible values that correspond to the four customer groups, as follows:
1. Basic Service
2. E-Service
3. Plus Service
4. Total Service
```
# Income distribution across customers (50-bin histogram).
df.hist(column='income' , bins=50)
```
### Feature Set
Let's Define a feature set: X
```
# List all available columns to choose the feature set from.
df.columns
```
To use scikit-learn library, we have to convert the Pandas data frame to a Numpy array:
```
# Feature matrix: convert the selected DataFrame columns to a NumPy array,
# the input format scikit-learn estimators expect.
X = df[['region', 'tenure', 'age', 'marital', 'address', 'income', 'ed',
        'employ', 'retire', 'gender', 'reside']].values
X[0:5]
```
What are our labels?
```
# Target vector: the customer-category labels.
y = df['custcat'].values
y[0:5]
```
### Normalize Data
Normalization in this case essentially means standardization. Standardization is the process of transforming data based on the mean and standard deviation for the whole set. Thus, transformed data refers to a standard distribution with a mean of 0 and a variance of 1.<br><br>
Data Standardization give data zero mean and unit variance, it is good practice, especially for algorithms such as KNN which is based on distance of cases:
```
# Standardize features to zero mean / unit variance. KNN is distance-based,
# so unscaled features with large ranges would dominate the metric.
X = preprocessing.StandardScaler().fit(X).transform(X.astype(float))
X[0:5]
```
## 4. Train/Test Split
```
from sklearn.model_selection import train_test_split
# Hold out 20% of the data for testing; fixed random_state for reproducibility.
X_train , X_test , y_train , y_test = train_test_split(X , y , test_size= 0.2 , random_state = 4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
```
## 5. Classification(KNN)
```
from sklearn.neighbors import KNeighborsClassifier
```
### Training
Lets start the algorithm with k=4 for now:
```
# BUGFIX: accuracy_score was only imported in a later cell, so running the
# notebook top to bottom raised NameError here. Import it where it is used.
from sklearn.metrics import accuracy_score

# Sweep k from 1 to 99, recording test-set accuracy for each k.
all_acc = []
for i in range(1, 100):
    KNN = KNeighborsClassifier(n_neighbors=i).fit(X_train, y_train)
    all_acc.append(accuracy_score(y_test, KNN.predict(X_test)))
best_acc = max(all_acc)
best_k = all_acc.index(best_acc) + 1  # +1 because the sweep starts at k=1
# Refit the final model with the best k found.
KNN = KNeighborsClassifier(n_neighbors=best_k).fit(X_train, y_train)
```
### Prediction
```
# Predict test-set labels with the tuned model and peek at the first five.
y_ = KNN.predict(X_test)
y_[0:5]
```
## 6. Accuracy Evaluation
In multilabel classification, **accuracy classification score** is a function that computes subset accuracy. This function is equal to the jaccard_score function. Essentially, it calculates how closely the actual labels and predicted labels are matched in the test set.
```
from sklearn.metrics import accuracy_score
# Report accuracy on both splits; a large train/test gap suggests overfitting.
print('Train Set Accuracy: {}'.format(accuracy_score(y_train , KNN.predict(X_train))))
# BUGFIX: label typo "Ttest" corrected to "Test".
print('Test Set Accuracy: {}'.format(accuracy_score(y_test , KNN.predict(X_test))))
```
#### What about other K?
K in KNN, is the number of nearest neighbors to examine. It is supposed to be specified by the User. So, how can we choose right value for K?
The general solution is to reserve a part of your data for testing the accuracy of the model. Then choose k = 1, use the training part for modeling, and calculate the accuracy of prediction using all samples in your test set. Repeat this process, increasing k, and see which k is the best for your model.
We can calculate the accuracy of KNN for different K.
```
# Re-run the k sweep (k = 1..99), recording test-set accuracy for each k,
# to inspect how accuracy varies with the neighborhood size.
all_acc = []
for i in range(1, 100):
    KNN = KNeighborsClassifier(n_neighbors=i).fit(X_train , y_train)
    all_acc.append(accuracy_score(y_test , KNN.predict(X_test)))
best_acc = max(all_acc)
# Index i corresponds to k = i + 1 since the sweep starts at k=1.
best_k = all_acc.index(best_acc) + 1
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
%config InlineBackend.figure_format = 'retina'
from IPython.core.display import display, HTML
# Widen the notebook cells to the full browser width.
display(HTML("<style>.container {width:100% !important;}</style>"))
from scipy.special import gamma, factorial,digamma
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.gridspec as gridspec
import sys
# Make the local FLAIR_BiocomputeLab checkout importable.
sys.path.append(r'/Users/ys18223/Documents/GitHub/FLAIR_BiocomputeLab')
```
# Fig H
```
#Input MAPE performance accross simulations when estimating the mean fluorescence
d_mean = {'MAPE': [100*i for i in ([0.1674891 , 0.14371818, 0.12273398,
0.16679492, 0.13970324, 0.1015513 ,
0.16319497, 0.12743953, 0.06931147]+[0.51141972, 0.51385324, 0.51403695,
0.52769436, 0.51004928, 0.51341036,
0.53446 , 0.52250617, 0.5075517 ])]+[15.29211367, 14.14405139, 14.05101411]+[12.61702118, 10.50428435, 9.82247402]+[10.31754068, 7.2084087 , 4.77361639]+[16.35151345, 16.9359747 , 17.78217523]+[14.38362791, 14.93895699, 15.7100954 ]+[13.14528142, 13.4672431 , 14.25780018], 'distribution': ['Gamma']*18+['Lognormal']*18,'inference':['ML']*9+['MOM']*9+['ML']*9+['MOM']*9}
df_mean = pd.DataFrame(data=d_mean)
df_mean.head()
# Create the figure: violin plot of MAPE by distribution, split by inference
# method (ML vs MOM), styled for a single-column paper figure.
fig = plt.figure(figsize=(11.7,8.3))
gs = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0])
my_pal = {"ML": "#2463A3", "MOM": "#B5520E"}
ax=sns.violinplot(x="distribution", y="MAPE", hue="inference",
                  data=df_mean, palette=my_pal)
ax.set_ylabel('MAPE (mean) %')
ax.set_xlabel('')
# my_pal = ['#2463A3', '#B5520E','#2463A3', '#B5520E']
# INF=['ML','MOM','ML','MOM']
# color_dict = dict(zip(INF, my_pal ))
# for i in range(0,4):
#     mybox = ax.artists[i]
#     mybox.set_facecolor(color_dict[INF[i]])
#plt.legend(frameon=False,fontsize=12)
ax.get_legend().remove()  # legend suppressed; colors explained elsewhere
sns.despine()
# Resize to 3.54 x 3.54 inches (~90 mm, single-column width).
width=3.54
height=3.54
fig.set_size_inches(width, height)
plt.subplots_adjust(hspace=.0 , wspace=.00, left=.15, right=.95, top=.95, bottom=.13)
plt.show()
```
# Fig I
```
#Input MAPE performance accross simulations when estimating the mean variance
d_var = {'MAPE': [56.51961891, 50.47877742, 46.13735704,
56.41471139, 48.30979619, 39.03006257,
56.08137685, 44.53477141, 27.01354216]+[287.74453306, 298.1863082 , 298.21313797,299.7961364 , 300.44014621, 311.36703739,
324.08161946, 323.83104867, 327.57942772]+[67.89211699, 64.24130949, 63.92732816]+[60.43748406, 50.92945822, 46.84127056]+[54.94239969, 39.2380389 , 24.5262507 ]+[195.21194215, 232.21351093, 238.5230456 ]+[219.98637949, 221.72468045, 217.98143615]+[226.76576441, 196.59937264, 221.02871965], 'distribution': ['Gamma']*18+['Lognormal']*18,'inference':['ML']*9+['MOM']*9+['ML']*9+['MOM']*9}
df_var = pd.DataFrame(data=d_var)
df_var.head()
# Create the figure: same violin-plot layout as Fig H, but for the
# standard-deviation MAPE results.
fig = plt.figure(figsize=(11.7,8.3))
gs = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0])
my_pal = {"ML": "#2463A3", "MOM": "#B5520E"}
ax=sns.violinplot(x="distribution", y="MAPE", hue="inference",
                  data=df_var, palette=my_pal)
ax.set_ylabel('MAPE (standard deviation) %')
ax.set_xlabel('')
ax.get_legend().remove()  # legend suppressed; colors explained elsewhere
sns.despine()
# Resize to 3.54 x 3.54 inches (~90 mm, single-column width).
width=3.54
height=3.54
fig.set_size_inches(width, height)
plt.subplots_adjust(hspace=.0 , wspace=.00, left=.15, right=.95, top=.95, bottom=.13)
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/juancas9812/Inteligencia-Artificial-2020-3/blob/master/S5_Logistic_Regr_JCS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Pontificia Universidad Javeriana
# Inteligencia Artificial 2020-30
Tarea Semana 5
Juan Camilo Sarmiento Peñuela
**1.** Implementar (adecuar) los dos métodos descritos en: https://ml-cheatsheet.readthedocs.io/en/latest/logistic_regression.html#id13 Con los datos en el csv en teams en la carpeta semana 5 (datos_multivariados.csv). Puede descargar el código también en el github del autor, pero por favor leer primero el link de arriba. https://github.com/bfortuner/ml-glossary
* Primer método - Regresión Logística Binaria:
> Primero se importan los módulos que se van a utilizar, los cuales son numpy para poder hacer uso de vectores y matrices, pandas para obtener los datos, y matplotlib para los graficos. Luego, se copian las funciones que se encuentran en el primer link, y se ajustan para que funcionen con el interprete de python 3, dado que estan escritas para python 2.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(z):
    """Logistic function: map a logit (scalar or ndarray) into (0, 1)."""
    exp_neg_z = np.exp(-z)
    return 1.0 / (1 + exp_neg_z)
def predict(features, weights):
    """Return P(label = 1) for each row of *features* under logistic *weights*."""
    logits = np.dot(features, weights)
    # Apply the logistic function inline (identical to sigmoid()).
    return 1.0 / (1 + np.exp(-logits))
def cost_function(features, labels, weights):
    """Average binary cross-entropy of the logistic model on the given data.

    features: (N, D) design matrix
    labels:   (N,) array of 0/1 labels
    weights:  (D,) weight vector
    Returns the scalar mean of -y*log(p) - (1-y)*log(1-p).
    """
    n_obs = len(labels)
    # Predicted probabilities (logistic of the linear scores).
    probs = 1.0 / (1 + np.exp(-np.dot(features, weights)))
    # Error contribution when the true label is 1 ...
    positive_term = -labels * np.log(probs)
    # ... and when the true label is 0.
    negative_term = (1 - labels) * np.log(1 - probs)
    total = (positive_term - negative_term).sum()
    return total / n_obs
def update_weights(features, labels, weights, lr):
    """One vectorized gradient-descent step on the logistic-regression cost.

    Mutates *weights* in place (and also returns it), matching the original
    in-place subtraction semantics.
    """
    n_samples = len(features)
    # Predicted probabilities (logistic of the linear scores).
    probs = 1.0 / (1 + np.exp(-np.dot(features, weights)))
    # features.T @ (p - y) aggregates the cost slope per feature across all
    # observations; averaging and scaling by the learning rate gives the step.
    gradient = np.dot(features.T, probs - labels)
    gradient /= n_samples
    gradient *= lr
    weights -= gradient  # in-place update
    return weights
def decision_boundary(prob):
    """Threshold a probability at 0.5: return 1 if prob >= .5, else 0."""
    if prob >= .5:
        return 1
    return 0
def classify(predictions):
    """Convert an N-element array of probabilities in [0, 1] into a flat
    array of 0/1 class labels using a 0.5 threshold."""
    threshold = lambda prob: 1 if prob >= .5 else 0
    labels = np.vectorize(threshold)(predictions)
    return labels.flatten()
def train(features, labels, weights, lr, iters):
    """Run *iters* gradient-descent steps.

    Returns the final weights and the per-iteration cost history (useful for
    auditing convergence).
    """
    cost_history = []
    for _ in range(iters):
        weights = update_weights(features, labels, weights, lr)
        # Record the cost after each step.
        cost_history.append(cost_function(features, labels, weights))
    return weights, cost_history
def accuracy(predicted_labels, actual_labels):
    """Return the fraction of entries where predicted equals actual (0.0-1.0)."""
    errors = predicted_labels - actual_labels
    n_wrong = np.count_nonzero(errors)
    return 1.0 - float(n_wrong) / len(errors)
def plot_decision_boundary(trues, falses):
    """Scatter the predicted probabilities of correctly classified points
    (trues, blue circles) and misclassified points (falses, red squares),
    with a horizontal line at the 0.5 decision boundary."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    true_xs = list(range(len(trues)))
    false_xs = list(range(len(falses)))
    ax.scatter(true_xs, trues, s=25, c='b', marker="o", label='Trues')
    ax.scatter(false_xs, falses, s=25, c='r', marker="s", label='Falses')
    plt.legend(loc='upper right');
    ax.set_title("Decision Boundary")
    ax.set_xlabel('N/2')
    ax.set_ylabel('Predicted Probability')
    plt.axhline(.5, color='black')
    plt.show()
```
>Luego de tener todas las funciones, se procede a leer los datos del archivo data_classification.csv, y se implementan las funciones
```
# Load the classification data: first YCOL columns are features, column YCOL
# is the binary label. A bias column of ones is prepended to the features.
data = pd.read_csv("data_classification.csv", sep=';', header=0)
arreglo = data.values
YCOL = 2
Xb = arreglo[:, 0:YCOL]
Xa = np.ones((len(Xb), 1))  # bias column
X = np.concatenate((Xa, Xb), axis=1)
Y = arreglo[:, YCOL]
YCOL2 = 5
thetas = np.array([-12.5, 1.3, 1])  # initial weights (bias, x1, x2)
Num_Iter = 2500
alpha = 0.5
cinit = 112  # sentinel cost; any trained model should come in below this
features = X
labels = Y
# Sweep the learning rate and keep the weights with the lowest final cost.
# BUGFIX: update_weights() mutates its weights argument in place, so without
# .copy() every sweep iteration would keep training the SAME array instead of
# restarting from the initial thetas — the sweep results were not independent.
for alpha in np.arange(0.4, 0.5, 0.001):
    weights, cost = train(X, Y, thetas.copy(), alpha, Num_Iter)
    if cost[-1] < cinit:
        final_w = weights
        cinit = cost[-1]
        alphaopt = alpha
print(final_w)
print(cinit)
print(alphaopt)
# Split predictions into correctly / incorrectly classified points and plot
# them around the 0.5 decision boundary.
count_true = 0
count_false = 0
acum = predict(X, final_w)
prob = classify(acum)
trues = []
falses = []
for i in np.arange(0, acum.size):
    if prob[i] == Y[i]:
        trues.append(acum[i])
    else:
        falses.append(acum[i])
plot_decision_boundary(trues, falses)
```
* Segundo Método - Regresión logística multiclase:
> Para este método se nececita importar algunos modulos de scikit-learn, debido a que se usa este modulo para la implementación de este método
```
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
```
> Ahora se hace la implementación:
```
# Normalize grades to values between 0 and 1 for more efficient computation
# (NOTE(review): feature_range=(-1,1) actually scales into [-1, 1]).
normalized_range = sklearn.preprocessing.MinMaxScaler(feature_range=(-1,1))
# Extract Features + Labels
labels.shape = (100,) #scikit expects this
features = normalized_range.fit_transform(features)
# Create Test/Train (40% held out, fixed random_state for reproducibility)
features_train,features_test,labels_train,labels_test = train_test_split(features,labels,test_size=0.4,random_state=0)
# Scikit Logistic Regression
scikit_log_reg = LogisticRegression()
scikit_log_reg.fit(features_train,labels_train)
#Score is Mean Accuracy
scikit_score = scikit_log_reg.predict(features_test)
precision=accuracy(scikit_score,labels_test)*100
print('Scikit score: ', precision)
#Our Mean Accuracy
# Uses `weights` from the hand-rolled gradient-descent model trained above.
probabilities = predict(features, weights).flatten()
classifications = classify(probabilities)
our_acc = accuracy(classifications,labels.flatten())
print('Our score: ',our_acc)
```
**2.** Correr el ejemplo multiclase al final de: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html , comentar el uso y entradas de cada función que hagan uso de sklearn
```
# Import the Iris dataset bundled with sklearn.datasets.
from sklearn.datasets import load_iris
# Import the logistic-regression estimator from sklearn.linear_model.
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True) # load the dataset's features and labels
clf = LogisticRegression(random_state=0).fit(X, y) # fit the logistic-regression model
clf.predict(X[:2, :]) # predict the class label for the first two samples
clf.predict_proba(X[:2, :]) # per-class probability matrix for those samples
clf.score(X, y) # mean accuracy of the fitted model on (X, y)
```
**3.** Encontrar la derivada de J para un theta cualquiera de la Regresión Logística. Usarla para confirmar las ecuaciones de descenso del gradiente en las presentaciones.

---
# Conclusiones
* Se encontraron problemas al intentar importar y usar los datos. Por esto se recomienda ver el tipo de dato y en caso de ser posible la forma, ya que se va a trabajar con matrices y vectores.
* Asimismo, es importante revisar el tipo de dato que se requiere para utilizar ciertos métodos para las clases que utilizan los diferentes módulos.
* En el caso de la regresión logística binaria, como se estaba usando el descenso del gradiente, se encuentra que al cambiar los valores de los thetas, se converge siempre a los mismos valores de los pesos y del alpha, los cuales son [-16.1602695 1.74538164 1.48650834] y 0.4980000000000001 respectivamente
| github_jupyter |
```
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import scqubits as scq
import scqubits.legacy.sweep_plotting as splot
from scqubits import HilbertSpace, InteractionTerm, ParameterSweep
import numpy as np
```
.. note::
This describes a legacy version of the `HilbertSpace` class which is deprecated with scqubits v1.4.
# Composite Hilbert Spaces, QuTiP Interface
The `HilbertSpace` class provides data structures and methods for handling composite Hilbert spaces which may consist of multiple qubits or qubits and oscillators coupled to each other. To harness the power of QuTiP, a toolbox for studying stationary and dynamical properties of closed and open quantum systems (and much more), `HilbertSpace` provides a convenient interface: it generates `qutip.qobj` objects which are then directly handled by QuTiP.
## Example: two transmons coupled to a harmonic mode
Transmon qubits can be capacitively coupled to a common harmonic mode, realized by an LC oscillator or a transmission-line resonator. The Hamiltonian describing such a composite system is given by:
\begin{equation}
H=H_\text{tmon,1} + H_\text{tmon,2} + \omega_r a^\dagger a + \sum_{j=1,2}g_j n_j(a+a^\dagger),
\end{equation}
where $j=1,2$ enumerates the two transmon qubits, $\omega_r$ is the (angular) frequency of the resonator. Furthermore, $n_j$ is the charge number operator for qubit $j$, and $g_j$ is the coupling strength between qubit $j$ and the resonator.
### Create Hilbert space components
The first step consists of creating the objects describing the individual building blocks of the full Hilbert space. Here, these will be the two transmons and one oscillator:
```
# Build the three subsystems of the composite Hilbert space:
# two transmon qubits and one harmonic (LC / resonator) mode.
tmon1 = scq.Transmon(
    EJ=40.0,
    EC=0.2,
    ng=0.3,
    ncut=40,
    truncated_dim=4  # after diagonalization, keep the 4 lowest levels
)
tmon2 = scq.Transmon(
    EJ=15.0,
    EC=0.15,
    ng=0.0,
    ncut=30,
    truncated_dim=4  # keep the 4 lowest levels of this transmon as well
)
resonator = scq.Oscillator(
    E_osc=4.5,
    truncated_dim=4  # up to 3 photons (0,1,2,3)
)
```
The system objects are next grouped into a Python list, and in this form used for the initialization of a `HilbertSpace` object. Once created, a print call to this object outputs a summary of the composite Hilbert space.
```
# Group the subsystems into one composite Hilbert space; printing the
# object shows a summary of the subsystems and their truncations.
hilbertspace = scq.HilbertSpace([tmon1, tmon2, resonator])
print(hilbertspace)
```
One useful method of the `HilbertSpace` class is `.bare_hamiltonian()`. This yields the bare Hamiltonian of the non-interacting subsystems, expressed as a `qutip.Qobj`:
```
# Bare (non-interacting) Hamiltonian of the subsystems, returned as a qutip.Qobj.
bare_hamiltonian = hilbertspace.bare_hamiltonian()
bare_hamiltonian
```
### Set up the interaction between subsystems
The pairwise interactions between subsystems are assumed to have the general form
$V=\sum_{i\not= j} g_{ij} A_i B_j$,
where $g_{ij}$ parametrizes the interaction strength between subsystems $i$ and $j$. The operator content of the coupling is given by the two coupling operators $A_i$, $B_j$, which are operators in the two respective subsystems.
This structure is captured by setting up an `InteractionTerm` object:
```
g1 = 0.1 # coupling resonator-CPB1 (without charge matrix elements)
g2 = 0.2 # coupling resonator-CPB2 (without charge matrix elements)
# Charge coupling g1 * n_1 * (a + a^dagger) between transmon 1 and the resonator.
interaction1 = InteractionTerm(
    hilbertspace = hilbertspace,
    g_strength = g1,
    op1 = tmon1.n_operator(),
    subsys1 = tmon1,
    op2 = resonator.creation_operator() + resonator.annihilation_operator(),
    subsys2 =resonator
)
# Same coupling form for transmon 2, with strength g2.
interaction2 = InteractionTerm(
    hilbertspace = hilbertspace,
    g_strength = g2,
    op1 = tmon2.n_operator(),
    subsys1 = tmon2,
    op2 = resonator.creation_operator() + resonator.annihilation_operator(),
    subsys2 = resonator
)
```
Each `InteractionTerm` object is initialized by specifying
1. the Hilbert space object to which it will belong
2. the interaction strength coefficient $g_{ij}$
3. `op1`, `op2`: the subsystem operators $A_i$, $B_j$ (these should be operators within the subsystems' respective Hilbert spaces only)
4. `subsys1`: the subsystem objects to which `op1` and `op2` belong
Note: interaction Hamiltonians of the alternative form $V=g_{ij}A_i B_j^\dagger + g_{ij}^* A_i^\dagger B_j$ (a typical form when performing rotating-wave approximation) can be specified by setting `op1` to $A_i$ and `op2` to $B_j^\dagger$, and providing the additional keyword parameter `add_hc = True`.
Now, collect all interaction terms in a list, and insert into the HilbertSpace object.
```
# Register both interaction terms with the HilbertSpace object.
interaction_list = [interaction1, interaction2]
hilbertspace.interaction_list = interaction_list
```
With the interactions specified, the full Hamiltonian of the coupled system can be obtained via the method `.hamiltonian()`. Again, this conveniently results in a `qutip.Qobj` operator:
```
# Full Hamiltonian (bare terms plus interactions) as a qutip.Qobj.
dressed_hamiltonian = hilbertspace.hamiltonian()
dressed_hamiltonian
```
### Obtaining the eigenspectrum via QuTiP
Since the Hamiltonian obtained this way is a proper `qutip.qobj`, all QuTiP routines are now available. In the first case, we are still making use of the scqubits `HilbertSpace.eigensys()` method. In the second case, we use QuTiP's method `.eigenenergies()`:
```
# Lowest four dressed eigenvalues via scqubits' own eigensys()...
evals, evecs = hilbertspace.eigensys(evals_count=4)
print(evals)
# ...and the full spectrum via QuTiP's eigenenergies() on the same Qobj.
dressed_hamiltonian = hilbertspace.hamiltonian()
dressed_hamiltonian.eigenenergies()
```
| github_jupyter |
# RDD basics
This notebook will introduce **three basic but essential Spark operations**. Two of them are the transformations map and filter. The other is the action collect. At the same time we will introduce the concept of persistence in Spark.
## Getting the data and creating the RDD
We will use the reduced dataset (10 percent) provided for the KDD Cup 1999, containing nearly half million network interactions. The file is provided as a Gzip file that we will download locally.
```
# Download the 10%-sample KDD Cup 1999 dataset next to the notebook.
# NOTE(review): this is Python 2 code -- `urllib.urlretrieve` moved to
# `urllib.request.urlretrieve` in Python 3.
import urllib
f = urllib.urlretrieve ("http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz", "kddcup.data_10_percent.gz")
```
Now we can use this file to create our RDD.
```
data_file = "./kddcup.data_10_percent.gz"
# Create an RDD of text lines (`sc` is presumably the notebook-provided
# SparkContext; Spark reads gzip files transparently).
raw_data = sc.textFile(data_file)
```
## The filter transformation
This transformation can be applied to RDDs in order to keep just elements that satisfy a certain condition. More concretely, a function is evaluated on every element in the original RDD. The new resulting RDD will contain just those elements that make the function return True.
For example, imagine we want to count how many normal. interactions we have in our dataset. We can filter our raw_data RDD as follows.
```
normal_raw_data = raw_data.filter(lambda x: 'normal.' in x)  # keep only lines containing the 'normal.' tag
```
Now we can count how many elements we have in the new RDD.
```
from time import time
# Time the count() action: this is the first action on the RDD, so it
# triggers the actual (distributed) evaluation of the filter above.
t0 = time()
normal_count = normal_raw_data.count()
tt = time() - t0
# Python 2 print statements (this notebook targets Python 2 / old PySpark).
print "There are {} 'normal' interactions".format(normal_count)
print "Count completed in {} seconds".format(round(tt,3))
```
The **real calculations** (distributed) in Spark **occur when we execute actions and not transformations.** In this case counting is the action that we execute in the RDD. We can apply as many transformations as we would like in a RDD and no computation will take place until we call the first action which, in this case, takes a few seconds to complete.
## The map transformation
By using the map transformation in Spark, we can apply a function to every element in our RDD. **Python's lambdas are especially expressive for this particular use case.**
In this case we want to read our data file as a CSV formatted one. We can do this by applying a lambda function to each element in the RDD as follows.
```
from pprint import pprint
# Split every line into its comma-separated fields; the map is lazy and
# only runs when take(5) (an action) is called below.
csv_data = raw_data.map(lambda x: x.split(","))
t0 = time()
head_rows = csv_data.take(5)
tt = time() - t0
print "Parse completed in {} seconds".format(round(tt,3))
pprint(head_rows[0])
```
Again, **all action happens once we call the first Spark action** (i.e. take in this case). What if we take a lot of elements instead of just the first few?
```
# Taking many more elements forces the map over a larger part of the RDD,
# so this runs noticeably longer than take(5).
t0 = time()
head_rows = csv_data.take(100000)
tt = time() - t0
print "Parse completed in {} seconds".format(round(tt,3))
```
We can see that it takes longer. The map function is applied now in a distributed way to a lot of elements on the RDD, hence the longer execution time.
## Using map and predefined functions
Of course we can use predefined functions with map. Imagine we want to have each element in the RDD as a key-value pair where the key is the tag (e.g. normal) and the value is the whole list of elements that represents the row in the CSV formatted file. We could proceed as follows.
```
def parse_interaction(line):
    """Turn one CSV-formatted interaction record into a (tag, fields) pair.

    The tag is the 42nd field (index 41) of the record; the value is the
    full list of fields for that row.
    """
    fields = line.split(",")
    return (fields[41], fields)
# Map each line to a (tag, fields) pair using the predefined function above.
key_csv_data = raw_data.map(parse_interaction)
head_rows = key_csv_data.take(5)
pprint(head_rows[0])
```
## The collect action
**Basically it will get all the elements in the RDD into memory for us to work with them.** For this reason it has to be used with care, especially when working with large RDDs.
An example using our raw data.
```
# collect() materialises the WHOLE RDD in driver memory -- use with care.
t0 = time()
all_raw_data = raw_data.collect()
tt = time() - t0
print "Data collected in {} seconds".format(round(tt,3))
```
Every Spark worker node that has a fragment of the RDD has to be coordinated in order to retrieve its part, and then reduce everything together.
As a last example combining all the previous, we want to collect all the normal interactions as key-value pairs.
```
# get data from file
data_file = "./kddcup.data_10_percent.gz"
raw_data = sc.textFile(data_file)
# parse into (tag, fields) key-value pairs
key_csv_data = raw_data.map(parse_interaction)
# keep only pairs whose tag is exactly "normal."
normal_key_interactions = key_csv_data.filter(lambda x: x[0] == "normal.")
# collect everything to the driver and count locally with len()
t0 = time()
all_normal = normal_key_interactions.collect()
tt = time() - t0
normal_count = len(all_normal)
print "Data collected in {} seconds".format(round(tt,3))
print "There are {} 'normal' interactions".format(normal_count)
```
This count matches with the previous count for normal interactions. The new procedure is more time consuming. This is because we retrieve all the data with collect and then use Python's len on the resulting list. Before we were just counting the total number of elements in the RDD by using count.
| github_jupyter |
## 支持向量机 (support vector machines, SVM)
> 支持向量机(support vector machines,SVM)是一种二分类模型,它将实例的特征向量映射为空间中的一些点,SVM 的目的就是想要画出一条线,以 “最好地” 区分这两类点,以至如果以后有了新的点,这条线也能做出很好的分类。SVM 适合中小型数据样本、非线性、高维的分类问题
SVM学习的基本想法是
求解能够正确划分训练数据集并且几何间隔最大的分离超平面
对于线性可分的数据集来说,这样的超平面有无穷多个(即感知机),但是几何间隔最大的分离超平面却是唯一的。
Advantages 优势:
* Effective in high dimensional spaces. 在高维空间中有效。
* Still effective in cases where number of dimensions is greater than the number of samples. 在尺寸数大于样本数的情况下仍然有效。
* Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.
在决策函数中使用训练点的子集(称为支持向量),因此它也具有记忆效率。
* Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.
通用:可以为决策函数指定不同的核函数。提供了通用内核,但也可以指定自定义内核。
disadvantages 缺点:
* If the number of features is much greater than the number of samples, avoid over-fitting in choosing Kernel functions and regularization term is crucial.
当特征个数远大于样本个数时,在选择核函数和正则化项时应避免过拟合。
* SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation (see Scores and probabilities, below).
支持向量机不直接提供概率估计,这些是使用昂贵的五倍交叉验证计算的(见下面的分数和概率)。
[支持向量机](https://blog.csdn.net/qq_31347869/article/details/88071930)
[sklearn文档-svm](https://scikit-learn.org/dev/modules/svm.html#svm)
The sklearn.svm module includes Support Vector Machine algorithms.
|Estimators | description |
|:---- |:---- |
| svm.LinearSVC([penalty, loss, dual, tol, C, …]) | Linear Support Vector Classification. |
| svm.LinearSVR(*[, epsilon, tol, C, loss, …]) | Linear Support Vector Regression. |
| svm.NuSVC(*[, nu, kernel, degree, gamma, …]) | Nu-Support Vector Classification. |
| svm.NuSVR(*[, nu, C, kernel, degree, gamma, …]) | Nu Support Vector Regression. |
| svm.OneClassSVM(*[, kernel, degree, gamma, …]) | Unsupervised Outlier Detection. |
| svm.SVC(*[, C, kernel, degree, gamma, …]) | C-Support Vector Classification. |
| svm.SVR(*[, kernel, degree, gamma, coef0, …]) | Epsilon-Support Vector Regression. |
### Classification 使用支持向量机做分类任务
SVC, NuSVC and LinearSVC are classes capable of performing binary and multi-class classification on a dataset.
```
from sklearn.svm import SVC
import numpy as np
# Toy binary problem: label is 1 when the two features sum to 10 or more.
X = np.random.randint(0,10,(50,2))
y = (X[:,0] + X[:,1]) // 10
clf = SVC()
clf.fit(X, y)
clf.predict([[2., 7.],[3,9]]) # results vary between runs because X is random
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in.

    Parameters
    ----------
    x : array, data to base the x-axis meshgrid on
    y : array, data to base the y-axis meshgrid on
    h : float, step size for the meshgrid (optional)

    Returns
    -------
    xx, yy : ndarray coordinate matrices covering the data range
        padded by one unit on every side
    """
    x_range = np.arange(x.min() - 1, x.max() + 1, h)
    y_range = np.arange(y.min() - 1, y.max() + 1, h)
    return np.meshgrid(x_range, y_range)
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.

    Parameters
    ----------
    ax : matplotlib axes object
    clf : a fitted classifier exposing ``predict``
    xx, yy : meshgrid ndarrays of grid coordinates
    params : extra keyword arguments forwarded to ``contourf``
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
# Four SVM variants with different kernels, all sharing the same C.
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C, max_iter=10000),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, gamma='auto', C=C))
models = (clf.fit(X, y) for clf in models)  # lazily fit each model
# title for the plots
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
# One subplot per fitted model: decision regions plus the raw points.
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
```
Regression
There are three different implementations of Support Vector Regression: SVR, NuSVR and LinearSVR. LinearSVR provides a faster implementation than SVR but only considers the linear kernel, while NuSVR implements a slightly different formulation than SVR and LinearSVR. See Implementation details for further details.
```
# Minimal epsilon-SVR example: fit on two points, predict at their midpoint.
from sklearn.svm import SVR
X = [[0, 0], [2, 2]]
y = [0.5, 2.5]
regr = SVR()
regr.fit(X, y)
regr.predict([[1, 1]])
```
Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
OneClassSVM is based on libsvm.
```
# One-class SVM for unsupervised outlier detection: predict() labels each
# sample +1 (inlier) or -1 (outlier); score_samples() returns raw scores.
from sklearn.svm import OneClassSVM
X = [[0], [0.44], [0.45], [0.46], [1]]
clf = OneClassSVM(gamma='auto')
clf.fit(X)
result = clf.predict(X)
print(result)
scores = clf.score_samples(X)
print(scores)
```
### 使用SVM做异常检测算法
Comparing anomaly detection algorithms for outlier detection on toy datasets
[reference](https://scikit-learn.org/dev/auto_examples/miscellaneous/plot_anomaly_comparison.html)
```
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Albert Thomas <albert.thomas@telecom-paristech.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sklearn
print(sklearn.__version__)
from sklearn.datasets import make_moons, make_blobs
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import SGDOneClassSVM
print(__doc__)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Example settings
n_samples = 300
outliers_fraction = 0.15
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
# define outlier/anomaly detection methods to be compared.
# the SGDOneClassSVM must be used in a pipeline with a kernel approximation
# to give similar results to the OneClassSVM
anomaly_algorithms = [
    ("Robust covariance", EllipticEnvelope(contamination=outliers_fraction)),
    ("One-Class SVM", OneClassSVM(nu=outliers_fraction, kernel="rbf",
                                  gamma=0.1)),
    ("One-Class SVM (SGD)", make_pipeline(
        Nystroem(gamma=0.1, random_state=42, n_components=150),
        SGDOneClassSVM(nu=outliers_fraction, shuffle=True,
                       fit_intercept=True, random_state=42, tol=1e-6)
    )),
    ("Isolation Forest", IsolationForest(contamination=outliers_fraction,
                                         random_state=42)),
    ("Local Outlier Factor", LocalOutlierFactor(
        n_neighbors=35, contamination=outliers_fraction))]
# Define datasets: three blob mixtures, a moons shape, and uniform noise.
blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
datasets = [
    make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5,
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5],
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, .3],
               **blobs_params)[0],
    4. * (make_moons(n_samples=n_samples, noise=.05, random_state=0)[0] -
          np.array([0.5, 0.25])),
    14. * (np.random.RandomState(42).rand(n_samples, 2) - 0.5)]
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 150),
                     np.linspace(-7, 7, 150))
plt.figure(figsize=(len(anomaly_algorithms) * 2 + 4, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)
plot_num = 1
rng = np.random.RandomState(42)
# One subplot row per dataset, one column per algorithm.
for i_dataset, X in enumerate(datasets):
    # Add outliers
    X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))],
                       axis=0)
    for name, algorithm in anomaly_algorithms:
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        plt.subplot(len(datasets), len(anomaly_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        # fit the data and tag outliers
        if name == "Local Outlier Factor":
            y_pred = algorithm.fit_predict(X)
        else:
            y_pred = algorithm.fit(X).predict(X)
        # plot the levels lines and the points
        if name != "Local Outlier Factor":  # LOF does not implement predict
            Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='black')
        colors = np.array(['#377eb8', '#ff7f00'])
        # (y_pred + 1) // 2 maps a -1 prediction to colour 0, +1 to colour 1
        plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
        plt.xlim(-7, 7)
        plt.ylim(-7, 7)
        plt.xticks(())
        plt.yticks(())
        # stamp the fit time in the bottom-right corner of the subplot
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1
plt.show()
```
| github_jupyter |
## 第13讲 认识和绘制数轴
### Problem 问题描述
在长宽分别为600和400像素的绘图区域绘制如下图所示的一条标有刻度、水平方向的带箭头指示方向的数轴。其中数轴的左右两端距离绘图区域左右边界均为20像素,相邻刻度的距离为50像素,刻度线的长度为20像素,表示刻度线数值的数字在刻度线的正下方且底端距离数轴20个像素。
<img src="figures/L013_axis.png" width="600px"/>
### Math Background 数学背景
1. 数轴的构成
2. 数轴原点,正负数在数轴轴上的位置比较
3. 每一个数在数轴上都有一个点相对应,两个数的差在数轴上表示的是这两个数对应的两个点之间的距离。
### Prerequisites 预备知识
#### 1. `write`方法可以在绘图区书写字符串
```
from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
from turtle import goto, seth, write, ht, st, home
width, height = 600, 400  # window width and height (pixels)
setup(width, height, 0, 0)
```
比较提起画笔和放下画笔时下面的代码执行的效果有什么不同
```
# Compare write() with the pen up (pu) versus pen down (pd):
# with move=True the turtle travels along the text, and with the pen
# down that movement also draws a line.
reset()
pu()
write("Sophie", move=True, align="center")
reset()
pd()
write("Tony", move=True, align="center", font=("Arial", 30, "normal"))
reset()
pd()
write("Sophie", move=False, align="center", font=("Arial", 30, "normal"))
```
#### 2. `tuple`元组数据类型
pos_x 是一个tuple类型的变量
```
pos_x = (30, 20) # pos_x is a tuple-typed variable
```
可以使用索引来获取tuple类型变量的元素
```
print(pos_x[0], pos_x[1]) # tuple elements can be read by index
```
可以使用len()方法来获取tuple类型数据的元素个数
```
len(pos_x) # len() returns the number of elements of a tuple
```
不可以更改tuple类型变量里某一个元素的值。例如执行下面的代码将发生错误
```python
pos_x[0] = 40
```
```text
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-24-d852e9299be9> in <module>
----> 1 pos_x[0] = 40
TypeError: 'tuple' object does not support item assignment
```
```
# pos_x[0] = 40 # tuples are immutable: item assignment raises TypeError
```
可以给整个tuple类型变量赋予一个新的tuple值
```
pos_x = (50, 30) # the variable itself can be rebound to a whole new tuple
```
`tuple`型的变量里的元素的类型可以互不相同
```
sophie = ("Sophie", 11, "Female", "Grade4") # a tuple's elements may have mixed types
print(sophie)
```
#### 3. 理解同一个方法在接受不同的参数值时执行结果的比较
对比输出的文字和海龟位置,观察下面的两条`write`方法在接受不同的`align`值或时效果有什么不同。
```
# Compare align="left" vs align="center": the anchor point of the
# written text relative to the turtle's position changes.
reset()
pu()
write("Jason", align="left", font=("Arial", 30, "normal"))
reset()
pu()
write("Jason", align="center", font=("Arial", 30, "normal"))
```
#### 4. 对比输出的文字,观察下面的几条`write`方法在接受不同的`font`值时效果有什么不同。
参数`font`是一个`tuple`类型的变量
```
# The font argument is a (family, size, style) tuple; compare the output
# of the same text written with different font tuples.
reset()
pu()
write("Jason", font=("Arial", 30, "normal"))
reset()
pu()
write("Jason", font=("Arial", 50, "normal"))
reset()
pu()
write("Jason", font=("Times New Roman", 50, "normal"))
reset()
pu()
write("Jason", font=("Arial", 50, "italic"))
# no reset() here, so the previous drawing is kept on screen
pu()
bk(200)  # move back 200 pixels so the two texts do not overlap
write("Jason", font=("Arial", 50, "underline"))
```
#### 5. 区分`()`何时表示元组数据何时表示方法接受的参数
看`()`前面有没有紧跟一个方法名, 下面这行代码声明了一个元组型变量,变量名为`jason`。
```python
jason = ("Jason", ("Arial", 50, "Italic")) #
```
下面这行代码是在执行一个名为`jason`的方法
```python
jason("Jason", ("Arial", 50, "Italic"))
```
#### 6. 练习
编写下面的代码,更改变量`name`的值为你的名字,观察代码执行的效果。
```
# If the turtle helpers have not been imported or setup() has not run
# yet, uncomment the following lines first:
# from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
# from turtle import goto, seth, write, ht, st, home
# width, height = 600, 400  # window width and height (pixels)
# setup(600, 400, 0, 0)
reset()
pu()
ht()
name = "Qiang"
text = "My name is {}.\nNice to meet you.".format(name)
write(text, align="center", font=("Arial", 30, "italic"))
```
### Solution 编程求解
```
from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
from turtle import goto, seth, write, ht, st, home
width, height = 600, 400  # window width and height (pixels)
setup(width, height, 0, 0)
origin = (0, 0)            # position of the origin
padding = 20               # gap between the axis ends and the drawing-area border
max_x = width/2 - padding  # largest x value on the axis
show_arrow = True          # whether to draw the arrow head
mark_interval = 50         # distance between neighbouring tick marks
mark_line_length = 10      # length of each tick mark
text_offset = 20           # distance between a tick label and the axis line
mark_degree = 90           # angle between a tick mark and the axis
arrow_length = 100         # length of each arrow-head stroke
arrow_degree = 30          # angle between an arrow-head stroke and the axis
delta_x = 1                # step by which the current position advances
# Solution 1: without using goto()
reset()  # reset the drawing area: puts the turtle in the middle and erases the page
min_x = -1 * max_x  # smallest x value on the axis, symmetric to max_x
pu()    # pen up: stop drawing while repositioning
home()  # move the turtle to the origin (0, 0)
        # and restore its start heading (facing right)
bk(max_x)  # walk backward to the left end of the axis
pd()    # pen down: ready to draw
# draw the tick marks
cur_x, last_x = min_x, min_x  # current position / position after the last draw
while cur_x <= max_x:
    if cur_x % mark_interval == 0:  # the position is a multiple of the tick spacing
        length_move = cur_x - last_x  # distance the turtle still has to advance
        pd()                      # pen down to draw the next axis segment
        fd(length_move)           # draw a short piece of the axis line
        left(mark_degree)         # turn 90 deg left (turtle faces up) for the tick
        fd(mark_line_length)      # draw the tick mark
        pu()                      # pen up
        bk(mark_line_length + text_offset)  # move down below the axis
        text = str(int(cur_x // mark_interval))
        # tick label, converted from int to string
        write(text, align="center")  # write the label centred under the tick
        fd(text_offset)           # move back up to the axis
        right(mark_degree)        # turn right 90 deg: the turtle faces right again
        last_x = cur_x            # remember where this draw ended
    cur_x += delta_x              # advance the current position by one pixel
pd()                    # pen down
fd(max_x - last_x)      # draw from the last tick to the right end of the axis
if show_arrow:          # draw the arrow head
    right(arrow_degree)      # turn right: turtle faces right and slightly down
    bk(arrow_length)         # draw one side of the arrow head
    fd(arrow_length)         # come back to the axis end (max_x)
    left(arrow_degree * 2)   # turn left: turtle faces right and slightly up
    bk(arrow_length)         # draw the other side of the arrow head
ht()  # hide the turtle
# Solution 2: using goto()
reset()
min_x = -1 * max_x
# draw the axis line
pu()   # pen up
home() # move the turtle to the origin (0, 0)
       # and restore its start heading
goto(min_x, 0)  # go to the left end of the axis line
pd()   # pen down
goto(max_x, 0)  # go to the right end of the axis line
# draw the tick marks
cur_x = min_x   # start at the left end
while cur_x <= max_x:
    if cur_x % mark_interval == 0:
        pu()                 # pen up
        goto(cur_x, 0)       # jump to (cur_x, 0)
        pd()                 # pen down
        goto(cur_x, mark_line_length)  # draw the tick mark
        pu()                 # pen up
        goto(cur_x, -text_offset)      # jump below the axis for the label
        pd()                 # pen down
        text = str(int(cur_x//mark_interval))  # tick label as a string
        write(text, align="center")    # write the tick label
    cur_x += delta_x         # advance by one pixel
if show_arrow:  # draw the arrow head
    arrow_x, arrow_y = max_x - 10, -5
    pu()              # pen up
    goto(max_x, 0)    # jump to the axis end
    pd()              # pen down
    goto(arrow_x, arrow_y)   # draw one side of the arrow head
    pu()              # pen up
    goto(max_x, 0)    # back to the axis end
    pd()              # pen down
    goto(arrow_x, -arrow_y)  # draw the other side of the arrow head
ht()  # hide the turtle
reset()
if show_arrow:  # draw a larger arrow head with goto()
    arrow_x, arrow_y = max_x - 100, -50
    pu()              # pen up
    goto(max_x, 0)    # jump to the axis end
    pd()              # pen down
    goto(arrow_x, arrow_y)   # one side of the arrow head
    pu()              # pen up
    goto(max_x, 0)    # back to the axis end
    pd()              # pen down
    goto(arrow_x, -arrow_y)  # the other side of the arrow head
    goto(max_x, 0)
if show_arrow:  # draw an arrow head with relative moves, for comparison
    right(arrow_degree)      # turn right: turtle faces right and slightly down
    bk(arrow_length)         # draw one side of the arrow head
    fd(arrow_length)         # come back to the axis end
    left(arrow_degree * 2)   # turn left: turtle faces right and slightly up
    bk(arrow_length)         # draw the other side of the arrow head
# longer
ht()  # hide the turtle
bye()
```
### Summary 知识点小结
1. turtle绘图库里的新方法`write`可以在绘图区海龟的当前位置书写文字;
2. 新的数据类型:`tuple`元组数据类型,它与`list`数据类型非常类似,但也有区别;
3. 在执行一个方法时,方法名后面的小括号`()`内可以接受一个或多个不同的数据,这些数据成为该方法可以接受的参数。方法接受的参数的值不一样,执行该方法最后得到的结果也通常不同;
4. 复习格式化字符串的`format`方法;
5. 复习`while`循环,并将`while`过程中循环应用到绘图过程中;
6. 复习操作符`//`和`%`。
### 计算机小知识
像素,字体`font`
### Assignments 作业
1. 仔细阅读本讲示例中给出的两种绘制坐标轴方法,回答下面的问题:
Read carefully the two solutions demonstrated in the lecture, answer the following questions:
1. 给第二种方法中的每一行代码添加注释
Add comments for every code line of the second solution to tell the meaning of each code line.
2. 比较并说出两种方法在绘制坐标轴的差别
Compare the two solutions and tell the difference of them in drawing the axis.
3. 两种方法绘制出来的箭头一模一样吗?为什么?
Are the arrows drawn by the two solutions exactly the same? Why?
(B. the first draws a little part of the line and then it draws a mark line
the second draws the line first and then it going back to draw the line marks)
(C. no because the arrow drawn by the second has a bigger. )
2. 编程绘制如下图所示的水平坐标轴。所用的刻度间距、刻度线长度等排版指标军与本讲示例相同。其中,与本讲示例不同的是:
By programming, draw horizontal axies as the following figure shows. Most of the parameters, including the marker interval, marker length, etc, have the same value as in the lecture. However, there are still some significant differences, which are:
1. 将表示0刻度坐标值的文字“0”的位置向右移动距离10,刻度线仍保持与相邻的刻度线等距离不变; Move the text "0", which indicating the value 0 on the axis, 10 pixels right to its original position. Keep the mark line where it is.
2. 在箭头的下方添加字母"x",字母"x"使用的字体是"Arial",字号大小为10,风格为“斜体”。Add a letter "x" under the arrow at the right end of the axis, use font "Arial", size 10, and "italic" to write the "x"
3. 当调整绘图区域的大小为宽为800像素时,你的代码应该仅需要更新绘图区的宽度而不改变其他地方就能直接调整数轴长度和刻度的显示。When the width of drawing area changed to 800 pixels from 600 pixels, your codes should only need to change the value of `width` while keep others unchanged to draw the axis with new length and markers.
<img src="figures/L013_assignment1.png" />
<img src="figures/L013_assignment1_2.png" />
```
from turtle import setup, reset, pu, pd, bye, left, right, fd, bk, screensize
from turtle import goto, seth, write, ht, st, home, speed
width, height = 400, 500  # window width and height (pixels)
setup(width, height, 0, 0)
origin = (0, 0)            # position of the origin
padding = 20               # gap between the axis ends and the drawing-area border
max_x = width/2 - padding  # largest x value on the axis
show_arrow = True          # whether to draw the arrow head
mark_interval = 50         # distance between neighbouring tick marks
mark_line_length = 10      # length of each tick mark
text_offset = 20           # distance between a tick label and the axis line
mark_degree = 90           # angle between a tick mark and the axis
arrow_length = 10          # length of each arrow-head stroke
arrow_degree = 30          # angle between an arrow-head stroke and the axis
delta_x = 1
# NOTE(review): the settings below duplicate the block above verbatim.
origin = (0, 0)            # position of the origin
padding = 20               # gap between the axis ends and the drawing-area border
max_x = width/2 - padding  # largest x value on the axis
show_arrow = True          # whether to draw the arrow head
mark_interval = 50         # distance between neighbouring tick marks
mark_line_length = 10      # length of each tick mark
text_offset = 20           # distance between a tick label and the axis line
mark_degree = 90           # angle between a tick mark and the axis
arrow_length = 10          # length of each arrow-head stroke
arrow_degree = 30          # angle between an arrow-head stroke and the axis
delta_x = 1                # step by which the current position advances
reset()
# Solution 2: using goto()
reset()
min_x = -1 * max_x
# draw the axis line
pu()   # pen up
home() # move the turtle to the origin (0, 0)
       # and restore its start heading
goto(min_x, 0)  # go to the left end of the axis line
pd()   # pen down
goto(max_x, 0)  # go to the right end of the axis line
# draw the tick marks
cur_x = min_x   # start at the left end
while cur_x <= max_x:
    if cur_x % mark_interval == 0:
        pu()                 # pen up
        goto(cur_x, 0)       # jump to (cur_x, 0)
        pd()                 # pen down
        goto(cur_x, mark_line_length)  # draw the tick mark
        pu()                 # pen up
        goto(cur_x, -text_offset)      # jump below the axis for the label
        pd()                 # pen down
        if cur_x == 0:
            # assignment 2.1: shift the "0" label 10 pixels to the right
            pu()
            fd(10)
            pd()
        text = str(int(cur_x//mark_interval))  # tick label as a string
        write(text, align="center")    # write the tick label
    cur_x += delta_x         # advance by one pixel
if show_arrow:  # draw the arrow head
    arrow_x, arrow_y = max_x - 10, -5
    pu()              # pen up
    goto(max_x, 0)    # jump to the axis end
    pd()              # pen down
    goto(arrow_x, arrow_y)   # one side of the arrow head
    pu()              # pen up
    goto(max_x, 0)    # back to the axis end
    pd()              # pen down
    goto(arrow_x, -arrow_y)  # the other side of the arrow head
pu()
goto(max_x, 0)
right(90)
fd(text_offset)
# assignment 2.2: label the axis with an italic "x" below the arrow
write("x", move=False, align="center", font=("Arial", 10, "italic"))
ht()  # hide the turtle
ht()
st()
goto(arrow_x, -arrow_y)  # NOTE(review): looks like a leftover debug move -- confirm
```
3. 编程绘制一条如下图所示的垂直方向上的坐标轴。要求:By programming, draw an ertical axis as the following figure shows. Requirement:
1. 该图所是的坐标轴基本上是把水平方向的坐标轴围绕这坐标原点向左侧旋转90度得到;
The axis can basically be considered as a 90 degree of anti-closewise rotation of the horizontal axis illustrated in the lecture with original zero point as the rotation center;
2. 大部分控制数轴风格的参数值与示例中的一样,下列除外:但是刻度线位于坐标轴的右侧,刻度值位于坐标轴的左侧。Most of the parameters controlling the style of the axis are same as introduced in the lecture, except: the marker lines are located on right side of the axis line, and the marker values are on the left side;
3. 隐藏表示0刻度坐标值的文字“0”以及对应的刻度线; Hide the marker line and the marker value for origin point;
4. 在箭头的左侧添加字母"y",字母"y"使用的字体是"Arial",字号大小为10,风格为“斜体”。Add the letter "y" on left side of the axis end, the font for "y" is "Arial", size is 10, and style is "italic";
5. 如果绘图区的高度发生改变不再是400像素,你的代码应仅需要修改一处就能重新绘制出填满大部分(保留上下个20像素高的间隙)绘图区高度的数轴。If the height of drawing area is changed to any other value other than 400 pixels, your codes should only need to change one place in order to draw the new vertical axis that fullfill the most height of the draw area (keep 20 pixels paddings for both ends).
<img src="figures/L013_assignment3.png" style="align:center" height="400px"/>
```
reset()
#TODO: Add your own codes here 在这里添加你自己的代码
# Solution 2: absolute positioning with goto().
# Draws a VERTICAL axis: the axis line runs from (0, min_y) to (0, max_x),
# marker lines sit on the RIGHT of the line, marker values on the LEFT,
# the origin's mark and value are hidden, and a "y" label is written on
# the left of the arrow head (requirements 2-4).
# BUG FIX: the original drew the line vertically but placed every tick and
# label along the x-axis (goto(cur_x, 0)), so the marks never touched the
# axis; it also drew the origin's tick and labelled the end with "x".
min_y = -1 * max_x                      # axis minimum mirrors the maximum
# --- axis line ---
pu()                                    # pen up: move without drawing
home()                                  # back to (0, 0), heading east
goto(0, min_y)                          # jump to the bottom end of the axis
pd()                                    # pen down: start drawing
goto(0, max_x)                          # draw the axis line up to the top end
# --- marker lines and values ---
cur_y = min_y
while cur_y <= max_x:
    # a mark only at multiples of the interval; skip the origin (req. 3)
    if cur_y % mark_interval == 0 and cur_y != 0:
        pu()
        goto(0, cur_y)                  # stand on the axis line
        pd()
        goto(mark_line_length, cur_y)   # tick on the RIGHT of the axis
        pu()
        goto(-text_offset, cur_y)       # value on the LEFT of the axis
        write(str(int(cur_y // mark_interval)), align="center")
    cur_y += delta_x
# --- arrow head and axis label ---
if show_arrow:
    head_dx, head_y = 5, max_x - 10     # half-width / base height of the head
    pu()
    goto(0, max_x)                      # tip of the axis
    pd()
    goto(-head_dx, head_y)              # left edge of the arrow head
    pu()
    goto(0, max_x)
    pd()
    goto(head_dx, head_y)               # right edge of the arrow head
    pu()
    goto(-text_offset, max_x - text_offset)  # left of the arrow head (req. 4)
    write("y", move=False, align="center", font=("Arial", 10, "italic"))
ht()                                    # hide the turtle
ht()  # hide the turtle left over from the previous drawing
reset()  # clear the canvas and reset the turtle state
pd()  # pen down
st()  # show the turtle while it draws
speed(2)  # slow animation speed so the drawing can be followed
min_x = -1 * max_x  # axis minimum mirrors the allowed maximum
pu()  # pen up: reposition without drawing
home()  # move the turtle to the origin (0, 0)
right(90)  # turn to face straight down (south)
fd(max_x)  # walk down to the bottom end of the vertical axis
pd()  # pen down: ready to draw the axis line
# Draw the axis line and its marks in one upward pass: `bk()` moves the
# south-facing turtle upward, so the line is drawn from bottom to top.
cur_x, last_x = min_x, min_x  # current scan position / last drawn position
while cur_x <= max_x:  # scan the whole axis, one pixel at a time
    if cur_x % mark_interval == 0:  # position is a multiple of the interval
        length_move = cur_x - last_x  # segment still to be drawn
        pd()  # pen down
        bk(length_move)  # draw that segment of the axis line (moving up)
        left(mark_degree)  # face east: tick goes on the RIGHT of the axis
        fd(mark_line_length)  # draw the marker line
        pu()  # pen up
        bk(mark_line_length + text_offset)  # retreat past the axis to the LEFT side
        text = str(int(cur_x // mark_interval))# marker value as a string
        if cur_x == 0:
            # requirement 3: hide the value "0" — reposition without writing
            # NOTE(review): the tick for the origin is still drawn above;
            # the requirement asks to hide the line as well — confirm.
            fd(text_offset)
            right(90)
        else:
            write(text, align="center")  # write the marker value, centred
            fd(text_offset)  # step east back onto the axis line
            right(mark_degree)  # face south again, ready for the next segment
        last_x = cur_x  # remember where drawing stopped
    cur_x += delta_x  # advance the scan position by one pixel
pd()  # pen down
fd(max_x - last_x)  # draw the final segment up to the top of the axis
if show_arrow:  # arrow head at the top end of the axis
    bk(60)
    right(arrow_degree)  # turn off-axis for the first arrow edge
    fd(arrow_length)  # draw one edge of the arrow head
    bk(arrow_length)  # back to the axis tip
    left(arrow_degree * 2)  # swing to the other side
    fd(arrow_length)  # draw the other edge of the arrow head
    pu()
    right(120)
    fd(20)
    # NOTE(review): requirement 4 asks for font size 10 — size 15 used here.
    write("y", move=False, align="left", font=("Arial", 15, "italic"))
    ht()  # hide the turtle
    st()
    home()
```
4. 编程绘制一条如下图所示的水平坐标轴。与本讲示例不同的是:By programming, draw a horizontal axis with major and minor marker lines as shown in the figure. Most of the parameters that control the style of the axis remain the same as introduced in the lecture, except:
    1. 在刻度线的内部再绘制9条短的次要刻度线,这样原来相邻的两条刻度线被等间距地分为10个等分,每个等分对应的长度为5;Add 9 minor marker lines between two neighbouring major marker lines so that every major marker interval is divided into 10 equal minor marker intervals, each 5 pixels long;
2. 与原来刻度线的宽度为10不同,次要刻度线的宽度为6; the length of the minor marker line is 6 pixels, keep the length of the major marker line 10 pixels unchanged;
    3. (困难,可选做)在左右两侧整数刻度之外的区域**不要**绘制次要刻度线;(Difficult, Optional) Do **NOT** draw minor marker lines in the regions beyond the smallest and largest major marker values on either side;
4. 将表示0刻度坐标值的文字“0”的位置向右移动距离10,刻度线仍保持与相邻的刻度线等距离不变; Move the text "0", which indicating the value 0 on the axis, 10 pixels right to its original position. Keep the mark line where it is;
5. 在箭头的下方添加字母"x",字母"x"使用的字体是"Arial",字号大小为10,风格为“斜体”。Add a letter "x" under the arrow at the right end of the axis, use font "Arial", size 10, and "italic" to write the "x";
6. 当调整绘图区域的大小为宽为800像素时,你的代码应该仅需要更新绘图区的宽度而不改变其他地方就能直接调整数轴长度和刻度的显示。When the width of drawing area changed to 800 pixels from 600 pixels, your codes should only need to change the value of `width` while keep others unchanged to draw the axis with new length and markers.
<img src="figures/L013_assignment4.png" style="align:center" height="400px"/>
```
# Parameters controlling the style of the axis.
origin = (0, 0) # position of the origin on the canvas
padding = 20 # gap between the axis ends and the drawing-area border
max_x = width/2 - padding # largest x value on the axis (width is set elsewhere)
show_arrow = True # whether to draw the arrow head
mark_interval = 50 # distance between two neighbouring major marker lines
mark_line_length = 10 # length (height) of a major marker line
text_offset = 20 # distance between a marker value and the axis line
minor_mark_line_interval = 5 # distance between two neighbouring minor marker lines
minor_mark_line_length = 6 # length (height) of a minor marker line
minor_mark_degree = 90 # angle between a minor marker line and the axis
mark_degree = 90 # angle between a major marker line and the axis
arrow_length = 100 # length of each arrow-head edge
arrow_degree = 30 # angle between an arrow-head edge and the axis
delta_x = 1 # step size when scanning along the axis
reset()
#TODO: Add your own codes here 在这里添加你自己的代码
# Horizontal axis with major marker lines every `mark_interval` pixels and
# 9 shorter minor marker lines (every `minor_mark_line_interval` pixels)
# between two neighbouring major marks.  Only `width` needs to change to
# redraw the axis for a different drawing-area width (requirement 6).
min_x = -1 * max_x                      # axis minimum mirrors the maximum
extreme_major = max_x // mark_interval * mark_interval  # outermost major mark
# --- axis line ---
pu()                                    # pen up: move without drawing
home()                                  # back to (0, 0), heading east
goto(min_x, 0)                          # jump to the left end of the axis
pd()                                    # pen down: start drawing
goto(max_x, 0)                          # draw the axis line to the right end
# --- marker lines and values ---
# BUG FIX: the original drew a major mark after *counting* 9 minor marks,
# so major marks were not aligned to multiples of `mark_interval` (the very
# first tick at min_x came out as a minor one).  Decide by position instead.
cur_x = min_x
while cur_x <= max_x:
    if cur_x % mark_interval == 0:
        # major marker line plus its value below the axis
        pu()
        goto(cur_x, 0)
        pd()
        goto(cur_x, mark_line_length)
        pu()
        # requirement 4: shift only the "0" label 10 pixels to the right
        label_x = cur_x + 10 if cur_x == 0 else cur_x
        goto(label_x, -text_offset)
        write(str(int(cur_x // mark_interval)), align="center")
    elif -extreme_major < cur_x < extreme_major:
        # requirement 3: minor marks only between the outermost major marks
        pu()
        goto(cur_x, 0)
        pd()
        goto(cur_x, minor_mark_line_length)
    cur_x += minor_mark_line_interval   # 10 steps per major interval
# --- arrow head and axis label ---
if show_arrow:
    arrow_x, arrow_y = max_x - 10, -5   # base point offsets of the arrow head
    pu()
    goto(max_x, 0)                      # tip of the axis
    pd()
    goto(arrow_x, arrow_y)              # lower edge of the arrow head
    pu()
    goto(max_x, 0)
    pd()
    goto(arrow_x, -arrow_y)             # upper edge of the arrow head
    pu()
    goto(max_x, 0)
    right(90)                           # face downward...
    fd(text_offset)                     # ...and drop below the arrow
    write("x", move=False, align="center", font=("Arial", 10, "italic"))
ht()                                    # hide the turtle
```
<span style="color:#ff0000; font-size:300%"><u>Good</u></span>
| github_jupyter |
# Thermal Speed
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from plasmapy.formulary import (
Maxwellian_speed_1D,
Maxwellian_speed_2D,
Maxwellian_speed_3D,
)
from plasmapy.formulary.parameters import thermal_speed
```
The thermal_speed function can be used to calculate the thermal velocity for a Maxwellian velocity distribution. There are three common definitions of the thermal velocity, which can be selected using the "method" keyword, which are defined for a 3D velocity distribution as
- 'most_probable' <br>
$v_{th} = \sqrt{\frac{2 k_B T}{m}}$
- 'rms' <br>
$v_{th} = \sqrt{\frac{3 k_B T}{m}}$
- 'mean_magnitude' <br>
$v_{th} = \sqrt{\frac{8 k_B T}{m\pi}}$
The differences between these velocities can be seen by plotting them on a 3D Maxwellian speed distribution.
```
# Electron temperature and a grid of speeds to evaluate the distribution on.
T = 1e5 * u.K
speeds = np.linspace(0, 8e6, num=600) * u.m / u.s
# 3D Maxwellian speed distribution for electrons at temperature T.
pdf_3D = Maxwellian_speed_3D(speeds, T=T, particle="e-")
fig, ax = plt.subplots(figsize=(4, 3))
# The three common thermal-speed definitions, evaluated for a 3D distribution.
v_most_prob = thermal_speed(T=T, particle="e-", method="most_probable", ndim=3)
v_rms = thermal_speed(T=T, particle="e-", method="rms", ndim=3)
v_mean_magnitude = thermal_speed(T=T, particle="e-", method="mean_magnitude", ndim=3)
# Plot the distribution with each characteristic speed, normalized by v_rms.
ax.plot(speeds / v_rms, pdf_3D, color="black", label="Maxwellian")
ax.axvline(x=v_most_prob / v_rms, color="blue", label="Most Probable")
ax.axvline(x=v_rms / v_rms, color="green", label="RMS")
ax.axvline(x=v_mean_magnitude / v_rms, color="red", label="Mean Magnitude")
ax.set_xlim(-0.1, 3)
ax.set_ylim(0, None)
ax.set_title("3D")
ax.set_xlabel("|v|/|v$_{rms}|$")
ax.set_ylabel("f(|v|)")
```
Similar speeds are defined for 1D and 2D distributions. The differences between these definitions can be illustrated by plotting them on their respective Maxwellian speed distributions.
```
# Compare the three thermal-speed definitions on the 1D, 2D and 3D
# Maxwellian speed distributions, one subplot per dimensionality.
pdf_1D = Maxwellian_speed_1D(speeds, T=T, particle="e-")
pdf_2D = Maxwellian_speed_2D(speeds, T=T, particle="e-")
pdfs = [pdf_1D, pdf_2D, pdf_3D]
fig, ax = plt.subplots(ncols=3, figsize=(10, 3))
for n, pdf in enumerate(pdfs):
    ndim = n + 1  # dimensionality of the distribution in this subplot
    v_most_prob = thermal_speed(T=T, particle="e-", method="most_probable", ndim=ndim)
    v_rms = thermal_speed(T=T, particle="e-", method="rms", ndim=ndim)
    v_mean_magnitude = thermal_speed(
        T=T, particle="e-", method="mean_magnitude", ndim=ndim
    )
    # Speeds are normalized by v_rms so the three panels share one x scale.
    ax[n].plot(speeds / v_rms, pdf, color="black", label="Maxwellian")
    ax[n].axvline(x=v_most_prob / v_rms, color="blue", label="Most Probable")
    ax[n].axvline(x=v_rms / v_rms, color="green", label="RMS")
    ax[n].axvline(x=v_mean_magnitude / v_rms, color="red", label="Mean Magnitude")
    ax[n].set_xlim(-0.1, 3)
    ax[n].set_ylim(0, None)
    ax[n].set_title("{:d}D".format(ndim))
    ax[n].set_xlabel("|v|/|v$_{rms}|$")
    ax[n].set_ylabel("f(|v|)")
ax[2].legend(bbox_to_anchor=(1.9, 0.8), loc="upper right")
# BUG FIX: tight_layout() was called BEFORE plt.subplots() created this
# figure, so it had no effect; call it after the axes exist.  The unused
# `dim = [1, 2, 3]` list was removed.
plt.tight_layout()
```
| github_jupyter |
#### Copyright IBM All Rights Reserved.
#### SPDX-License-Identifier: Apache-2.0
# Db2 Sample For Scikit-Learn
In this code sample, we will show how to use the Db2 Python driver to import data from our Db2 database. Then, we will use that data to create a machine learning model with scikit-learn.
Many wine connoisseurs love to taste different wines from all over the world. Most importantly, they want to know how the quality differs between wines based on their ingredients. Some of them also want to be able to predict the quality before even tasting a wine. In this notebook, we will be using a dataset that has collected certain attributes of many wine bottles that determine the quality of the wine. Using this dataset, we will help our wine connoisseurs predict the quality of wine.
This notebook will demonstrate how to use Db2 as a data source for creating machine learning models.
Prerequisites:
1. Python 3.6 and above
2. Db2 on Cloud instance (using free-tier option)
3. Data already loaded in your Db2 instance
4. Have Db2 connection credentials on hand
We will be importing two libraries: `ibm_db` and `ibm_db_dbi`. `ibm_db` is a library with low-level functions that connect directly to our Db2 database. To make things easier for you, we will be using `ibm_db_dbi`, which communicates with `ibm_db` and gives us an easy interface to interact with our data and import it as a pandas dataframe.
For this example, we will be using the [winequality-red dataset](../data/winequality-red.csv), which we have loaded into our Db2 instance.
NOTE: This notebook is intended to run within a Docker container. If `!easy_install ibm_db` doesn't work in your regular Jupyter environment, you may need to run this notebook within a Docker container as well.
## 1. Import Data
Let's first install and import all the libraries needed for this notebook. Most important we will be installing and importing the db2 python driver `ibm_db`.
```
!pip install sklearn
!easy_install ibm_db
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# The two python ibm db2 drivers we need
import ibm_db
import ibm_db_dbi
```
Now let's import our data from our data source using the python db2 driver.
```
# replace only <> credentials
# Build the ODBC connection string (DSN) for the Db2 instance.
dsn = "DRIVER={{IBM DB2 ODBC DRIVER}};" + \
      "DATABASE=<DATABASE NAME>;" + \
      "HOSTNAME=<HOSTNAME>;" + \
      "PORT=50000;" + \
      "PROTOCOL=TCPIP;" + \
      "UID=<USERNAME>;" + \
      "PWD=<PWD>;"

hdbc = ibm_db.connect(dsn, "", "")   # low-level driver connection
hdbi = ibm_db_dbi.Connection(hdbc)   # DB-API wrapper that pandas can use

sql = 'SELECT * FROM <SCHEMA NAME>.<TABLE NAME>'

# BUG FIX: the module was imported as `pd`, so `pandas.read_sql(...)` raised
# NameError; use the imported alias.  (Also fixed the <HOSTNMAE> placeholder
# typo above.)
wine = pd.read_sql(sql, hdbi)
#wine = pd.read_csv('../data/winequality-red.csv', sep=';')
wine.head()
```
## 2. Data Exploration
In this step, we are going to explore our data in order to gain insight. We hope to be able to make some assumptions about our data before we start modeling.
```
# Summary statistics for every column of the dataset.
wine.describe()

# Descriptive statistics of the target column `quality`.
# (Locals renamed from *_price to *_quality: this dataset predicts wine
# quality, not house prices — the original names were copied from another
# example.)
minimum_quality = np.amin(wine['quality'])
maximum_quality = np.amax(wine['quality'])
mean_quality = np.mean(wine['quality'])
median_quality = np.median(wine['quality'])
std_quality = np.std(wine['quality'])

# BUG FIX: the header said "housing dataset"; this is the wine dataset.
print("Statistics for wine quality dataset:\n")
print("Minimum quality: {}".format(minimum_quality))
print("Maximum quality: {}".format(maximum_quality))
print("Mean quality: {}".format(mean_quality))
print("Median quality: {}".format(median_quality))
print("Standard deviation of quality: {}".format(std_quality))

# Pairwise correlations; then rank every feature by its correlation with
# the target, strongest positive first.
wine.corr()
corr_matrix = wine.corr()
corr_matrix["quality"].sort_values(ascending=False)
```
## 3. Data Visualization
```
# Histogram of every numeric column to inspect the feature distributions.
wine.hist(bins=50, figsize=(30,25))
plt.show()
# Box plot of the target column to spot outliers in `quality`.
boxplot = wine.boxplot(column=['quality'])
```
## 4. Creating Machine Learning Model
Now that we have cleaned and explored our data. We are ready to build our model that will predict the attribute `quality`.
```
# Separate the target (`quality`) from the feature columns.
wine_value = wine['quality']
wine_attributes = wine.drop(['quality'], axis=1)
from sklearn.preprocessing import StandardScaler
# Let us scale our data first
# Standardize the features to zero mean / unit variance before PCA.
sc = StandardScaler()
wine_attributes = sc.fit_transform(wine_attributes)
from sklearn.decomposition import PCA
# Apply PCA to our data
# Reduce the scaled features to 8 principal components.
pca = PCA(n_components=8)
x_pca = pca.fit_transform(wine_attributes)
# NOTE(review): `x_pca` is never used afterwards — the train/test split
# below feeds the scaled `wine_attributes` to the model instead.  Confirm
# whether the PCA-reduced features were meant to be used.
```
We need to split our data into train and test data.
```
from sklearn.model_selection import train_test_split
# Split our data into test and train data
# 75% / 25% train/test split of the scaled features and the target.
# NOTE(review): no random_state is set, so the split (and the accuracy
# reported below) changes on every run; also, this uses `wine_attributes`
# rather than the PCA output `x_pca` computed in the previous cell — confirm
# which was intended.
x_train, x_test, y_train, y_test = train_test_split( wine_attributes,wine_value, test_size = 0.25)
```
We will be using Logistic Regression to model our data
```
# Model the data with logistic regression and evaluate on the test split.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix

# Fit the classifier on the training split.
lr = LogisticRegression()
lr.fit(x_train, y_train)

# Predict labels for the held-out test split.
lr_predict = lr.predict(x_test)

# Summarize performance: confusion matrix and accuracy as a percentage.
lr_conf_matrix = confusion_matrix(y_test, lr_predict)
lr_acc_score = accuracy_score(y_test, lr_predict)
print(lr_conf_matrix)
print(lr_acc_score * 100)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.