text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# QDAE (Quantized Distribution Auto Encoder)
Basic question: Can we learn latent variable probability distribution?
Here we have single scalar value AE, so a very rudimentary problem.
x -> qd(h) -> h' -> x_bar
qd(h) is a quantized probability distribution of the latent variable h
h' is a weighted sum of qd(h) where the weights are linspace(0, 1).
x_bar is output of the network, trained to be same as x
1. linspace(0,1) above encourages qd(h) to map monotonically w.r.t. values of x.
2. Regularizer: smoothness encourages qd(h) to be smooth, i.e. low variance
3. Regularizer: Sum to 1, encourages qd(h) to be a probability distribution
4. Regularizer: use all elements of resolution, encourages distributed/sparse coding
```
# http://pytorch.org/
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
#!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision
!pip install torch
!pip install tqdm
!pip install dotmap
from dotmap import DotMap
import logging
import numpy as np
import os
import random
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from tqdm import tqdm, tqdm_notebook
from scipy.stats import norm
# device = "cuda" if torch.cuda.is_available() else "cpu"
device = "cpu"
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s.%(msecs)03d %(name)s:%(funcName)s %(levelname)s:%(message)s',
datefmt="%M:%S")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from skimage.draw import line_aa
%matplotlib inline
plt.style.use('classic')
# from google.colab import drive
# drive.mount('/content/gdrive')
# save_path_prefix = '/content/gdrive/My Drive/Colab Notebooks/saved/QDL_01'
def show_image(image, vmin=None, vmax=None, title=None, print_values=False, figsize=(4, 4)):
    """Render a single 2-D tensor as a heatmap.

    image: tensor to display (moved to CPU before plotting).
    vmin/vmax: color-scale limits passed straight to imshow.
    title: optional figure title.
    print_values: when True, also print the raw pixel values.
    """
    pixels = image.cpu().numpy()
    fig, axis = plt.subplots(figsize=figsize)
    if title:
        plt.title(title)
    axis.imshow(pixels, vmin=vmin, vmax=vmax, interpolation='none', cmap=plt.cm.plasma)
    plt.show()
    if print_values:
        print(pixels)
def show_image_grid(images, vmin=0, vmax=1, nrows=None, ncols=None, fig_width=30):
    """Show a batch of 2-D images as an nrows x ncols grid of heatmaps.

    images: tensor of shape (count, height, width).
    When nrows/ncols are omitted a square layout of side sqrt(count) is
    assumed (count must then be a perfect square).
    """
    shape = images.shape
    assert len(shape) == 3
    if nrows is None or ncols is None:
        side = int(shape[0] ** 0.5)
        nrows, ncols = side, side
    assert images.shape[0] == nrows * ncols, f"{images.shape[0]} != {nrows} * {ncols}"
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
                             figsize=(fig_width, fig_width * nrows / ncols),
                             subplot_kw={'xticks': [], 'yticks': []})
    fig.subplots_adjust(left=0.03, right=0.97, hspace=0, wspace=0)
    flat_axes = axes.flat
    for idx in np.arange(shape[0]):
        flat_axes[idx].axis("off")
        flat_axes[idx].imshow(images[idx].detach().cpu().numpy(),
                              vmin=vmin, vmax=vmax, interpolation='none',
                              cmap=plt.cm.plasma, aspect='auto')
    plt.tight_layout()
    plt.show()
# Creates pdf for each item
# (input_count, feature_count, height, width) => (input_count, feature_count * resolution, height, width)
def to_pdf(mu, var, resolution=10):
mu_shape = mu.shape
input_count, feature_count, height, width = mu_shape
step = 1.0 / (resolution + 1)
assert mu_shape == var.shape
assert resolution > 0
assert resolution < 50
# mu and logvar: move features to the end and flatten
print("to_pdf: mu", mu.shape)
mu_combined = mu.detach().permute(0, 2, 3, 1).contiguous().view(-1).cpu().numpy()
print("to_pdf: mu_combined", mu_combined.shape)
var = torch.clamp(var, step * 0.005, 3.0)
var_combined = var.detach().permute(0, 2, 3, 1).contiguous().view(-1).cpu().numpy()
print("to_pdf: var_combined", var_combined.shape)
# pdf for each item
rr = np.arange(step, 1, step)
pdfs = []
for i in np.arange(mu_combined.shape[0]):
pdf = norm.pdf(rr, mu_combined[i], var_combined[i])
pdf = pdf / pdf.sum()
pdfs.append(pdf)
mu__ = torch.as_tensor(pdfs)
print("to_pdf: mu__", mu__.shape) # (*, resolution)
mu__ = mu__.view(-1, feature_count, resolution)
print("to_pdf: mu__", mu__.shape) # (*, feature_count, resolution)
mu__ = mu__.view(input_count, height, width, feature_count * resolution).contiguous()
print("to_pdf: mu__", mu__.shape) # (input_count, height, width, feature_count * resolution)
#mu__ = mu__.permute(0, 3, 1, 2).contiguous() # (input_count, feature_count * resolution, height, width)
#print("to_pdf: mu__", mu__.shape)
return mu__
# plt.plot(to_pdf(torch.tensor([[[[0.49]]]]), torch.tensor([[[[0.04]]]]), resolution=40).reshape(-1, 1).numpy())
```
## Train
Use x values in the range 0-1
```
# x -> QD(x) -> x_bar
# Quantized distribution auto encoder
class QDAE(nn.Module):
    """Quantized Distribution Auto Encoder.

    Encodes a scalar input into a discrete (quantized) probability
    distribution over `resolution` bins, collapses it to a scalar latent
    via a fixed linspace(0, 1) weighting, and decodes back to the input.
    """

    def __init__(self, input_output_size, hidden_size, latent_feature_count, resolution):
        super(QDAE, self).__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.WARN)
        self.input_output_size = input_output_size
        self.hidden_size = hidden_size
        self.latent_feature_count = latent_feature_count
        self.resolution = resolution
        # Encoder: input -> hidden -> hidden -> quantized distribution.
        self.enc1 = nn.Linear(input_output_size, hidden_size)
        self.enc2 = nn.Linear(hidden_size, hidden_size)
        self.enc3 = nn.Linear(hidden_size, latent_feature_count * resolution)
        # Decoder: collapsed scalar latent(s) -> hidden -> reconstruction.
        self.dec1 = nn.Linear(latent_feature_count, hidden_size)
        self.dec2 = nn.Linear(hidden_size, input_output_size)

    def encode(self, x):
        """Map input to qd(h): per-bin activations squashed into (0, 1)."""
        x = self.enc1(x)
        x = F.relu(x)
        x = self.enc2(x)
        x = F.relu(x)
        x = self.enc3(x)
        x = torch.sigmoid(x)
        return x

    def decode(self, h):
        """Reconstruct the input from the collapsed latent h."""
        y = self.dec1(h)
        y = F.relu(y)
        # torch.sigmoid instead of the deprecated F.sigmoid, and for
        # consistency with encode().
        y = self.dec2(y)
        x_bar = torch.sigmoid(y)
        return x_bar

    def forward(self, x):
        """Return (qd_h, x_bar): quantized distribution and reconstruction."""
        self.logger.debug(f"x {x.shape}")
        qd_h = self.encode(x)
        # Collapse the distribution with fixed increasing weights: forces
        # smaller x to use smaller indices and larger x to use larger ones.
        # Use this in conjunction with the regularizers in the training loop.
        h = qd_h @ torch.linspace(0, 1, self.resolution).unsqueeze(dim=1)
        x_bar = self.decode(h)
        return qd_h, x_bar
def train(model, device, X, optimizer, epochs, log_interval):
    """Fit `model` on the full batch X with distribution regularizers.

    Loss = reconstruction MSE plus three RMS penalties on qd_h:
    smoothness of adjacent bins, rows summing to 1, and even usage of all
    bins across the batch. Logs and plots every `log_interval` epochs and
    stops early once loss < 1e-4.
    """
    model.train()
    X = X.to(device)
    for epoch in range(epochs):
        optimizer.zero_grad()
        qd_h, X_bar = model(X)
        loss = F.mse_loss(X_bar, X)
        # Regularizer 1: smoothness — penalize jumps between adjacent bins.
        loss += (qd_h[:, 0:-1] - qd_h[:, 1:]).pow(2).mean().pow(0.5) * 0.01
        # Regularizer 2: each row should be a probability distribution (sum to 1).
        loss += (qd_h.sum(dim=1) - 1).pow(2).mean().pow(0.5) * 0.01
        # Regularizer 3: spread usage evenly over all bins.
        use_count = qd_h.sum(dim=0)
        avg_use = use_count.mean()
        # Bug fix: use the model's actual bin count (qd_h.shape[1]) rather
        # than a module-level `resolution` global, which silently diverged
        # if the global and the model disagreed.
        err = (use_count - avg_use).pow(2).sum().pow(0.5) / qd_h.shape[1]
        loss += err * 0.01
        loss.backward()
        optimizer.step()
        if epoch % log_interval == 0:
            print(f"Epoch: {epoch} \t Loss: {loss.item():.6f}")
            show_image(qd_h.detach(), title="qd_h", print_values=False)
            show_image((qd_h == qd_h.max(dim=1, keepdim=True)[0]).view_as(qd_h).detach(), title="qd_h winner", print_values=False)
        if loss < 0.0001:
            break
# --- Experiment: train a 1-D QDAE and visualize the learned code ---
resolution = 10
# hidden_size == resolution is an arbitrary choice for this toy problem.
model = QDAE(input_output_size=1, hidden_size=resolution, latent_feature_count=1, resolution=resolution)
#for param in model.parameters():
# print(type(param.data), param.size())
#nn.init.constant(param.data, val=0.1)
#param.data += 0.1
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Training inputs: ten scalars in [0, 1) with a small gap around 0.5.
X = torch.cat((torch.tensor(np.arange(0, .50, .1)).unsqueeze(dim=1).float(),
torch.tensor(np.arange(.51, 1.0, .1)).unsqueeze(dim=1).float()))
print("X", X.shape)
train(model=model, device=device, X=X, optimizer=optimizer, epochs=10000, log_interval=1000)
# qd_h, X_bar = model(X)
# show_image(qd_h.detach(), title="qd_h", print_values=False, figsize=(4, 80))
# show_image((qd_h == qd_h.max(dim=1, keepdim=True)[0]).view_as(qd_h).detach(), title="qd_h winner", print_values=False, figsize=(4, 80))
# Evaluate on an evenly spaced grid and plot input vs reconstruction.
X = torch.tensor(np.arange(0.0, 1.0, 0.1)).unsqueeze(dim=1).float()
qd_h, X_bar = model(X)
show_image(qd_h.detach(), title="qd_h", print_values=False, figsize=(4, 80))
show_image((qd_h == qd_h.max(dim=1, keepdim=True)[0]).view_as(qd_h).detach(), title="qd_h winner", print_values=False, figsize=(4, 80))
plt.plot(X.detach().numpy(), X_bar.detach().numpy())
```
| github_jupyter |
```
%matplotlib inline
import numpy as np
import time
import tensorflow as tf
from tensorflow import keras
import sys
sys.path.append("..")
import d2lzh_tensorflow2 as d2l
def get_data_ch7():  # This function is also saved in the d2lzh_tensorflow2 package for later use
    """Load the airfoil-noise data set, standardized per column.

    Returns (features, labels) as float32 tensors for the first 1500 rows.
    """
    raw = np.genfromtxt('../../data/airfoil_self_noise.dat', delimiter='\t')
    standardized = (raw - raw.mean(axis=0)) / raw.std(axis=0)
    features = tf.convert_to_tensor(standardized[:1500, :-1], dtype=tf.float32)
    labels = tf.convert_to_tensor(standardized[:1500, -1], dtype=tf.float32)
    return features, labels
features,labels = get_data_ch7()
features.shape
```
7.3.2. 从零开始实现
```
def sgd(params, states, hyperparams, grads):
    """Plain mini-batch SGD step: subtract lr * grad from each param in place.

    `states` is unused; it exists so every optimizer in this chapter
    shares the same signature.
    """
    step_size = hyperparams['lr']
    for i, param in enumerate(params):
        param.assign_sub(step_size * grads[i])
```
“线性回归的从零开始实现”一节中已经实现过小批量随机梯度下降算法。我们在这里将它的输入参数变得更加通用,主要是为了方便本章后面介绍的其他优化算法也可以使用同样的输入。具体来说,我们添加了一个状态输入states并将超参数放在字典hyperparams里。此外,我们将在训练函数里对各个小批量样本的损失求平均,因此优化算法里的梯度不需要除以批量大小。
```
# This function is also saved in the d2lzh_tensorflow2 package for later use.
def train_ch7(optimizer_fn, states, hyperparams, features, labels,
              batch_size=10, num_epochs=2):
    """Train a linear-regression model from scratch with `optimizer_fn`.

    optimizer_fn(params, states, hyperparams, grads) applies one update
    step. Losses are averaged per batch here, so the optimizer must not
    divide by the batch size again. Records the evaluation loss roughly
    every 100 examples and plots the loss curve at the end.
    """
    # Initialize the model.
    net, loss = d2l.linreg, d2l.squared_loss
    w = tf.Variable(np.random.normal(0, 0.01, size=(features.shape[1], 1)), dtype=tf.float32)
    b = tf.Variable(tf.zeros(1, dtype=tf.float32))
    def eval_loss():
        # Mean loss over the full data set, as a numpy scalar.
        return np.array(tf.reduce_mean(loss(net(features, w, b), labels)))
    ls = [eval_loss()]
    data_iter = tf.data.Dataset.from_tensor_slices((features, labels)).batch(batch_size)
    data_iter = data_iter.shuffle(100)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with tf.GradientTape() as tape:
                l = tf.reduce_mean(loss(net(X, w, b), y))  # use the mean loss
            grads = tape.gradient(l, [w, b])
            optimizer_fn([w, b], states, hyperparams, grads)  # update the model parameters
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())  # record the current training error every 100 examples
    # Print the result and plot the loss curve.
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    d2l.set_figsize()
    d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    d2l.plt.xlabel('epoch')
    d2l.plt.ylabel('loss')
def train_sgd(lr, batch_size, num_epochs=2):
    """Convenience wrapper: run train_ch7 with plain SGD at learning rate `lr`."""
    train_ch7(sgd, None, {'lr': lr}, features, labels, batch_size, num_epochs)

# batch_size == number of examples: full-batch gradient descent.
train_sgd(1, 1500, 6)
# batch_size == 1: stochastic gradient descent.
train_sgd(0.005, 1)
# batch_size == 10: mini-batch SGD.
train_sgd(0.05, 10)
```
7.3.3. 简洁实现
同样,我们也无须自己实现小批量随机梯度下降算法。tensorflow.keras.optimizers 模块提供了很多常用的优化算法比如SGD、Adam和RMSProp等。下面我们创建一个用于优化model 所有参数的优化器实例,并指定学习率为0.05的小批量随机梯度下降(SGD)为优化算法。
```
from tensorflow.keras import optimizers
# Module-level SGD optimizer (learning rate 0.05) used by the concise
# training function defined next.
trainer = optimizers.SGD(learning_rate=0.05)
# This function is also saved in the d2lzh_tensorflow2 package for later use.
# The trainer_name/trainer_hyperparams parameters are actually unused; they
# are kept only to stay consistent with the original book.
def train_tensorflow2_ch7(trainer_name, trainer_hyperparams, features, labels,
                          batch_size=10, num_epochs=2):
    """Concise version of train_ch7 using Keras layers and the module-level `trainer`."""
    # Initialize the model.
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.Dense(1))
    loss = tf.losses.MeanSquaredError()
    def eval_loss():
        # Mean loss over the full data set, as a numpy scalar.
        return np.array(tf.reduce_mean(loss(net(features), labels)))
    ls = [eval_loss()]
    data_iter = tf.data.Dataset.from_tensor_slices((features, labels)).batch(batch_size)
    data_iter = data_iter.shuffle(100)
    # Iterate the model parameters with the Trainer instance.
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with tf.GradientTape() as tape:
                l = tf.reduce_mean(loss(net(X), y))  # use the mean loss
            grads = tape.gradient(l, net.trainable_variables)
            trainer.apply_gradients(zip(grads, net.trainable_variables))  # update the model parameters
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())  # record the current training error every 100 examples
    # Print the result and plot the loss curve.
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    d2l.set_figsize()
    d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    d2l.plt.xlabel('epoch')
    d2l.plt.ylabel('loss')

train_tensorflow2_ch7('trainer', {'learning_rate': 0.05}, features, labels, 10)
```
| github_jupyter |
# note:
* [covariance matrix](http://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html)
* [multivariate_normal](http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.multivariate_normal.html)
* [seaborn bivariate kernel density estimate](https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.kdeplot.html#seaborn.kdeplot)
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context="notebook", style="white", palette=sns.color_palette("RdBu"))
import numpy as np
import pandas as pd
import scipy.io as sio
from scipy import stats
import sys
sys.path.append('..')
from helper import anomaly
from sklearn.cross_validation import train_test_split
```
You want to divide data into 3 set.
1. Training set
2. Cross Validation set
3. Test set.
You shouldn't be doing prediction using training data or Validation data as it does in the exercise.
```
# Load the exercise data; keys include 'X' (training) and 'Xval'/'yval'.
mat = sio.loadmat('./data/ex8data1.mat')
mat.keys()
X = mat.get('X')
```
divide original validation data into validation and test set
```
# Split the original validation data 50/50 into validation and test sets.
Xval, Xtest, yval, ytest = train_test_split(mat.get('Xval'),
                                            mat.get('yval').ravel(),
                                            test_size=0.5)
```
Visualize training data
```
# Scatter plot of the training data (fit_reg=False: no regression line).
sns.regplot('Latency', 'Throughput',
            data=pd.DataFrame(X, columns=['Latency', 'Throughput']),
            fit_reg=False,
            scatter_kws={"s": 20,
                         "alpha": 0.5})
```
# estimate multivariate Gaussian parameters $\mu$ and $\sigma^2$
> according to data, X1, and X2 is not independent
```
# Estimate multivariate-Gaussian parameters from the training data:
# sample mean and full covariance (features are not independent).
mu = X.mean(axis=0)
print(mu, '\n')
cov = np.cov(X.T)
print(cov)
# example of creating a 2d grid to calculate probability density
np.dstack(np.mgrid[0:3, 0:3])
# create the multi-var Gaussian model
multi_normal = stats.multivariate_normal(mu, cov)
# create a dense evaluation grid over the data range
x, y = np.mgrid[0:30:0.01, 0:30:0.01]
pos = np.dstack((x, y))
fig, ax = plt.subplots()
# plot probability density
ax.contourf(x, y, multi_normal.pdf(pos), cmap='Blues')
# overlay the original data points
sns.regplot('Latency', 'Throughput',
            data=pd.DataFrame(X, columns=['Latency', 'Throughput']),
            fit_reg=False,
            ax=ax,
            scatter_kws={"s": 10,
                         "alpha": 0.4})
```
# select threshold $\epsilon$
1. use training set $X$ to model the multivariate Gaussian
2. use cross validation set $(Xval, yval)$ to find the best $\epsilon$ by finding the best `F-score`
<img style="float: left;" src="../img/f1_score.png">
```
# Search for the epsilon threshold that maximizes F1 on the validation set.
e, fs = anomaly.select_threshold(X, Xval, yval)
print('Best epsilon: {}\nBest F-score on validation data: {}'.format(e, fs))
```
# visualize prediction of `Xval` using learned $\epsilon$
1. use CV data to find the best $\epsilon$
2. use all data (training + validation) to create model
3. do the prediction on test data
```
# Fit on X + Xval, then predict anomalies on the test set with threshold e.
multi_normal, y_pred = anomaly.predict(X, Xval, e, Xtest, ytest)
# construct the test DataFrame
data = pd.DataFrame(Xtest, columns=['Latency', 'Throughput'])
data['y_pred'] = y_pred
# create a grid for graphing
x, y = np.mgrid[0:30:0.01, 0:30:0.01]
pos = np.dstack((x, y))
fig, ax = plt.subplots()
# plot probability density
ax.contourf(x, y, multi_normal.pdf(pos), cmap='Blues')
# plot the test-set points
sns.regplot('Latency', 'Throughput',
            data=data,
            fit_reg=False,
            ax=ax,
            scatter_kws={"s": 10,
                         "alpha": 0.4})
# mark the predicted anomalies of the test data
anamoly_data = data[data['y_pred'] == 1]
ax.scatter(anamoly_data['Latency'], anamoly_data['Throughput'], marker='x', s=50)
```
# high dimension data
```
# High-dimensional data set: repeat the full pipeline on ex8data2.
mat = sio.loadmat('./data/ex8data2.mat')
X = mat.get('X')
# Split the original validation data 50/50 into validation and test sets.
Xval, Xtest, yval, ytest = train_test_split(mat.get('Xval'),
                                            mat.get('yval').ravel(),
                                            test_size=0.5)
e, fs = anomaly.select_threshold(X, Xval, yval)
print('Best epsilon: {}\nBest F-score on validation data: {}'.format(e, fs))
multi_normal, y_pred = anomaly.predict(X, Xval, e, Xtest, ytest)
# Fixed typo in the user-facing message ("anamolies" -> "anomalies").
print('find {} anomalies'.format(y_pred.sum()))
```
The huge difference between my result and the official `117` anomalies in ex8 is due to:
1. my use of **multivariate Gaussian**
2. I split data very differently
| github_jupyter |
# Lists
The data structures that we use most often in data science are:
* arrays, from `numpy`;
* data frames, from `pandas`.
There is another data structure for containing sequences of values
- the `list`.
You have already seen these in passing, when we created arrays. Now we cover them in more detail.
## Creating a list
You make a list like this:
```
my_list = [1, 2, 3]
my_list
```
More formally, you define a list like this:
1. the open square bracket character `[`, followed by
1. a sequence of zero or more values, separated by commas, followed by
1. the close square bracket character `]`.
We defined the list above with `[1, 2, 3]`.
Here is a list with one value:
```
another_list = [99]
another_list
```
As implied above, the list can be empty, in which case there is nothing between the `[` and the `]`:
```
empty_list = []
empty_list
```
A list is of type `list`:
```
type(my_list)
```
## Lists and arrays
We will soon need the Numpy library for the examples:
```
import numpy as np
```
A list is a container, like an array. Like an array, it contains an
ordered sequence of values. Like an array, it can be empty, or it can
contain any number of values.
You can *convert a list into an array*, like this
```
my_list = [4, 5, 6]
my_list
my_array = np.array(my_list)
my_array
```
In fact, you have already seen us do this, as a convenient way to
create a new array - for example:
```
# Define a list and immediately convert to an array
another_array = np.array([10, 20, 30])
another_array
```
You can also *convert an array into a list*, using the `list` function:
```
list(another_array)
```
Like arrays, you can *index* into a list, to get the individual values. Consider this list:
```
my_list = [4, 5, 6]
```
Like arrays (and every other kind of sequence in Python), the first element is at index (offset) 0:
```
# Get the first value
my_list[0]
```
Accordingly, the third element is at index (offset) 2:
```
# Get the third value
my_list[2]
```
## Differences between lists and arrays
### A list can contain a mix of types
The elements in a list can be of any type, and there can be many different types in a list. For example, here is a list that mixes integers, floating point values, and strings.
```
mixed_list = [1, 1.1, 'Ho ho ho']
mixed_list
```
The elements in an array must all be of the same type — say, all
numbers, or all strings. If we now create an array from this list, Numpy will try to find a type that works for all the elements, and will convert the elements to that type:
```
unmixed_array = np.array(mixed_list)
unmixed_array
```
Notice that Numpy has converted all the elements to strings, as you can see from the quotes around the values.
## A list is always one-dimensional
You have already seen that an array can have two (or more) dimensions. Here is a two-dimensional array of random numbers:
```
two_d_array = np.random.uniform(size=[5, 3])
two_d_array
```
Lists only have one dimension.
### Lists can contain lists
You may be able to see a way of making a list that is rather like an
array with two dimensions. Remember that a list can contain elements
of *any type*. That means that a list can contain elements of type
`list`. Here is a list where the first element is also a list:
```
small_list = [21, 22, 23]
funky_list = [small_list, 1, 1.1]
funky_list
```
As usual, I can get the first element of `funky_list` by indexing:
```
element_at_0 = funky_list[0]
element_at_0
```
That first element is a list. As usual, I can index in that first element too. Here I ask for the second element from `element_at_0`:
```
element_at_0[1]
```
Putting that all together in one line, I can get the second element of the first element with:
```
funky_list[0][1]
```
Read this carefully from left to right. `funky_list` is the initial
list. The `[0]` after `funky_list` gives me the first element of that
list. The `[1]` after that, gives me the second element from the result
of the previous expressions. `funky_list[0][1]` puts several
expressions together in sequence; this is a very common pattern that
you will see often in Python programs. Learning to read them carefully
is one of the skills you will pick up, as you learn Python, and data
science.
## Appending to a list
You have already seen *methods*. These are functions attached to
values. For example, you have seen that a data frame, has a `count`
method, that, when called, gives the number of rows in the data frame.
Lists have several very useful methods. One of the most useful is `append`. Use it to append values to a list.
```
# An empty list
my_list = []
my_list
```
Append a value to the empty list:
```
my_list.append(1)
my_list
```
Append another value:
```
my_list.append(1.1)
my_list
```
And another:
```
my_list.append('Ho ho ho')
my_list
```
For some more operations with lists, see [More on
lists](../extra/more_on_lists).
## What do these square brackets mean?
You have seen two uses of square brackets on this page.
The first use, is where the opening square bracket is the start of an expression for a new list, like this:
```
a_new_list = [1, 2, 3]
```
The `[` is the start of the expression, because the line above is an
assignment statement, with a variable name, followed by `=` followed by an expression. The expression is `[1, 2, 3]`, and the `[` is the first character in the expression.
The second use, is to ask Python to get a value from another value.
Technically, this use is called *indexing*. For example, we do this
when we index into a list:
```
first_value = a_new_list[0]
```
Here the `[` refers back to `a_new_list`. It tells Python "please get
a value from `a_new_list`". Between the `[` and the `]` is the
information that Python must use to get the value.
Here is another example of square brackets in use:
```
my_array = np.array([1, 2, 3])
```
Which use of square brackets is that? List creation, or indexing?
Why?
How about:
```
a_new_list[0] = 99
```
? Why? Or:
```
a_new_list.append([101, 'one oh one'])
```
?
Remember the [data frame introduction](../04/data_frame_intro)? In
that you saw
```
gdp = gender_data['gdp']
```
Which use of square brackets was that?
Look through the [data frame introduction](../04/data_frame_intro)
page. There are several other examples of the use of square brackets. Which use are they?
Now look at [More on arrays](../03/More_on_Arrays). Which uses do you see there?
| github_jupyter |
# Deep Neural Network for Image Classification: Application
When you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course!
You will use the functions you implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation.
**After this assignment you will be able to:**
- Build and apply a deep neural network to supervised learning.
Let's get started!
## 1 - Packages
Let's first import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.
- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
```
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
```
## 2 - Dataset
You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better!
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labelled as cat (1) or non-cat (0)
- a test set of m_test images labelled as cat and non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
Let's get more familiar with the dataset. Load the data by running the cell below.
```
# Load the "Cat vs non-Cat" dataset (helper from dnn_app_utils_v3).
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
```
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
```
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore the dataset dimensions
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]  # images are square: num_px x num_px x 3
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
```
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.
<img src="images/imvectorkiank.png" style="width:450px;height:300px;">
<caption><center> <u>Figure 1</u>: Image to vector conversion. <br> </center></caption>
```
# Reshape the training and test examples.
# Each column becomes one flattened image vector of length num_px*num_px*3.
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
```
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector.
## 3 - Architecture of your model
Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.
You will build two different models:
- A 2-layer neural network
- An L-layer deep neural network
You will then compare the performance of these models, and also try out different values for $L$.
Let's look at the two architectures.
### 3.1 - 2-layer neural network
<img src="images/2layerNN_kiank.png" style="width:650px;height:400px;">
<caption><center> <u>Figure 2</u>: 2-layer neural network. <br> The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. </center></caption>
<u>Detailed Architecture of figure 2</u>:
- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$.
- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.
- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.
- You then repeat the same process.
- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias).
- Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat.
### 3.2 - L-layer deep neural network
It is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation:
<img src="images/LlayerNN_kiank.png" style="width:650px;height:400px;">
<caption><center> <u>Figure 3</u>: L-layer neural network. <br> The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***</center></caption>
<u>Detailed Architecture of figure 3</u>:
- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).
- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.
- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.
- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat.
### 3.3 - General methodology
As usual you will follow the Deep Learning methodology to build the model:
1. Initialize parameters / Define hyperparameters
2. Loop for num_iterations:
a. Forward propagation
b. Compute cost function
c. Backward propagation
d. Update parameters (using parameters, and grads from backprop)
4. Use trained parameters to predict labels
Let's now implement those two models!
## 4 - Two-layer neural network
**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters(n_x, n_h, n_y):
...
return parameters
def linear_activation_forward(A_prev, W, b, activation):
...
return A, cache
def compute_cost(AL, Y):
...
return cost
def linear_activation_backward(dA, cache, activation):
...
return dA_prev, dW, db
def update_parameters(parameters, grads, learning_rate):
...
return parameters
```
```
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3 — flattened input vector size
n_h = 7     # hidden-layer units
n_y = 1     # single sigmoid output (cat / non-cat)
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
    """
    Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (n_x, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- dimensions of the layers (n_x, n_h, n_y)
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If set to True, this will print the cost every 100 iterations

    Returns:
    parameters -- a dictionary containing W1, W2, b1, and b2
    """
    np.random.seed(1)
    grads = {}
    costs = []  # to keep track of the cost
    m = X.shape[1]  # number of examples
    (n_x, n_h, n_y) = layers_dims

    # Initialize parameters dictionary.
    parameters = initialize_parameters(n_x, n_h, n_y)

    # Get W1, b1, W2 and b2 from the dictionary parameters.
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID.
        A1, cache1 = linear_activation_forward(X, W1, b1, activation='relu')
        A2, cache2 = linear_activation_forward(A1, W2, b2, activation='sigmoid')

        # Compute cost
        cost = compute_cost(A2, Y)

        # Initializing backward propagation: derivative of cross-entropy loss w.r.t. A2.
        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))

        # Backward propagation. dA0 is not used (no layer before the input).
        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation='sigmoid')
        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation='relu')

        grads['dW1'] = dW1
        grads['db1'] = db1
        grads['dW2'] = dW2
        grads['db2'] = db2

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Retrieve W1, b1, W2, b2 from parameters
        W1 = parameters["W1"]
        b1 = parameters["b1"]
        W2 = parameters["W2"]
        b2 = parameters["b2"]

        # Print/record the cost every 100 iterations. (The original had two
        # identical conditionals back to back; merged into one.)
        if print_cost and i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
```
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
```
**Expected Output**:
<table>
<tr>
<td> **Cost after iteration 0**</td>
<td> 0.6930497356599888 </td>
</tr>
<tr>
<td> **Cost after iteration 100**</td>
<td> 0.6464320953428849 </td>
</tr>
<tr>
<td> **...**</td>
<td> ... </td>
</tr>
<tr>
<td> **Cost after iteration 2400**</td>
<td> 0.048554785628770206 </td>
</tr>
</table>
Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.
Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
```
predictions_train = predict(train_x, train_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Accuracy**</td>
<td> 1.0 </td>
</tr>
</table>
```
predictions_test = predict(test_x, test_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Accuracy**</td>
<td> 0.72 </td>
</tr>
</table>
**Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting.
Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.
## 5 - L-layer Neural Network
**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters_deep(layers_dims):
...
return parameters
def L_model_forward(X, parameters):
...
return AL, caches
def compute_cost(AL, Y):
...
return cost
def L_model_backward(AL, Y, caches):
...
return grads
def update_parameters(parameters, grads, learning_rate):
...
return parameters
```
```
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):  # lr was 0.009
    """
    Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    Arguments:
    X -- data, numpy array of shape (num_px * num_px * 3, number of examples).
         (Docstring fix: the original stated the transposed shape, which
         contradicts the column-per-example convention used by L_model_forward.)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(1)
    costs = []  # keep track of cost

    # Parameters initialization.
    parameters = initialize_parameters_deep(layers_dims)

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        AL, caches = L_model_forward(X, parameters)

        # Compute cost.
        cost = compute_cost(AL, Y)

        # Backward propagation.
        grads = L_model_backward(AL, Y, caches)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print/record the cost every 100 iterations. (The original had two
        # identical conditionals back to back; merged into one.)
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per tens)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
```
You will now train the model as a 4-layer neural network.
Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
```
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
```
**Expected Output**:
<table>
<tr>
<td> **Cost after iteration 0**</td>
<td> 0.771749 </td>
</tr>
<tr>
<td> **Cost after iteration 100**</td>
<td> 0.672053 </td>
</tr>
<tr>
<td> **...**</td>
<td> ... </td>
</tr>
<tr>
<td> **Cost after iteration 2400**</td>
<td> 0.092878 </td>
</tr>
</table>
```
pred_train = predict(train_x, train_y, parameters)
```
<table>
<tr>
<td>
**Train Accuracy**
</td>
<td>
0.985645933014
</td>
</tr>
</table>
```
pred_test = predict(test_x, test_y, parameters)
```
**Expected Output**:
<table>
<tr>
<td> **Test Accuracy**</td>
<td> 0.8 </td>
</tr>
</table>
Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set.
This is good performance for this task. Nice job!
Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course).
## 6) Results Analysis
First, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
```
print_mislabeled_images(classes, test_x, test_y, pred_test)
```
**A few types of images the model tends to do poorly on include:**
- Cat body in an unusual position
- Cat appears against a background of a similar color
- Unusual cat color and species
- Camera Angle
- Brightness of the picture
- Scale variation (cat is very large or small in image)
## 7) Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
**References**:
- for auto-reloading external module: http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
| github_jupyter |
# Scraping Elsevier Metadata
This notebook is used for pulling metadata from articles via Scopus' literature search. It can technically be used to scrape abstracts from anywhere within Scopus' database, but we've specifically limited it to Elsevier journals, since Elsevier is the only publisher whose fulltext API we have access to. Specifically, this sets up a way to pull PII identification numbers automatically.
To manually test queries, go to https://www.scopus.com/search/form.uri?display=advanced
Elsevier maintains a list of all journals in a single excel spreadsheet. The link to that elsevier active journals link: https://www.elsevier.com/__data/promis_misc/sd-content/journals/jnlactivesubject.xls
The whole of this scraping tool centers around `pybliometrics`, a prebuilt scraping package that interacts with the quirks of Scopus.
```
import pybliometrics
from pybliometrics.scopus import ScopusSearch
from pybliometrics.scopus.exception import Scopus429Error
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import os
import multiprocessing
from os import system, name
import json
import time
from IPython.display import clear_output
from pybliometrics.scopus import config
```
In order to use `pybliometrics`, you have to set up a config file on your computer. The best way to do that is to just use the built in command, `pybliometrics.scopus.utils.create_config()`. It will prompt you to enter an API key, so make sure you have one at the ready before you run this command. You can get one easily from https://dev.elsevier.com/documentation/SCOPUSSearchAPI.wadl with a quick registration.
```
#In addition to imports, the first time we ever run pybliometrics we need to config pybliometrics
#My API key: 646199a6755da12c28f3fdfe59bbfe55
#pybliometrics.scopus.utils.create_config()
```
You should note the above filepath location, as it's important to have this filepath for later function calls.
```
#Note your config path for pybliometrics: C:\Users\Jonathan/.scopus/config.ini
```
### Let's get into it - Time to walk through the algorithm!
List of things by which the algorithm will parse searches:
1. Year
2. Journal
3. Keyword search
So, we'll have to select a set of these parameters to fine-tune our search to get articles that'll be useful to us.
One of the first quick parameters that will help is to filter down the number of journals that we'll be searching through, and then organize them into a dataframe so we can continue to work through the data in later methods.
We'll first go through all the methods that we have, then we'll show you exactly how to use the methods with some examples.
The following method, `make_jlist`, creates a dataframe that only contains journals mentioning certain keywords in their 'Full_Category' column. Those keywords are passed directly to the method, though some default keywords can be used.
```
def make_jlist(jlist_url='https://www.elsevier.com/__data/promis_misc/sd-content/journals/jnlactivesubject.xls',
               journal_strings=['chemistry', 'energy', 'molecular', 'atomic', 'chemical', 'biochem',
                                'organic', 'polymer', 'chemical engineering', 'biotech', 'colloid']):
    """
    Build a dataframe of candidate journals to query.

    Downloads Elsevier's active-journal spreadsheet from `jlist_url` and keeps
    only journals whose subject description mentions at least one of the
    keywords in `journal_strings`.

    Returns a dataframe with two columns: 'Journal_Title' and 'ISSN'.
    """
    # Pull the active-journal spreadsheet straight from Elsevier.
    journals = pd.read_excel(jlist_url)

    # Friendlier column names for the two fields used below.
    journals.rename(columns={'Display Category Full Name': 'Full_Category',
                             'Full Title': 'Journal_Title'}, inplace=True)

    # Lowercase the subject text so keyword matching is case-insensitive,
    # drop duplicate titles, then shuffle with a fixed seed so repeated runs
    # traverse the journals in the same (randomized) order.
    journals.Full_Category = journals.Full_Category.str.lower()
    journals = journals.drop_duplicates(subset='Journal_Title')
    journals = shuffle(journals, random_state=42)

    # Keep only journals whose topic description mentions a requested keyword.
    keyword_mask = journals['Full_Category'].str.contains('|'.join(journal_strings))
    journals = journals[keyword_mask]

    # Reduce to title + ISSN and de-duplicate titles that matched several keywords.
    journal_frame = journals[['Journal_Title', 'ISSN']]
    return journal_frame.drop_duplicates(subset='Journal_Title')
```
### The following method builds the keyword search portion of a query. There is an example below that can be copy-pasted into the Scopus advanced Search.
This method is a helper function, and you really shouldn't need to interact with it. It helps to combine several terms in a way that would be unnatural for us to type, but is necessary for online searching.
```
def build_search_terms(kwds):
    """
    Join keywords into the boolean-OR clause of a Scopus query string.

    Returns e.g. "polymer OR organic " -- note the trailing space, which the
    query builder downstream relies on. An empty keyword list yields "".
    """
    if not kwds:
        return ""
    return " OR ".join(kwds) + " "
# Here is a model test query
# test = search(verbose = True, query = 'polymer OR organic OR molecular AND PUBYEAR IS 2019 AND ISSN(00404020)')
# Here is a model test query
# test = search(verbose = True, query = 'polymer OR organic OR molecular AND PUBYEAR IS 2019 AND ISSN(00404020)')
```
### The following method builds the entire query to be put into pybliometrics
The query requires a pretty specific format, so we are using a helper function to make it less obnoxious to deal with.
```
def build_query_dict(term_list, issn_list, year_list):
    """
    Build a nested dictionary of Scopus query strings: {issn: {year: query}}.

    Each query combines the OR-joined keyword clause with a publication-year
    filter and an ISSN filter, ready to feed to ScopusSearch.

    Parameters
    ----------
    term_list(list, required): the list of search terms looked for in papers by the api.
    issn_list(list, required): the list of journal issn's to be queried. Can be created by getting the '.values'
                               of a 'journal_list' dataframe that has been created from the 'make_jlist' method.
    year_list(list, required): the list of years which will be searched through
    """
    keyword_clause = build_search_terms(term_list)
    queries = {}
    for issn in issn_list:
        # Inner mapping: one query string per requested year for this journal.
        queries[issn] = {
            year: keyword_clause + "AND PUBYEAR IS " + str(year) + ' AND ISSN(' + issn + ')'
            for year in year_list
        }
    return queries
```
Ok, we can either run this with a single process, or we can multiprocess our way to victory. Either way, the first thing we need to do is define a set of functions that will follow our list of journals, as well as a set of outlined years, and search for a list of terms within those journals and years.
### Here is a method to clear the cache. Doesn't matter too much because 1.1 million pubs stored in cache only took 2 GB of memory
BE CAREFUL WITH THIS. IT CAN DELETE EVERYTHING ON YOUR COMPUTER IF YOU MESS IT UP. But it can be useful if your cache starts to get too full and take up too much memory.
```
def clear_cache(cache_path):
    """
    Delete pybliometrics ScopusSearch cache files under `cache_path`.

    Be very careful with this method. It can delete your entire computer if
    you let it: as a guard, it only touches paths containing the
    '.scopus/scopus_search/' substring, and only removes entries whose file
    name has the standard 32-character cache-hash length.
    """
    if '.scopus/scopus_search/' not in cache_path:
        return
    for entry in os.listdir(cache_path):
        # pybliometrics cache files are named by a 32-char hash
        # (e.g. '8805245317ccb15059e3cfa219be2dd4').
        if len(entry) == 32:
            os.remove(cache_path + entry)
```
### The method below loops through the entire journal list and collects article metadata (ie, not full-text), including PII
Unfortunately, collecting fulltext is not possible with this API. We have a separate script, `Elsevier_fulltext_api.py`, which takes in this metadata and is able to pull out a full-length article.
Things we probably want to just grab because we have them:
1. Author names
2. Author keywords
3. Cited by count
4. title
5. PII
6. DOI
7. Description
```
def get_piis(term_list, journal_frame, year_list, cache_path, output_path,
             keymaster=False, fresh_keys=None,
             config_path='/Users/DavidCJ/.scopus/config.ini'):
    """
    Query Scopus for article metadata (PII, DOI, title, authors, abstract,
    citation count, keywords) for every journal in `journal_frame` and every
    year in `year_list`, writing one JSON file plus a .txt publication count
    per journal under `output_path`. Mappable to multiple parallel processes.

    Parameters
    ----------
    term_list(list, required): keyword search terms, OR-ed together in the query.
    journal_frame(DataFrame, required): output of `make_jlist`, with
        'Journal_Title' and 'ISSN' columns.
    year_list(list, required): the years to query.
    cache_path(str, required): pybliometrics scopus_search cache directory.
    output_path(str, required): output directory; must end with '/'.
    keymaster(bool): if True, this process handles Scopus 429 rate-limit errors
        by swapping in a fresh API key from `fresh_keys`. Non-keymaster
        processes just sleep and retry, assuming the keymaster fixed the key.
    fresh_keys(list): spare API keys, consumed by the keymaster on 429 errors.
    config_path(str): pybliometrics config file location.
        NOTE(review): currently unused inside the function -- confirm intent.
    """
    # BUG FIX: was `output_path[-1] is not '/'` -- identity comparison against
    # a string literal only works by interning accident; use inequality.
    if output_path[-1] != '/':
        raise Exception('Output file path must end with /')
    if '.scopus/scopus_search' not in cache_path:
        raise Exception('Cache path is not a sub-directory of the scopus_search. Make sure cache path is correct.')

    # Two lists whose values correspond to each other.
    issn_list = journal_frame['ISSN'].values
    journal_list = journal_frame['Journal_Title'].values

    # Sanitize journal names for use as file/directory names.
    # BUG FIX: the original elif chain applied at most ONE replacement per
    # title, so e.g. "Gene: X Part/2" kept its slash and spaces. Apply all
    # three replacements unconditionally.
    for j in range(len(journal_list)):
        journal_list[j] = journal_list[j].replace(':', '').replace('/', '_').replace(' ', '_')

    # Dictionary used to sequentially query Elsevier per journal and year.
    query_dict = build_query_dict(term_list, issn_list, year_list)

    # Write to disk and reset state at the start of every new journal.
    for i in range(len(issn_list)):
        # Clear the standard output screen between journals.
        os.system('cls' if os.name == 'nt' else 'clear')
        paper_counter = 0
        issn_dict = {}
        for j in range(len(year_list)):
            # For every year in every journal, query the keywords.
            print(f'{journal_list[i]} in {year_list[j]}.')
            # The sole 'keymaster' process handles 429 responses by swapping the key.
            if keymaster:
                try:
                    query_results = ScopusSearch(verbose=True, query=query_dict[issn_list[i]][year_list[j]])
                except Scopus429Error:
                    print('entered scopus 429 error loop... replacing key')
                    newkey = fresh_keys.pop(0)
                    config["Authentication"]["APIKey"] = newkey
                    time.sleep(5)
                    query_results = ScopusSearch(verbose=True, query=query_dict[issn_list[i]][year_list[j]])
                    print('key swap worked!!')
            else:
                # Non-keymaster: wait for the keymaster to replace the key, then retry.
                try:
                    query_results = ScopusSearch(verbose=True, query=query_dict[issn_list[i]][year_list[j]])
                except Scopus429Error:
                    print('Non key master is sleeping for 15... ')
                    time.sleep(15)
                    # At this point the keymaster should have fixed the 429.
                    query_results = ScopusSearch(verbose=True, query=query_dict[issn_list[i]][year_list[j]])
                    print('Non key master slept, query has now worked.')

            # Store relevant fields from the results for this (journal, year).
            year_dict = {}
            if query_results.results is not None:
                # Some query results can be of type None.
                for k in range(len(query_results.results)):
                    paper_counter += 1
                    result = query_results.results[k]
                    year_dict[k] = {
                        'pii': result.pii,
                        'doi': result.doi,
                        'title': result.title,
                        'num_authors': result.author_count,
                        'authors': result.author_names,
                        'description': result.description,
                        'citation_count': result.citedby_count,
                        'keywords': result.authkeywords,
                    }
            # Empty dict is stored as-is for None results.
            issn_dict[year_list[j]] = year_dict

        # Persist this journal's results as JSON plus a human-readable count.
        os.mkdir(f'{output_path}{journal_list[i]}')
        with open(f'{output_path}{journal_list[i]}/{journal_list[i]}.json', 'w') as file:
            json.dump(issn_dict, file)
        with open(f'{output_path}{journal_list[i]}/{journal_list[i]}.txt', 'w') as file2:
            file2.write(f'This file contains {paper_counter} publications.')
```
****
## Example Time!
Ok, now that we've shown all the methods, let's investigate their usage. We'll walk through linearly, so feel free to use these cells to figure out and run your own scraping efforts.
First things first: we need to call the `make_jlist` method, pass it anything we want to search by, and receive a dataframe of our downselected set of journals. You will get a warning from this method call, but it's not a big deal — it's an underlying quirk of the pandas.read_excel function.
```
# Build the downselected journal dataframe.
journal_list = make_jlist(jlist_url='https://www.elsevier.com/__data/promis_misc/sd-content/journals/jnlactivesubject.xls',
                          journal_strings=['chemistry', 'synthesis', 'molecular', 'chemical', 'organic', 'polymer', 'materials'])
# Print out to show you the structure of the journal dataframe
journal_list.head()

# Example of how to use the dictionary builder.
# NOTE(review): `term_list` is defined in a later cell -- run that cell first,
# or the `build_query_dict` call below will raise NameError.
issn_list = journal_list['ISSN'].values
dictionary = build_query_dict(term_list, issn_list, range(1995, 2021))
# This shows a specific journal ISSN, and specific year selected.
dictionary['00404020'][2015]

# List of Jon's API Keys - Feel free (please do) to grab your own quickly from the Scopus website.
apikeylist = ['646199a6755da12c28f3fdfe59bbfe55', 'f23e69765c41a3a6e042eb9baf73bd77', 'f6dafc105b5adfe25105eb658aa80b7c',
              'e9f7c3a33c7bf1b790372d25a8fbb5a1',  # BUG FIX: removed stray leading space that corrupted this key
              '2e57cbb3c25fa9e446a8fd0e58be91e9', '1bed2480701164024b1a644843c76099']
```
Ok, now we'll go ahead and set up the other search terms and cache paths, so we can run our full `get_piis` method.
```
cache_path = '/Users/Jonathan/.scopus/scopus_search/COMPLETE/'
term_list = ['polymer','organic','molecular', 'chemistry', 'synthesis']
```
Ok, with search terms in hand, and a downselected journal list, we're ready to go and scrape papers!
```
get_piis(term_list,journal_frame,range(1995,2021),cache_path=cache_path,output_path = '/Users/Jonathan/Desktop/pyblio_test/', keymaster = True, fresh_keys = apikeylist)
```
### Further development work that isn't quite yet fully done/working?
```
def multiprocess(term_list, journal_frame, year_list, cache_path, output_path,
                 keymaster=False, fresh_keys=None,
                 config_path='/Users/Jonathan/.scopus/config.ini', split_ratio=2):
    """
    Run `get_piis` over the journal list in `split_ratio` parallel processes.

    The journal dataframe is split into `split_ratio` chunks and one child
    process is spawned per chunk; all other arguments are forwarded unchanged
    to `get_piis`. Blocks until every child has finished.

    NOTE(review): every child receives the same `keymaster`/`fresh_keys`
    arguments, so with keymaster=True more than one process may try to swap
    API keys at once -- confirm that only one chunk should be the keymaster.
    """
    # BUG FIX: replaced the placeholder docstring ("asdfoinasdfoin").
    split_list = np.array_split(journal_frame, split_ratio)
    processes = []
    for k in range(split_ratio):
        print("Before multiprocessing")
        p = multiprocessing.Process(target=get_piis,
                                    args=[term_list, split_list[k], year_list, cache_path,
                                          output_path, keymaster, fresh_keys, config_path])
        print("after multiprocessing")
        p.start()
        processes.append(p)
    # Wait for all workers to complete before returning.
    for process in processes:
        process.join()
# Example invocation of the multiprocess scraper.
# NOTE(review): `journal_frame` and `fresh_keys` are not defined anywhere in
# this notebook (the dataframe above is `journal_list` and the key list is
# `apikeylist`) -- these cells will NameError as written; confirm intended names.
multiprocess(term_list, journal_frame, range(1995,2021), cache_path=cache_path, output_path = '/Users/Jonathan/Desktop/pyblio_test2/', keymaster = True, fresh_keys = fresh_keys, config_path = '/Users/Jonathan/.scopus/config.ini', split_ratio = 2)
# Earlier experiment: manual three-way split, kept for reference.
# split_ratio = 3
# split_list = np.array_split(journal_frame, split_ratio)
# for k in range(split_ratio):
#     p = multiprocessing.Process(target = get_piis, args = [term_list,split_list[k],range(1995,2021),cache_path,'/Volumes/My Passport/Davids Stuff/pyblio_test3/', True, fresh_keys])
#     p.start()
#First, we need to split our list of journals in half
df1, df2 = np.array_split(journal_frame,2)
# Two workers; only p2 is the keymaster (last two positional args: True, fresh_keys).
p1 = multiprocessing.Process(target = get_piis, args = [term_list,df1,range(1995,2021),cache_path,'/Volumes/My Passport/Davids Stuff/pyblio_test3/'])
p2 = multiprocessing.Process(target = get_piis, args = [term_list,df2,range(1995,2021),cache_path,'/Volumes/My Passport/Davids Stuff/pyblio_test3/',True,fresh_keys])
#p3 = multiprocessing.Process(target = get_piis, args = [term_list,df3,range(1995,2021),cache_path,'/Volumes/My Passport/Davids Stuff/pyblio_test2/'])
#p4 = multiprocessing.Process(target = get_piis, args = [term_list,df4,range(1995,2021),cache_path,'/Volumes/My Passport/Davids Stuff/pyblio_test2/'])
p1.start()
p2.start()
#p3.start()
#p4.start()
# Periodic cache-clearing loop, disabled for now:
# starttime=time.time()
# while True:
#     clear_cache(cache_path)
#     clear_output()
#     time.sleep(20.0 - ((time.time() - starttime) % 20.0))
p1.join()
p2.join()
#p3.join()
#p4.join()
```
### Stuff below is for counting how many publications are located in an output directory
```
def absoluteFilePaths(directory):
    """Yield the absolute path of every file found under *directory* (recursive)."""
    for folder, _, names in os.walk(directory):
        for name in names:
            yield os.path.abspath(os.path.join(folder, name))
file2 = open('/Volumes/My Passport/Davids Stuff/pyblio_test/Gene: X/Gene: X.txt','r')
file2.readline()
def count_pubs(output_path):
    """
    Sum the publication counts recorded in the per-journal .txt summary files
    that `get_piis` writes (lines like "This file contains N publications.").

    Skips macOS AppleDouble files ('._' in the path) and only reads paths
    containing 'txt'. Returns the total count as an int.
    """
    count = 0
    # Walk the tree directly (same traversal as absoluteFilePaths).
    for dirpath, _, filenames in os.walk(output_path):
        for fname in filenames:
            path = os.path.abspath(os.path.join(dirpath, fname))
            if 'txt' in path and '._' not in path:
                # BUG FIX: the original read `string.split()` where `string`
                # was never defined (NameError), and never closed the file.
                with open(path, 'r') as fh:
                    contents = fh.read()
                count += sum(int(s) for s in contents.split() if s.isdigit())
    return count
count_pubs('/Users/Jonathan/Desktop/pyblio_test/')
```
| github_jupyter |
```
import torch
from torch import nn
import torchvision
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torch.utils.data import DataLoader
from pathlib import Path
from torchvision.models import resnet101
import sys
sys.path.append("..")
from video_classification.datasets import FolderOfFrameFoldersDataset, FrameWindowDataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ROOT = Path("/home/ubuntu/SupervisedVideoClassification")
DATA_ROOT = Path(ROOT/"data")
# Training-time augmentation: photometric jitter plus random flips, then
# tensor conversion and ImageNet-statistics normalization (the values used by
# torchvision's pretrained backbones).
train_transforms = transforms.Compose([
    transforms.ColorJitter(),
    transforms.RandomHorizontalFlip(p=0.25),
    transforms.RandomVerticalFlip(p=0.25),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Validation: deterministic preprocessing only (no augmentation).
valid_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Datasets of overlapping 3-frame windows drawn from per-video frame folders.
train_ds = FolderOfFrameFoldersDataset(DATA_ROOT/'train',
                                       transform=train_transforms,
                                       base_class=FrameWindowDataset,
                                       window_size=3,
                                       overlapping=True,)
valid_ds = FolderOfFrameFoldersDataset(DATA_ROOT/'validation',
                                       transform=valid_transforms,
                                       base_class=FrameWindowDataset,
                                       window_size=3,
                                       overlapping=True,)
from torch import nn
from torchvision.models import resnet101
from video_classification.models.mlp import MLP
class SingleImageResNetModel(nn.Module):
    """
    Encode one RGB frame: frozen pretrained ResNet-101 trunk + MLP head.

    The ResNet's final fc layer is dropped, so the trunk outputs the 2048-dim
    globally pooled feature, which is fed to an MLP with layer sizes
    `mlp_sizes` (last entry = output dimension).
    """

    def __init__(self, mlp_sizes=[768, 128, 2]):
        super().__init__()
        resnet = resnet101(pretrained=True)
        # Keep everything up to (and including) global average pooling.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.clf = MLP(2048, mlp_sizes)
        # Backbone is frozen by default; call unfreeze_resnet() to fine-tune.
        self.freeze_resnet()

    def forward(self, x):
        # BUG FIX: `.squeeze()` removed *all* singleton dimensions, including
        # the batch dimension for batch size 1, which broke the MLP input
        # shape. Flatten only the trailing spatial dims instead.
        x = self.resnet(x)
        x = torch.flatten(x, 1)
        return self.clf(x)

    def freeze_resnet(self):
        for p in self.resnet.parameters():
            p.requires_grad = False

    def unfreeze_resnet(self):
        for p in self.resnet.parameters():
            p.requires_grad = True
import torch
from torch import nn
from video_classification.models.mlp import MLP
class MultiImageModel(nn.Module):
    """
    Classify a window of frames: one per-frame ResNet encoder + a joint MLP.

    Each of the `window_size` frames is encoded by its own
    `SingleImageResNetModel` (MLP sizes `single_mlp_sizes`); the encodings are
    concatenated and classified by an MLP with sizes `joint_mlp_sizes`
    (last entry = number of classes).
    """

    def __init__(self,
                 window_size=3,
                 single_mlp_sizes=[768, 128],
                 joint_mlp_sizes=[64, 2]):
        super().__init__()
        self.window_size = window_size
        self.single_mlp_sizes = single_mlp_sizes
        self.joint_mlp_sizes = joint_mlp_sizes
        # One independent encoder per frame position in the window.
        self.single_image_models = nn.ModuleList(
            [SingleImageResNetModel(self.single_mlp_sizes) for _ in range(window_size)])
        self.in_features = self.single_mlp_sizes[-1] * self.window_size
        self.clf = MLP(self.in_features, joint_mlp_sizes)

    def forward(self, x):
        # x is of size [B, T, C, H, W]: a batch of frame windows.
        x = x.transpose(0, 1)  # -> [T, B, C, H, W]
        # Each frame position goes through its own encoder:
        # list of length T, each element of size [B, single_mlp_sizes[-1]].
        encoded_windows = [m(window) for m, window in zip(self.single_image_models, x)]
        x = torch.cat(encoded_windows, dim=1)  # [B, T * single_mlp_sizes[-1]]
        x = self.clf(x)  # [B, joint_mlp_sizes[-1]]
        return x

    def freeze_single_image_model(self):
        # BUG FIX: the attribute is `single_image_models` (an nn.ModuleList);
        # `self.single_image_model` raised AttributeError. Freeze every
        # per-frame encoder entirely.
        for model in self.single_image_models:
            for p in model.parameters():
                p.requires_grad = False

    def unfreeze_single_image_model(self):
        # BUG FIX: same missing-attribute error as above. Only the MLP heads
        # are unfrozen -- training the whole ResNet backbone is a no-go, so we
        # only train the classifier part of each encoder.
        for model in self.single_image_models:
            for p in model.clf.parameters():
                p.requires_grad = True
# Per-frame encoders emit 256-dim features (single_mlp_sizes[-1]); the joint
# head maps the concatenated 3*256 vector down to 2 classes.
model = MultiImageModel(
    window_size=3,
    single_mlp_sizes=[1024, 256],
    joint_mlp_sizes=[128, 2])
model = model.to(device)
# Smoke test: run a batch of 4 frame-windows through the model.
x = torch.stack([train_ds[0][0], train_ds[1][0], train_ds[2][0], train_ds[3][0]]).to(device)
model(x)
from video_classification.trainer import Trainer
# NOTE(review): weights [0.3, 1.0] presumably down-weight the majority class
# to counter class imbalance -- confirm against the dataset label distribution.
classes_weights = torch.Tensor([0.3, 1.0]).to(device)
criterion = nn.CrossEntropyLoss(weight=classes_weights)
trainer = Trainer(train_ds,
                  valid_ds,
                  model,
                  criterion,
                  "multi_frame_resnet101_differentMLP",
                  str(ROOT/'checkpoints'),
                  device=device,
                  amp_opt_level="O1",
                  cycle_mult=0.9,
                  )
from tqdm.autonotebook import tqdm
trainer.train(lr=1e-3,
              batch_size=128,
              n_epochs=40,
              gradient_accumulation_steps=2,
              num_workers=8,
              max_gradient_norm=2.0,
              )
import pandas as pd
# Epoch-level metrics as a dataframe; one row per epoch.
df = pd.DataFrame(trainer.epoch_state).T
df
# Index of the epoch with the best validation F1.
df['f1'].argmax()
```
| github_jupyter |
```
import os
os.chdir("..")
"""
Iterate over the PubMED articles that mention infecious diseases from the
disease ontology.
"""
import rdflib
from pylru import lrudecorator
import pubcrawler.article as pubcrawler
from annotator.keyword_annotator import KeywordAnnotator
from annotator.annotator import AnnoDoc
import re
import json
import pymongo
# Parse the Human Disease Ontology (DOID) into an in-memory RDF graph.
print("Loading disease ontology...")
disease_ontology = rdflib.Graph()
# NOTE(review): downloads the full OWL file over the network — slow and
# requires connectivity.
disease_ontology.parse(
    "http://purl.obolibrary.org/obo/doid.owl",
    format="xml"
)
print("disease ontology loaded")
# Echo the graph object (notebook cell output).
disease_ontology
def get_annotation_keywords():
    """Return de-duplicated label/synonym strings for every infectious
    disease (descendant of DOID:0050117) in the loaded ontology, with
    trailing parenthetical/bracketed notes stripped."""
    qres = disease_ontology.query("""
    prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>
    prefix obo: <http://purl.obolibrary.org/obo/>
    prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?entity ?label
    WHERE {
        # only resolve diseases by infectious agent
        ?entity rdfs:subClassOf* obo:DOID_0050117
        ; oboInOwl:hasNarrowSynonym|oboInOwl:hasRelatedSynonym|oboInOwl:hasExactSynonym|rdfs:label ?label
    }
    """)
    def strip_notes(raw_label):
        # Drop a trailing " (...)" and/or " [...]" annotation.
        cleaned = re.sub(r"\s\(.*\)","", raw_label)
        cleaned = re.sub(r"\s\[.*\]","", cleaned)
        assert(len(cleaned) > 0)
        return cleaned
    # Set comprehension de-duplicates; rows are (entity, label) pairs.
    return list({strip_notes(str(row[1])) for row in qres})
# Dump the keyword list to a plain-text file, one keyword per line.
with open("annotation_keywords", "w+") as f:
    for item in get_annotation_keywords():
        f.write("{}\n".format(item))
def str_escape(s):
    """JSON-escape *s* and return it without the surrounding double quotes."""
    quoted = json.dumps(s)
    return quoted[1:-1]
@lrudecorator(500)
def resolve_keyword(keyword):
    """Look up *keyword* in the disease ontology (memoized for up to 500
    distinct keywords) and return the matching entity rows.

    The SPARQL FILTER matches the keyword case-insensitively, optionally
    followed by parenthetical/bracketed notes, against any synonym or
    rdfs:label of an infectious disease (descendant of DOID:0050117).
    Returns an empty list when nothing matches; prints a warning on zero
    or multiple matches.
    """
    # NOTE(review): the pattern is assembled from
    # str_escape(re.escape(keyword)) plus a JSON-escaped suffix regex; the
    # "\s" escapes in the plain (non-raw) string literal below rely on
    # Python passing unknown escapes through unchanged — a
    # DeprecationWarning on modern Pythons. Consider raw strings.
    query = """
    prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>
    prefix obo: <http://purl.obolibrary.org/obo/>
    prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?entity
    WHERE {
        # only resolve diseases by infectious agent
        ?entity rdfs:subClassOf* obo:DOID_0050117
        ; oboInOwl:hasNarrowSynonym|oboInOwl:hasRelatedSynonym|oboInOwl:hasExactSynonym|rdfs:label ?label
        FILTER regex(?label, "^(""" + str_escape(re.escape(keyword)) + str_escape("(\s[\[\(].*[\]\)])*") + """)$", "i")
    }
    """
    qres = list(disease_ontology.query(query))
    if len(qres) == 0:
        # xmlcharrefreplace keeps the print safe for non-ASCII keywords.
        print("no match for", keyword.encode('ascii', 'xmlcharrefreplace'))
    elif len(qres) > 1:
        print("multiple matches for", keyword.encode('ascii', 'xmlcharrefreplace'))
        print(qres)
    return qres
def annotated_keywords_to_dict_list(keywords):
    """Convert (keyword, query-results) pairs into unique keyword dicts.

    Args:
        keywords: iterable of (keyword, qres) tuples, where qres is the
            non-empty result list from resolve_keyword(); each row exposes
            `.entity` with a `.toPython()` URI accessor.

    Returns:
        List of {"keyword": ..., "uri": ...} dicts; for duplicate keywords
        the first occurrence wins.
    """
    # BUG FIX: the original appended to an undefined name `keys`, raising
    # NameError on the first unseen keyword. A set also makes the
    # membership test O(1) instead of O(n).
    seen_keys = set()
    keyword_list = []
    for keyword, uri in keywords:
        if keyword in seen_keys:
            continue
        seen_keys.add(keyword)
        keyword_list.append({
            "keyword": keyword,
            "uri": uri[0].entity.toPython()
        })
    return keyword_list
def write_article_meta_to_mongo(article, collection):
    """Annotate one PubMed article and persist metadata + disease keywords.

    Relies on the module-level `keyword_annotator` (initialized later in
    the notebook) being defined before the first call.
    """
    pc_article = pubcrawler.Article(article)
    # Run the keyword annotator over the article body text.
    anno_doc = AnnoDoc(pc_article.body)
    anno_doc.add_tier(keyword_annotator)
    # Pair each detected keyword span with its ontology resolution
    # (resolve_keyword is memoized, so repeats are cheap).
    infectious_diseases = [
        (disease.text, resolve_keyword(disease.text))
        for disease in anno_doc.tiers['keywords'].spans
    ]
    disease_ontology_keywords = None if len(infectious_diseases) == 0 else annotated_keywords_to_dict_list(infectious_diseases)
    # Write the derived metadata back onto the article document in place.
    collection.update_one({'_id': article['_id']},
        {
            '$set':
            {
                'meta':
                {
                    'article-ids': pc_article.pub_ids(),
                    'article-type': pc_article.article_type(),
                    # 'pub-dates': pc_article.pub_dates()
                    # Need to fix stuff with dates in Mongo
                    'keywords': pc_article.keywords()
                },
                'annotations':
                {
                    'disease-ontology-keywords': disease_ontology_keywords
                }
            },
        })
def iterate_infectious_disease_articles(collection):
    """Annotate every matching article in *collection* in place.

    NOTE(review): despite its name, this function does not yield anything
    (it returns None), yet it is iterated later in the notebook
    (`for article, infectious_diseases in iterate_...`) — that call would
    raise TypeError. Confirm whether it was meant to be a generator.
    """
    query = {}
    # NOTE(review): `args` is not defined anywhere in this notebook; this
    # branch raises NameError unless an argparse namespace exists globally.
    if args.no_reannotation:
        query = {'meta': {'$exists': False}}
    # NOTE(review): Collection.count() is deprecated in PyMongo 3.7+;
    # prefer count_documents(). Left unchanged here.
    total_articles = collection.count(query)
    processed_articles = 0
    for article in collection.find(query):
        processed_articles += 1
        # NOTE(review): '{:.2}%' prints the *fraction* (0-1) followed by a
        # percent sign, not an actual percentage — multiply by 100 to fix.
        print("Processing article {} of {} ({:.2}%)...".format(processed_articles, total_articles, processed_articles / total_articles), end="")
        write_article_meta_to_mongo(article, collection=collection)
        print(" Done!")
# Connect to the local MongoDB 'pmc' database.
db = pymongo.MongoClient('localhost')['pmc']
articles = db.articlesubset
# Articles that have not been annotated yet.
cursor = articles.find({'meta': {'$exists': False}})
# NOTE(review): Cursor.count() is deprecated in PyMongo 3.7+.
cursor.count()
x = cursor.next()
x
# Smoke test: clone one article under a throwaway _id, annotate it, check
# the annotations landed, then clean up.
x['_id'] = "test"
articles.delete_one({'_id': "test"})
articles.insert_one(x)
x = articles.find_one({'_id': 'test'})
x
# Build the annotator from the ontology keywords (slow: runs the SPARQL query).
keyword_annotator = KeywordAnnotator(keywords=get_annotation_keywords())
write_article_meta_to_mongo(x, articles)
x = articles.find_one({'_id': 'test', 'annotations.disease-ontology-keywords': {'$exists': True}})
x
articles.delete_one({'_id': "test"})
# NOTE(review): iterate_infectious_disease_articles returns None, so this
# loop raises TypeError; the three bare names below are never defined in
# this notebook either.
for article, infectious_diseases in iterate_infectious_disease_articles(db.articlesubset):
    print(article['_id'], infectious_diseases)
    print("")
total_article_count
article_with_body_count
infectious_disease_article_count
def strip_article_meta(collection):
    """Remove the 'meta' and 'annotations' fields from every document in
    *collection* that currently has a 'meta' field."""
    selector = {'meta': {'$exists': True}}
    unset_fields = {'$unset': {'meta': "", 'annotations': ""}}
    collection.update_many(selector, unset_fields)
# Remove previously written metadata/annotations from the whole collection.
strip_article_meta(articles)
```
| github_jupyter |
## Preliminaries
```
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
# Set ipython's max row display
pd.set_option('display.max_row', 1000)
# Set iPython's max column width to 50
pd.set_option('display.max_columns', 50)
```
## Create dataframe
```
# Load the Game of Thrones battles dataset directly from Dropbox.
df = pd.read_csv('https://www.dropbox.com/s/52cb7kcflr8qm2u/5kings_battles_v1.csv?dl=1')
# Peek at the first rows (notebook cell output).
df.head()
```
## Make plot with bins of fixed size
```
# Make two variables of the attacker and defender size, but leaving out
# cases when there are over 90,000 attackers
# (the comment originally said 10000, which did not match the code).
data1 = df['attacker_size'][df['attacker_size'] < 90000]
data2 = df['defender_size'][df['attacker_size'] < 90000]
# Create bins of 2000 each
# NOTE(review): the range runs from data1's minimum to data2's maximum;
# values of either series outside that span fall off the histogram —
# confirm whether the combined min/max was intended.
bins = np.arange(data1.min(), data2.max(), 2000) # fixed bin size
# Plot a histogram of attacker size
plt.hist(data1,
         bins=bins,
         alpha=0.5,
         color='#EDD834',
         label='Attacker')
# Plot a histogram of defender size
plt.hist(data2,
         bins=bins,
         alpha=0.5,
         color='#887E43',
         label='Defender')
# Set the x and y boundaries of the figure
plt.ylim([0, 10])
# Set the title and labels
plt.title('Histogram of Attacker and Defender Size')
plt.xlabel('Number of troops')
plt.ylabel('Number of battles')
plt.legend(loc='upper right')
plt.show()
```
## Make plot with fixed number of bins
```
# Make two variables of the attacker and defender size, but leaving out
# cases when there are over 90,000 attackers
data1 = df['attacker_size'][df['attacker_size'] < 90000]
data2 = df['defender_size'][df['attacker_size'] < 90000]
# Create 10 bins spanning the combined range of both series.
# BUG FIX: the original used min(data1 + data2) / max(data1 + data2),
# which adds the two pandas Series element-wise (aligned on index) rather
# than combining them, so the bin edges did not cover the actual data.
bins = np.linspace(min(data1.min(), data2.min()),
                   # the max being the highest value
                   max(data1.max(), data2.max()),
                   # and divided into 10 bins
                   10)
# Plot a histogram of attacker size
plt.hist(data1,
         # with bins defined as
         bins=bins,
         # with alpha
         alpha=0.5,
         # with color
         color='#EDD834',
         # labelled attacker
         label='Attacker')
# Plot a histogram of defender size
plt.hist(data2,
         # with bins defined as
         bins=bins,
         # with alpha
         alpha=0.5,
         # with color
         color='#887E43',
         # labeled defender
         label='Defender')
# Set the x and y boundaries of the figure
plt.ylim([0, 10])
# Set the title and labels
plt.title('Histogram of Attacker and Defender Size')
plt.xlabel('Number of troops')
plt.ylabel('Number of battles')
plt.legend(loc='upper right')
plt.show()
```
| github_jupyter |
# About This Notebook
This notebook shows how to implement **Low-Rank Tensor Completion with Truncated Nuclear Norm minimization (LRTC-TNN)** on some real-world data sets. For an in-depth discussion of LRTC-TNN, please see our article [1].
<div class="alert alert-block alert-info">
<font color="black">
<b>[1]</b> Xinyu Chen, Jinming Yang, Lijun Sun (2020). <b>A Nonconvex Low-Rank Tensor Completion Model for Spatiotemporal Traffic Data Imputation</b>. arXiv.2003.10271. <a href="https://arxiv.org/abs/2003.10271" title="PDF"><b>[PDF]</b></a>
</font>
</div>
## Quick Run
This notebook is publicly available at [https://github.com/xinychen/tensor-learning](https://github.com/xinychen/tensor-learning).
## Low-Rank Tensor Completion
We start by importing the necessary dependencies.
```
import numpy as np
from numpy.linalg import inv as inv
```
### Tensor Unfolding (`ten2mat`) and Matrix Folding (`mat2ten`)
Using numpy reshape to perform 3rd rank tensor unfold operation. [[**link**](https://stackoverflow.com/questions/49970141/using-numpy-reshape-to-perform-3rd-rank-tensor-unfold-operation)]
```
def ten2mat(tensor, mode):
    """Unfold `tensor` along axis `mode` into a matrix (Fortran ordering)."""
    rotated = np.moveaxis(tensor, mode, 0)
    return np.reshape(rotated, (tensor.shape[mode], -1), order = 'F')
def mat2ten(mat, tensor_size, mode):
    """Fold matrix `mat` back into a tensor of shape `tensor_size`;
    inverse of ten2mat(tensor, mode)."""
    # Axis order with `mode` first, remaining axes in ascending order.
    axis_order = [mode] + [ax for ax in range(tensor_size.shape[0]) if ax != mode]
    folded = np.reshape(mat, list(tensor_size[axis_order]), order = 'F')
    return np.moveaxis(folded, 0, mode)
```
### Singular Value Thresholding (SVT) for TNN
```
def svt_tnn(mat, alpha, rho, theta):
    """Singular value thresholding (SVT) for the truncated nuclear norm:
    the largest `theta` singular values are kept intact, the rest are
    soft-thresholded by alpha / rho (clamped at zero)."""
    u, s, v = np.linalg.svd(mat, full_matrices = False)
    shrunk = s.copy()
    shrunk[theta :] = s[theta :] - alpha / rho
    shrunk[shrunk < 0] = 0
    # u * shrunk broadcasts over columns, i.e. u @ diag(shrunk).
    return np.matmul(u * shrunk, v)
```
**Potential alternative for this**:
This is a competitively efficient algorithm for implementing SVT-TNN.
```
def svt_tnn(mat, alpha, rho, theta):
    """Efficient singular value thresholding for the truncated nuclear norm.

    Keeps the top `theta` singular values and soft-thresholds the rest by
    tau = alpha / rho. For very wide (2m < n) matrices it takes the SVD of
    the small m-by-m Gram matrix instead; very tall (m > 2n) matrices are
    handled by recursing on the transpose.
    """
    tau = alpha / rho
    [m, n] = mat.shape
    if 2 * m < n:
        # Wide case: eigen-decompose mat @ mat.T (m x m) and recover the
        # singular values as its square roots.
        u, s, v = np.linalg.svd(mat @ mat.T, full_matrices = False)
        s = np.sqrt(s)
        idx = np.sum(s > tau)
        mid = np.zeros(idx)
        mid[:theta] = 1
        mid[theta:idx] = (s[theta : idx] - tau) / s[theta : idx]
        return (u[:,:idx] @ np.diag(mid)) @ (u[:, :idx].T @ mat)
    elif m > 2 * n:
        # Tall case: recurse on the (wide) transpose and transpose back.
        # BUG FIX: the original called svt_tnn(mat.T, tau, theta).T, which
        # both drops an argument (TypeError: 4 required) and passes tau in
        # place of alpha and theta in place of rho.
        return svt_tnn(mat.T, alpha, rho, theta).T
    # Near-square case: plain truncated SVD shrinkage.
    u, s, v = np.linalg.svd(mat, full_matrices = 0)
    idx = np.sum(s > tau)
    vec = s[:idx].copy()
    vec[theta : idx] = s[theta : idx] - tau
    return u[:, :idx] @ np.diag(vec) @ v[:idx, :]
```
<div class="alert alert-block alert-warning">
<ul>
<li><b><code>compute_mape</code>:</b> <font color="black">Compute the value of Mean Absolute Percentage Error (MAPE).</font></li>
<li><b><code>compute_rmse</code>:</b> <font color="black">Compute the value of Root Mean Square Error (RMSE).</font></li>
</ul>
</div>
> Note that $$\mathrm{MAPE}=\frac{1}{n} \sum_{i=1}^{n} \frac{\left|y_{i}-\hat{y}_{i}\right|}{y_{i}} \times 100, \quad\mathrm{RMSE}=\sqrt{\frac{1}{n} \sum_{i=1}^{n}\left(y_{i}-\hat{y}_{i}\right)^{2}},$$ where $n$ is the total number of estimated values, and $y_i$ and $\hat{y}_i$ are the actual value and its estimation, respectively.
```
def compute_rmse(var, var_hat):
    """Root mean square error between `var` and its estimate `var_hat`."""
    squared_error = (var - var_hat) ** 2
    return np.sqrt(squared_error.sum() / var.shape[0])
def compute_mape(var, var_hat):
    """Mean absolute percentage error, expressed as a fraction (not x100)."""
    relative_error = np.abs(var - var_hat) / var
    return relative_error.sum() / var.shape[0]
```
### Define LRTC-TNN Function with `Numpy`
```
def LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter):
    """Low-Rank Tensor Completion with Truncated Nuclear Norm, LRTC-TNN.

    Args:
        dense_tensor: ground-truth tensor, used only to report MAPE/RMSE on
            the held-out entries.
        sparse_tensor: observed tensor with missing entries encoded as 0.
        alpha: weight vector over the tensor modes (should sum to 1).
        rho: initial ADMM penalty parameter; grown by 5% per iteration and
            capped at 1e5.
        theta: truncation rate; per mode k the top ceil(theta * dim[k])
            singular values are kept un-shrunk by svt_tnn.
        epsilon: convergence tolerance on the relative change of the estimate.
        maxiter: maximum number of ADMM iterations.

    Returns:
        The completed tensor estimate.
    """
    dim = np.array(sparse_tensor.shape)
    pos_missing = np.where(sparse_tensor == 0)
    # Held-out test entries: present in dense_tensor, missing in sparse_tensor.
    pos_test = np.where((dense_tensor != 0) & (sparse_tensor == 0))
    X = np.zeros(np.insert(dim, 0, len(dim))) # \boldsymbol{\mathcal{X}}
    T = np.zeros(np.insert(dim, 0, len(dim))) # \boldsymbol{\mathcal{T}}
    Z = sparse_tensor.copy()
    last_tensor = sparse_tensor.copy()
    snorm = np.sqrt(np.sum(sparse_tensor ** 2))
    it = 0
    while True:
        rho = min(rho * 1.05, 1e5)
        for k in range(len(dim)):
            # Per-mode SVT update of the auxiliary tensors X[k].
            # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
            X[k] = mat2ten(svt_tnn(ten2mat(Z - T[k] / rho, k), alpha[k], rho, int(np.ceil(theta * dim[k]))), dim, k)
        # Consensus update on the missing entries only (observed stay fixed).
        Z[pos_missing] = np.mean(X + T / rho, axis = 0)[pos_missing]
        # Dual variable update.
        T = T + rho * (X - np.broadcast_to(Z, np.insert(dim, 0, len(dim))))
        # Weighted combination of the per-mode estimates.
        tensor_hat = np.einsum('k, kmnt -> mnt', alpha, X)
        tol = np.sqrt(np.sum((tensor_hat - last_tensor) ** 2)) / snorm
        last_tensor = tensor_hat.copy()
        it += 1
        print('Iter: {}'.format(it))
        print('Tolerance: {:.6}'.format(tol))
        print('MAPE: {:.6}'.format(compute_mape(dense_tensor[pos_test], tensor_hat[pos_test])))
        print('RMSE: {:.6}'.format(compute_rmse(dense_tensor[pos_test], tensor_hat[pos_test])))
        print()
        if (tol < epsilon) or (it >= maxiter):
            break
    print('Total iteration: {}'.format(it))
    print('Tolerance: {:.6}'.format(tol))
    print('Imputation MAPE: {:.6}'.format(compute_mape(dense_tensor[pos_test], tensor_hat[pos_test])))
    print('Imputation RMSE: {:.6}'.format(compute_rmse(dense_tensor[pos_test], tensor_hat[pos_test])))
    print()
    return tensor_hat
```
## Missing Data Imputation
In the following, we apply the above defined LRTC-TNN function to the task of missing data imputation on the following spatiotemporal multivariate time series datasets/matrices:
- **PeMS data set**: [PeMS traffic speed data set](https://doi.org/10.5281/zenodo.3939793).
### PeMS-4W
We generate **random missing (RM)** values on PeMS data set.
```
# ---- PeMS-4W, random missing (RM), 30% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)  # fixed seed for a reproducible missing pattern
data = pd.read_csv('../datasets/California-data-set/pems-4w.csv', header = None)
# Fold the (sensors x time) matrix into (sensors, 288 intervals/day, 28 days).
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 4 * 7]), 0)
random_tensor = np.random.rand(data.values.shape[0], 288, 4 * 7)
missing_rate = 0.3
### Random missing (RM) scenario:
# round(u + 0.5 - rate) yields 0 with probability `missing_rate`, else 1.
binary_tensor = np.round(random_tensor + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_tensor, binary_tensor  # free memory before the solver runs
import time
start = time.time()
alpha = np.ones(3) / 3  # equal weight on all three modes
rho = 1e-4
theta = 0.05            # keep the top 5% of singular values per mode
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
# ---- Same data, RM at 70% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-4w.csv', header = None)
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 4 * 7]), 0)
random_tensor = np.random.rand(data.values.shape[0], 288, 4 * 7)
missing_rate = 0.7
### Random missing (RM) scenario:
binary_tensor = np.round(random_tensor + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_tensor, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-4
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
```
We generate **non-random missing (NM)** values on PeMS data set. Then, we conduct the imputation experiment.
```
# ---- PeMS-4W, non-random missing (NM), 30% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-4w.csv', header = None)
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 4 * 7]), 0)
# NM drops whole (sensor, day) fibers: one coin flip per sensor/day pair.
random_matrix = np.random.rand(data.values.shape[0], 4 * 7)
missing_rate = 0.3
### Non-random missing (NM) scenario:
binary_tensor = np.zeros(dense_tensor.shape)
for i1 in range(dense_tensor.shape[0]):
    for i2 in range(dense_tensor.shape[2]):
        binary_tensor[i1, :, i2] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_matrix, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-5  # smaller initial penalty for the harder NM scenario
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
# ---- Same data, NM at 70% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-4w.csv', header = None)
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 4 * 7]), 0)
random_matrix = np.random.rand(data.values.shape[0], 4 * 7)
missing_rate = 0.7
### Non-random missing (NM) scenario:
binary_tensor = np.zeros(dense_tensor.shape)
for i1 in range(dense_tensor.shape[0]):
    for i2 in range(dense_tensor.shape[2]):
        binary_tensor[i1, :, i2] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_matrix, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-6  # even smaller initial penalty at 70% missing
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
```
### PeMS-8W
We generate **random missing (RM)** values on PeMS data set.
```
# ---- PeMS-8W, random missing (RM), 30% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-8w.csv', header = None)
# Fold into (sensors, 288 intervals/day, 56 days).
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 8 * 7]), 0)
random_tensor = np.random.rand(data.values.shape[0], 288, 8 * 7)
missing_rate = 0.3
### Random missing (RM) scenario:
binary_tensor = np.round(random_tensor + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_tensor, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-4
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
# ---- Same data, RM at 70% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-8w.csv', header = None)
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 8 * 7]), 0)
random_tensor = np.random.rand(data.values.shape[0], 288, 8 * 7)
missing_rate = 0.7
### Random missing (RM) scenario:
binary_tensor = np.round(random_tensor + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_tensor, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-4
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
```
We generate **non-random missing (NM)** values on PeMS data set. Then, we conduct the imputation experiment.
```
# ---- PeMS-8W, non-random missing (NM), 30% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-8w.csv', header = None)
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 8 * 7]), 0)
# NM drops whole (sensor, day) fibers: one coin flip per sensor/day pair.
random_matrix = np.random.rand(data.values.shape[0], 8 * 7)
missing_rate = 0.3
### Non-random missing (NM) scenario:
binary_tensor = np.zeros(dense_tensor.shape)
for i1 in range(dense_tensor.shape[0]):
    for i2 in range(dense_tensor.shape[2]):
        binary_tensor[i1, :, i2] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_matrix, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-5  # smaller initial penalty for the harder NM scenario
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
# ---- Same data, NM at 70% missing rate ----
import numpy as np
import pandas as pd
np.random.seed(1000)
data = pd.read_csv('../datasets/California-data-set/pems-8w.csv', header = None)
dense_tensor = mat2ten(data.values, np.array([data.values.shape[0], 288, 8 * 7]), 0)
random_matrix = np.random.rand(data.values.shape[0], 8 * 7)
missing_rate = 0.7
### Non-random missing (NM) scenario:
binary_tensor = np.zeros(dense_tensor.shape)
for i1 in range(dense_tensor.shape[0]):
    for i2 in range(dense_tensor.shape[2]):
        binary_tensor[i1, :, i2] = np.round(random_matrix[i1, i2] + 0.5 - missing_rate)
sparse_tensor = np.multiply(dense_tensor, binary_tensor)
del data, random_matrix, binary_tensor
import time
start = time.time()
alpha = np.ones(3) / 3
rho = 1e-6  # even smaller initial penalty at 70% missing
theta = 0.05
epsilon = 1e-3
maxiter = 100
tensor_hat = LRTC(dense_tensor, sparse_tensor, alpha, rho, theta, epsilon, maxiter)
end = time.time()
print('Running time: %.2f minutes' % ((end - start)/60.0))
```
### License
<div class="alert alert-block alert-danger">
<b>This work is released under the MIT license.</b>
</div>
| github_jupyter |
# Sequence Parameters
## *Sequence Type*: predefined sequence or waveform upload?
Many use cases require the freedom to define waveforms on a sample-basis. The `"Simple"` sequence type provided by the `zhinst-toolkit` allows for exactly that. If the *Simple* sequence is configured, the user can add waveforms to a queue of waveforms to be uploaded to the AWG Core. All the queued up waveforms will then be played in that order. The waveforms are defined as simple `numpy` arrays, with every value in the array corresponding to one sample. Since the waveforms are defined purely by samples, the duration of the waveform depends on the *sampling rate* of the AWG Core.
Instead of uploading waveforms defined as *numpy* arrays, the `zhinst-toolkit` also provides pre-defined sequences for standard experiments. These differ between UHFQA and HDAWG and include:
* Rabi (HDAWG)
* T1 (HDAWG)
* T2 Ramsey (HDAWG)
* Trigger (HDAWG)
* Continuous-Wave (CW) Spectroscopy (UHFQA)
* Pulsed Spectroscopy (UHFQA)
* Multiplexed Readout (UHFQA)
## Definition of a sequence: *period* and time origin
The parameter `period` defines the duration in seconds of one single shot of the experiment. The start of each period in the sequence is defined by a trigger signal (either the AWG sends or receives a trigger). The time origin `t=0` is the reference point for all waveforms. It is defined to not coincide with the trigger signal but to be a well-defined time after the trigger. This way, it is possible to shift waveforms to *negative time* with respect to the origin `t=0`.
The parameter `dead_time` describes the time in seconds after the time origin `t=0` before the next period begins, i.e. before the next trigger signal is sent or expected. The time origin is thus at `period - dead_time` after the trigger signal. The *dead time* is important to keep in mind because it defines the maximum length of a waveform that can be played *after* `t=0`. If the waveform is longer than *dead time*, the next trigger signal will be missed. The *dead time* defaults to *5 us*.
```
Trigger Signal Time Origin t=0
+ + +
| | |
| | |
| | |
+--------------------------------------------------------------+--------------------------------+
| | |
| | |
| <------------------------------------+ Period +------------------------------------------> |
| | |
| | <--------+ Dead Time +------> |
+ + +
```
## *Repetitions*
The parameter `repetitions` defines the number of repetitions in the outer loop of the experiment. For example in an experiment with a single waveform (e.g. a single queued waveform or a Rabi or T1 experiment with a single point) the waveform will be repeated `repetitions` times for a total of `1 x repetitions` shots.
```
+---+ +---+
| |
| + XXXX + |
| | XXXXXX | |
| +------------------XXXXXXXXXX----------+ |
| | | | X ( Repetitions )
| | <---------+ Period +---------> | |
| + + |
| |
+---+ +---+
```
The corresponding pulse sequence would look like this:
```
+ XXXX + XXXX + XXXX + XXXX +
| XXXXXX | XXXXXX | XXXXXX | XXXXXX |
+-----------XXXXXXXXXX-----+---------XXXXXXXXXX------+-------XXXXXXXXXX------+----------XXXXXXXXXX------+ - - -
| | | | |
+ + + + +
```
Note however that for an experiment with multiple points in the inner loop (e.g. multiple waveforms in the queue or a Rabi or T1 experiment with multiple points) the parameter `repetitions` only refers to the outer loop. For example a Rabi experiment with `N` different amplitudes
```
+---+ +---+
| XX |
| + + XXX + XXXX + |
| | XXXX | XXXXX | XXXXXX | |
| +-------------XXXXXX-----+-------------XXXXXXX----+-----------XXXXXXXX ---+ |
| | | | | | X ( Repetitions )
| | Amp 1 | Amp ... | Amp N | |
| + + + + |
| |
+---+ +---+
```
would mean `N x repetitions` shots in total.
## Waveform *alignment*
The waveforms within each period are always played relative to the time origin at `t=0`. Often, it is desirable to start one waveform right after another waveform has ended. In order to allow gapless playback on different AWGs, the parameter `alignment` defines if the waveform should end at the time origin or start with the time origin.
* `alignment = "End with Trigger"`: the waveform is played *before* the time origin `t=0` and ends at `t=0`
```
Time Origin t=0
+
Waveform |
+ XXXXXXXXXXXXXXXXXXX | +
| XXXXXXXXXXXXXXXXXXXXX| |
| Alignment: "End with Trigger" XXXXXXXXXXXXXXXXXXXXXXX |
| XXXXXXXXXXXXXXXXXXXXXXX |
+----------------------------------------XXXXXXXXXXXXXXXXXXXXXXX--------------------------------+
| |
| |
| |
+ +
```
* `alignment = "Start with Trigger"`: the waveform is played *after* the time origin `t=0` and starts at `t=0`
```
Time Origin t=0
+
| Waveform
+ | XXXXXXXXXXXXXXXXXXXXXX +
| |XXXXXXXXXXXXXXXXXXXXXXXX |
| Alignment: "Start with Trigger" XXXXXXXXXXXXXXXXXXXXXXXXXX |
| XXXXXXXXXXXXXXXXXXXXXXXXXX |
+--------------------------------------------------------------XXXXXXXXXXXXXXXXXXXXXXXXXX-------+
| |
| |
| |
+ +
```
The parameter `alignment` can also be used with the Enum `Alignment` available in the `zhinst-toolkit`:
```python
from zhinst.toolkit import Alignment
hdawg.awgs[0].set_sequence_params(alignment="Start with Trigger")
hdawg.awgs[0].set_sequence_params(alignment=Alignment.START_WITH_TRIGGER)
```
## Using multiple AWGs: *trigger mode*
The correct trigger configuration is key to enable the aligned playback of waveforms over multiple AWG Cores and instruments. The standard configuration within the `zhinst-toolkit` assumes a **Master Trigger** configuration with one AWG Core that sends out the trigger signal to all other AWG Cores in the experiment, including on different instruments like another HDAWG or a UHFQA.
```
HDAWG 1
+-------------+
+-------<-----+ AWG 1 | Master Trigger
| +-------------+
+-------+------> AWG 2 |
| |-------------+
+------> AWG 3 |
| |-------------+
+------> AWG 4 |
| +-------------+
|
| HDAWG 2
| +-------------+
+------> AWG 1 |
| |-------------+
+------> AWG 2 |
| |-------------+
+------> AWG 3 |
| |-------------+
+------> AWG 4 |
| +-------------+
|
| UHFQA
| +-------------+
+------> |
+-------------+
```
On the HDAWG, the trigger output of an AWG Core is labelled with *'Mark'*, the trigger input is labelled *'Trig'*. By convention, every AWG Core uses the *Mark* and *Trig* of the lower channel. On the UHFQA the *Ref/Trig 1* is used for both sending and receiving triggers.
```
Out 1 ^ ^ Out 2 Out 3 ^ ^ Out 4
| | | |
| | | |
AWG 1 | | AWG 2 | |
+------------+----+------------+ +------------+----+------------+
| O O | | O O |
| O O O O | | O O O O |
+-------+----------------------+ +--+---------------------------+
| ^
| Mark 1 | Trig In 3
v |
+----->------>------->------->-----+
```
The parameter `trigger_mode` specifies whether the AWG Core is used as a master trigger, expects a trigger or is used by itself.
* `trigger_mode = "Send Trigger"`: send a trigger signal at the start of each period to act as a *master trigger* AWG Core
* `trigger_mode = "External Trigger"`: wait for an external trigger signal at the start of every period, the AWG Core is triggered by another AWG Core
```
Time Origin t=0
Waveform +
|
AWG 1 + XXXXXX | +
| XXXXXXXXXX | |
Trigger Mode: | XXXXXXXXXXXX | Alignment: |
"Send Trigger" | XXXXXXXXXXXXXX | "End with Trigger" |
+-------------------------------------XXXXXXXXXXXXXXXXXX---------------------+
| | |
| | |
| Send Trigger Signal | |
| (Mark 1) | |
\ | / | |
\|/ | |
v | |
| |
AWG 2 + | Waveform |
| | |
Trigger Mode: | Wait for Trigger Signal | XXXXXX |
"External Trigger" | (Trigger 3) | XXXXXXXXXX |
\ | / Alignment: | XXXXXXXXXXXX |
\|/ "Start with Trigger" | XXXXXXXXXXXXXX |
v------------------------------------------------------XXXXXXXXXXXXXXXXXX----+
```
If the *trigger mode* of all AWG Cores is configured correctly, their *time origins* coincide and the waveforms on all AWGs can be aligned with respect to `t=0`. Note that other sequence parameters like `period`, `dead_time` , `repetitions` have to be identical for the different AWG Cores in order to work together as expected!
* `trigger_mode = "None"`: to not send out a trigger signal at the start of every period, this is the default setting and can be used when the AWG Core does not have to trigger any other AWGs
The parameter `trigger_mode` can also be used with the Enum `TriggerMode` available in the `zhinst-toolkit`:
```python
from zhinst.toolkit import TriggerMode
hdawg.awgs[0].set_sequence_params(trigger_mode="Send Trigger")
hdawg.awgs[0].set_sequence_params(trigger_mode=TriggerMode.SEND_TRIGGER)
```
### *Trigger delay*
An additional parameter `trigger_delay` can be used to shift the reference point for a given AWG Core. The parameter acts as a delay in the trigger signal. All waveforms in the sequence will be shifted by *trigger_delay* with respect to the time origin `t=0`.
```
Time Origin t=0
Trigger Mode: +
| "External Trigger" |
| | Waveform
| Alignment: | XXXX
\|/ "Start with Trigger" | XXXXXXXX +
v | XXXXXXXXXX |
+------------------------------------+----------XXXXXXXXXXXX------+
| | | |
+ + <------> + +
Trigger Delay
Time Origin t=0
Trigger Mode: +
| "External Trigger" |
| Waveform
| Alignment: | XXXX
\|/ "End with Trigger" XXXXXXXX +
v XXXXXXXXXX |
+----------------------------------XXXXXXXXXXXX-------------------+
| | | |
+ +<------>+ +
Trigger Delay
```
Have a look at the examples provided for more information on how to trigger AWG Cores and how to program typical experiments with the HDAWG and UHFQA.
| github_jupyter |
# ChainerRL Quickstart Guide
This is a quickstart guide for users who just want to try ChainerRL for the first time.
If you have not yet installed ChainerRL, run the command below to install it:
```
pip install chainerrl
```
If you have already installed ChainerRL, let's begin!
First, you need to import necessary modules. The module name of ChainerRL is `chainerrl`. Let's import `gym` and `numpy` as well since they are used later.
```
import chainer
import chainer.functions as F
import chainer.links as L
import chainerrl
import gym
import numpy as np
```
ChainerRL can be used for any problems if they are modeled as "environments". [OpenAI Gym](https://github.com/openai/gym) provides various kinds of benchmark environments and defines the common interface among them. ChainerRL uses a subset of the interface. Specifically, an environment must define its observation space and action space and have at least two methods: `reset` and `step`.
- `env.reset` will reset the environment to the initial state and return the initial observation.
- `env.step` will execute a given action, move to the next state and return four values:
- a next observation
- a scalar reward
- a boolean value indicating whether the current state is terminal or not
- additional information
- `env.render` will render the current state.
Let's try 'CartPole-v0', which is a classic control problem. You can see below that its observation space consists of four real numbers while its action space consists of two discrete actions.
```
# Create the classic CartPole control environment.
env = gym.make('CartPole-v0')
print('observation space:', env.observation_space)
print('action space:', env.action_space)
# reset() returns the initial observation.
obs = env.reset()
env.render()
print('initial observation:', obs)
# Take one random action and inspect the (obs, reward, done, info) transition.
action = env.action_space.sample()
obs, r, done, info = env.step(action)
print('next observation:', obs)
print('reward:', r)
print('done:', done)
print('info:', info)
```
Now you have defined your environment. Next, you need to define an agent, which will learn through interactions with the environment.
ChainerRL provides various agents, each of which implements a deep reinforcement learning algorithm.
To use [DQN (Deep Q-Network)](https://doi.org/10.1038/nature14236), you need to define a Q-function that receives an observation and returns an expected future return for each action the agent can take. In ChainerRL, you can define your Q-function as `chainer.Link` as below. Note that the outputs are wrapped by `chainerrl.action_value.DiscreteActionValue`, which implements `chainerrl.action_value.ActionValue`. By wrapping the outputs of Q-functions, ChainerRL can treat discrete-action Q-functions like this and [NAFs (Normalized Advantage Functions)](https://arxiv.org/abs/1603.00748) in the same way.
```
class QFunction(chainer.Chain):
    """Fully connected Q-function with two tanh hidden layers that maps an
    observation to one Q-value per discrete action."""

    def __init__(self, obs_size, n_actions, n_hidden_channels=50):
        super().__init__()
        with self.init_scope():
            # obs -> hidden -> hidden -> one Q-value per action
            self.l0 = L.Linear(obs_size, n_hidden_channels)
            self.l1 = L.Linear(n_hidden_channels, n_hidden_channels)
            self.l2 = L.Linear(n_hidden_channels, n_actions)

    def __call__(self, x, test=False):
        """
        Args:
            x (ndarray or chainer.Variable): An observation
            test (bool): a flag indicating whether it is in test mode
        """
        hidden = F.tanh(self.l0(x))
        hidden = F.tanh(self.l1(hidden))
        q_values = self.l2(hidden)
        return chainerrl.action_value.DiscreteActionValue(q_values)
# Size the network from the environment's spaces (CartPole: 4 observations,
# 2 discrete actions).
obs_size = env.observation_space.shape[0]
n_actions = env.action_space.n
q_func = QFunction(obs_size, n_actions)
```
If you want to use CUDA for computation, as usual as in Chainer, call `to_gpu`.
```
# Uncomment to use CUDA
# q_func.to_gpu(0)
```
You can also use ChainerRL's predefined Q-functions.
```
# Equivalent predefined fully-connected Q-function shipped with ChainerRL
# (not used below; shown for comparison with the hand-written QFunction).
_q_func = chainerrl.q_functions.FCStateQFunctionWithDiscreteAction(
obs_size, n_actions,
n_hidden_layers=2, n_hidden_channels=50)
```
As in Chainer, `chainer.Optimizer` is used to update models.
```
# Use Adam to optimize q_func. eps=1e-2 is for stability.
optimizer = chainer.optimizers.Adam(eps=1e-2)
# setup() binds the optimizer to the Q-function's parameters.
optimizer.setup(q_func)
```
A Q-function and its optimizer are used by a DQN agent. To create a DQN agent, you need to specify a bit more parameters and configurations.
```
# Set the discount factor that discounts future rewards.
gamma = 0.95

# Use epsilon-greedy for exploration.
explorer = chainerrl.explorers.ConstantEpsilonGreedy(
    epsilon=0.3, random_action_func=env.action_space.sample)

# DQN uses Experience Replay.
# Specify a replay buffer and its capacity.
replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=10 ** 6)


# Since observations from CartPole-v0 are numpy.float64 while
# Chainer only accepts numpy.float32 by default, specify
# a converter as a feature extractor function phi.
# (A def instead of an assigned lambda, per PEP 8 / E731.)
def phi(x):
    return x.astype(np.float32, copy=False)


# Now create an agent that will interact with the environment.
agent = chainerrl.agents.DoubleDQN(
    q_func, optimizer, replay_buffer, gamma, explorer,
    replay_start_size=500, update_interval=1,
    target_update_interval=100, phi=phi)
```
Now you have an agent and an environment. It's time to start reinforcement learning!
In training, use `agent.act_and_train` to select exploratory actions. `agent.stop_episode_and_train` must be called after finishing an episode. You can get training statistics of the agent via `agent.get_statistics`.
```
# Exploratory training: act_and_train() picks epsilon-greedy actions and
# updates the agent; stop_episode_and_train() must close every episode.
n_episodes = 200
max_episode_len = 200
for i in range(1, n_episodes + 1):
    obs = env.reset()
    reward = 0
    done = False
    R = 0  # return (sum of rewards)
    t = 0  # time step
    while not done and t < max_episode_len:
        # Uncomment to watch the behaviour
        # env.render()
        action = agent.act_and_train(obs, reward)
        obs, reward, done, _ = env.step(action)
        R += reward
        t += 1
    if i % 10 == 0:
        print('episode:', i,
              'R:', R,
              'statistics:', agent.get_statistics())
    agent.stop_episode_and_train(obs, reward, done)
print('Finished.')
```
Now you finished training the agent. How good is the agent now? You can test it by using `agent.act` and `agent.stop_episode` instead. Exploration such as epsilon-greedy is not used anymore.
```
# Greedy evaluation: act() selects without exploration; stop_episode()
# resets any recurrent/episodic state between episodes.
for i in range(10):
    obs = env.reset()
    done = False
    R = 0
    t = 0
    while not done and t < 200:
        env.render()
        action = agent.act(obs)
        obs, r, done, _ = env.step(action)
        R += r
        t += 1
    print('test episode:', i, 'R:', R)
    agent.stop_episode()
```
If test scores are good enough, the only remaining task is to save the agent so that you can reuse it. What you need to do is to simply call `agent.save` to save the agent, then `agent.load` to load the saved agent.
```
# Persist the trained agent (model weights and optimizer state) for reuse.
# Save an agent to the 'agent' directory
agent.save('agent')
# Uncomment to load an agent from the 'agent' directory
# agent.load('agent')
```
RL completed!
But writing code like this every time you use RL might be boring. So, ChainerRL has utility functions that do these things.
```
# Set up the logger to print info messages for understandability.
import logging
import sys
gym.undo_logger_setup()  # Turn off gym's default logger settings
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')
# One utility call that replaces the manual train/evaluate loops above.
chainerrl.experiments.train_agent_with_evaluation(
agent, env,
steps=2000,           # Train the agent for 2000 steps
eval_n_runs=10,       # 10 episodes are sampled for each evaluation
max_episode_len=200,  # Maximum length of each episodes
eval_interval=1000,   # Evaluate the agent after every 1000 steps
outdir='result')      # Save everything to 'result' directory
```
That's all of the ChainerRL quickstart guide. To know more about ChainerRL, please look into the `examples` directory and read and run the examples. Thank you!
| github_jupyter |
```
import os
import fnmatch
import pprint
import csv
from tqdm import tqdm
import numpy as np
import pandas as pd
import scipy.io as sio
from scipy.linalg import sqrtm
from analysis_clustering_helpers import get_cvfold_crossmodal_recon
# Locations of the cross-validated reconstruction summaries and of the raw
# patch-seq reference data (transcriptomic + electrophysiology .mat file).
cvsets_pth = './data/results/patchseq/reconstructions/'
metadata_file = './data/raw/PS_v2_beta_0-4.mat'
# squeeze_me=True drops singleton MATLAB dimensions on load.
matfile = sio.loadmat(metadata_file,squeeze_me=True)
file_list = sorted([file for file in os.listdir(cvsets_pth) if 'summary' in file])
```
**Same-modality and cross modality reconstructions**:
```
# Per-coupling-strength matrix of normalized validation reconstruction
# errors: one row per cross-validation file, columns ordered
# [xT_from_zT, xT_from_zE, xE_from_zE, xE_from_zT].
D = {}
for key in ['cs_0-0_', 'cs_1-0_', 'cs_10-0_', 'cs_100-0_']:
    rows = []
    for fname in tqdm(file_list):
        if key in fname:
            train_paired, val_paired, train_leaf, val_leaf = get_cvfold_crossmodal_recon(
                cvfile=cvsets_pth + fname, refdata=matfile, full_data=True)
            # Reconstructions are normalized with the norm of the data for the
            # reconstructed modality. This enables comparison of the magnitude
            # of errors in different modalities on the same plot.
            norm_T = np.sum(val_leaf['T_x'] ** 2, axis=1)
            norm_E = np.sum(val_leaf['E_x'] ** 2, axis=1)
            xT_from_zT = np.mean(np.sum((val_leaf['T_x'] - val_leaf['xT_from_zT']) ** 2, axis=1)) / np.mean(norm_T)
            xT_from_zE = np.mean(np.sum((val_leaf['T_x'] - val_leaf['xT_from_zE']) ** 2, axis=1)) / np.mean(norm_T)
            xE_from_zE = np.mean(np.sum((val_leaf['E_x'] - val_leaf['xE_from_zE']) ** 2, axis=1)) / np.mean(norm_E)
            xE_from_zT = np.mean(np.sum((val_leaf['E_x'] - val_leaf['xE_from_zT']) ** 2, axis=1)) / np.mean(norm_E)
            rows.append([xT_from_zT, xT_from_zE, xE_from_zE, xE_from_zT])
    # Build the array once instead of repeated O(n^2) np.append concatenation.
    D[key] = np.array(rows) if rows else np.empty((0, 4))
def _err_stats(col):
    """Mean and s.d. of error column `col` across cv folds, per coupling strength."""
    per_key = [D[k][:, col] for k in ('cs_0-0_', 'cs_1-0_', 'cs_10-0_', 'cs_100-0_')]
    means = np.array([np.mean(v) for v in per_key])
    stds = np.array([np.std(v) for v in per_key])
    return means, stds

# Columns: 0 = T from zT, 1 = T from zE, 2 = E from zE, 3 = E from zT.
TfromT, TfromT_err = _err_stats(0)
TfromE, TfromE_err = _err_stats(1)
EfromE, EfromE_err = _err_stats(2)
EfromT, EfromT_err = _err_stats(3)
import seaborn as sns
%matplotlib inline
sns.set_style('ticks')
cs = np.array([0+0.1,1.0,10.0,100.0])
ind = np.array([1,2,3]).astype(int)
ccT=['#0504aa','#0165fc']
ccE=['#8f1402','#e50000']
legend_txt_size = 24
ax_txt_size = 24
fig_4C = plt.figure(figsize=(7, 7))
ax = plt.subplot(1,1,1)
plt.sca(ax)
eprops={'fmt':'.-', 'ms': 10, 'mec': 'None', 'mew': 2,'elinewidth': 2, 'capsize': 5}
Ts = plt.errorbar(cs[ind],TfromT[ind]/TfromT[0],TfromT_err[ind],
c=ccT[0],mfc=ccT[0],ecolor=ccT[0], **eprops,
label=r'${x}_{t}$ $\rightarrow$ $z_{t}$ $\rightarrow$ $\widetilde{x}_{t}$')
Tc = plt.errorbar(cs[ind],TfromE[ind]/TfromT[0],TfromE_err[ind],
c=ccT[1],mfc=ccT[1],ecolor=ccT[1], **eprops,
label=r'${x}_{e}$ $\rightarrow$ $z_{e}$ $\rightarrow$ $\widetilde{x}_{t}$',)
Es = plt.errorbar(cs[ind],EfromE[ind]/EfromE[0],EfromE_err[ind],
c=ccE[0],mfc=ccE[0],ecolor=ccE[0], **eprops,
label=r'${x}_{e}$ $\rightarrow$ $z_{e}$ $\rightarrow$ $\widetilde{x}_{e}$')
Ec = plt.errorbar(cs[ind],EfromT[ind]/EfromE[0],EfromT_err[ind],
c=ccE[1],mfc=ccE[1],ecolor=ccE[1], **eprops,
label=r'${x}_{t}$ $\rightarrow$ $z_{t}$ $\rightarrow$ $\widetilde{x}_{e}$')
ax.set_xscale('log')
ax.set_xlim(0.8,130)
ax.set_ylim(0.4,2)
ax.set_yticks(np.arange(0.5,2.01,0.5))
plt.grid(True,which="both",ls="--",c='lightgray')
ax.set_xlabel('$\lambda$')
ax.set_ylabel(r'$\dfrac{\left\langle \left\|\|{x}-\widetilde{x} \right\|\|^2 \right\rangle}{\left\langle \left\|\|{x}-\widetilde{x} \right\|\|^2_{\lambda=0} \right\rangle}$')
plt.legend(handles = [Ts,Tc,Es,Ec],prop={'size': legend_txt_size},loc=4,frameon=True,edgecolor='white',facecolor='white', framealpha=0.5)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(ax_txt_size)
fig_4C.savefig('/Users/fruity/Dropbox/AllenInstitute/CellTypes/doc/NeurIPS 2019/camready/Fig4C.pdf',
dpi=300, format='pdf', bbox_inches='tight')
```
**Helper functions to plot data and reconstructions for individual cells**
```
import seaborn as sns

def plot_exp(true_gene_exp, pred_gene_exp, pred_gene_id, celltype, fileid):
    """Overlayed bar plot of true vs. predicted neuropeptide gene expression.

    Args:
        true_gene_exp: measured expression values, one per gene.
        pred_gene_exp: reconstructed expression values, same length.
        pred_gene_id: gene names used as tick labels (ticks are hidden below).
        celltype: title string for the panel.
        fileid: cell identifier (unused; kept for a caller-compatible signature).

    Returns:
        The matplotlib Figure containing the plot.
    """
    sns.set_style("ticks")
    true_col = '#0165fc'
    pred_col = '#e50000'
    fig_T = plt.figure(figsize=[8, 4])
    plt.bar(np.arange(np.size(true_gene_exp)), np.ravel(true_gene_exp), tick_label=pred_gene_id,
            facecolor=true_col, ecolor=true_col, alpha=0.4, label='True')
    plt.bar(np.arange(np.size(pred_gene_exp)), np.ravel(pred_gene_exp), tick_label=pred_gene_id,
            facecolor=pred_col, ecolor=pred_col, alpha=0.4, label='Predicted')
    ax = plt.gca()
    for label in ax.get_xticklabels():
        label.set_rotation(90)
        label.set_fontsize(12)
    ax.set_xticks([])
    ax.set_ylim(0, 9)
    ax.set_yticks(np.arange(0, 9, 1))
    ax.yaxis.set_ticklabels(['0', '', '', '', '4', '', '', '', '8', ''])
    ax.tick_params('both', length=5, which='major')
    ax.set_title(celltype, pad=30, verticalalignment='top')
    for item in [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_yticklabels():
        item.set_fontsize(24)
    plt.tight_layout()
    return fig_T
def plot_features(true_E_features, pred_E_features, celltype, fileid):
    """Overlayed bar plot of true vs. predicted sPCA electrophysiology features.

    Args:
        true_E_features: measured feature values.
        pred_E_features: reconstructed feature values, same length.
        celltype: title string for the panel.
        fileid: cell identifier (unused; kept for a caller-compatible signature).

    Returns:
        The matplotlib Figure containing the plot.
    """
    sns.set_style("ticks")
    true_col = '#0165fc'
    pred_col = '#e50000'
    fig_E = plt.figure(figsize=(8, 4))
    plt.bar(np.arange(np.size(true_E_features)), np.ravel(true_E_features),
            facecolor=true_col,
            ecolor=true_col, alpha=0.4, label='True')
    plt.bar(np.arange(np.size(pred_E_features)), np.ravel(pred_E_features),
            facecolor=pred_col,
            ecolor=pred_col, alpha=0.4, label='Predicted')
    ax = plt.gca()
    for label in ax.get_xticklabels():
        label.set_rotation(90)
        label.set_fontsize(12)
    ax.set_ylim(-3, 3)
    ax.set_xlim(-1, 54)
    ax.set_yticks(np.arange(-3.1, 3.1, 0.5))
    ax.yaxis.set_ticklabels(['', '', '-2', '', '', '', '0', '', '', '', '2', '', ''])
    ax.set_xticks([])
    ax.tick_params('both', length=5, which='major')
    ax.set_title(celltype, pad=30, verticalalignment='top')
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(24)
    plt.tight_layout()
    return fig_E
```
**E $\rightarrow$ T**
```
# Neuropeptides list:
gene_id_pep = np.array(['Vip', 'Npy', 'Sst', 'Penk', 'Tac2', 'Cck', 'Crh', 'Tac1', 'Pdyn', 'Cort', 'Pthlh', 'Pnoc',
                        'Adcyap1', 'Trh', 'Grp', 'Nmb', 'Nts', 'Rln1', 'Vipr1', 'Vipr2', 'Npy1r', 'Npy2r', 'Npy5r',
                        'Sstr1', 'Sstr2', 'Sstr3', 'Sstr4', 'Oprd1', 'Oprm1', 'Tacr3', 'Cckbr', 'Crhr1', 'Crhr2',
                        'Tacr1', 'Oprk1', 'Pth1r', 'Oprl1', 'Adcyap1r1', 'Trhr', 'Trhr2', 'Grpr', 'Nmbr', 'Ntsr1',
                        'Ntsr2', 'Rxfp1', 'Rxfp2', 'Rxfp3'])
# Subset present among the 1252 genes used in the study:
pred_gene_id = gene_id_pep[np.isin(gene_id_pep, matfile['gene_id'])]

D = {}
ii = 1
specific_cell_ids = [2291, 2543]
for key in ['cs_10-0_']:
    for fname in tqdm(file_list):
        if key in fname:
            train_paired, val_paired, train_leaf, val_leaf = get_cvfold_crossmodal_recon(
                cvfile=cvsets_pth + fname, refdata=matfile, full_data=True)
            gene_inds = np.isin(matfile['gene_id'], pred_gene_id)
            for i in range(np.size(val_leaf['labels'])):
                if val_leaf['T_ind'][i] in specific_cell_ids:
                    print(val_leaf['T_ind'][i])
                    celltype = val_leaf['labels'][i]
                    gene_exp = val_leaf['xT_from_zE'][i, :]
                    pred_gene_exp = gene_exp[gene_inds]
                    true_gene_exp = matfile['T_dat'][val_leaf['T_ind'][i], :]
                    true_gene_exp = true_gene_exp[gene_inds]
                    fig_4A = plot_exp(true_gene_exp, pred_gene_exp, pred_gene_id, celltype, val_leaf['T_ind'][i])
                    # NOTE(review): filenames here use the within-fold index `i`,
                    # while the running counter `ii` is incremented but never used
                    # (the T->E cell uses `ii`) — confirm which index is intended.
                    fig_4A.savefig('/Users/fruity/Dropbox/AllenInstitute/CellTypes/doc/NeurIPS 2019/camready/Fig4A_'+str(i)+'.pdf',
                                   dpi=300, format='pdf', bbox_inches='tight')
                    ii = ii + 1
plt.show()
```
**T $\rightarrow$ E**
```
# T -> E: reconstruct electrophysiology features from transcriptomic latents
# for two specific cells and save one figure per hit.
specific_cell_ids = [2291, 2543]
D = {}
ii = 1
for key in ['cs_10-0_']:
    for fname in tqdm(file_list):
        if key in fname:
            train_paired, val_paired, train_leaf, val_leaf = get_cvfold_crossmodal_recon(
                cvfile=cvsets_pth + fname, refdata=matfile, full_data=True)
            for i in range(np.size(val_leaf['labels'])):
                if val_leaf['E_ind'][i] in specific_cell_ids:
                    celltype = val_leaf['labels'][i]
                    pred_E_features = val_leaf['xE_from_zT'][i, :]
                    true_E_features = matfile['E_dat'][val_leaf['E_ind'][i], :]
                    fig_4B = plot_features(true_E_features, pred_E_features, celltype, val_leaf['E_ind'][i])
                    fig_4B.savefig('/Users/fruity/Dropbox/AllenInstitute/CellTypes/doc/NeurIPS 2019/camready/Fig4B_'+str(ii)+'.pdf',
                                   dpi=300, format='pdf', bbox_inches='tight')
                    ii = ii + 1
```
| github_jupyter |
```
import tensorflow as tf
import tensorflow.contrib.layers as layers
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import pandas as pd
import numpy as np
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import GridSearchCV
import keras
%matplotlib inline
# Data
boston = datasets.load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['target'] = boston.target

# Understanding the data (displays summary stats when run as the last
# expression of a notebook cell).
df.describe()

# Plotting correlation color map
_, ax = plt.subplots(figsize=(12, 10))
corr = df.corr(method='pearson')
cmap = sns.diverging_palette(220, 10, as_cmap=True)
_ = sns.heatmap(
    corr,
    cmap=cmap,
    square=True,
    cbar_kws={'shrink': .9},
    ax=ax,
    annot=True,
    annot_kws={'fontsize': 12})

# Create train/test split
X_train, X_test, y_train, y_test = train_test_split(
    df[['RM', 'LSTAT', 'PTRATIO']], df[['target']], test_size=0.3, random_state=0)

# Normalize: fit the scalers on the TRAINING data only and reuse them on the
# test set. (Fitting a fresh MinMaxScaler on the test set leaks test-set
# statistics and makes train/test values incomparable.)
# Convert to np.array for use with Keras.
x_scaler = MinMaxScaler().fit(X_train)
y_scaler = MinMaxScaler().fit(y_train)
X_train = np.array(x_scaler.transform(X_train))
y_train = np.array(y_scaler.transform(y_train))
X_test = np.array(x_scaler.transform(X_test))
y_test = np.array(y_scaler.transform(y_test))
#Network Parameters
m = len(X_train)  # number of training samples (not used below)
n = 3 # Number of features
n_hidden = 20 # Number of hidden neurons
# Build Model: single hidden layer MLP regressor.
model = Sequential()
model.add(Dense(n_hidden, input_dim=n, activation='relu'))
# Sigmoid output matches the MinMax-scaled [0, 1] target range.
model.add(Dense(1, activation='sigmoid'))
model.summary() # Summarize the model
#Compile model
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_train, y_train, validation_data=(X_test, y_test),epochs=5, batch_size=10, verbose=1)
def get_model():
    """Return a fresh, uncompiled two-layer regression network.

    Reads the module-level `n_hidden` (hidden units) and `n` (input features).
    """
    net = Sequential()
    net.add(Dense(n_hidden, input_dim=n, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    return net
# Hyperparameters tuning: grid over epochs x batch size; persist any model
# that beats the best validation error seen so far (initial threshold 0.04).
epochs = [50, 60, 70]
batches = [5, 10, 20]
best_mse = 0.04  # was named `rmse_min`; mean_squared_error returns MSE, not RMSE
for epoch in epochs:
    for batch in batches:
        model = get_model()
        model.compile(loss='mean_squared_error', optimizer='adam')
        model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epoch, batch_size=batch, verbose=1)
        y_test_pred = model.predict(X_test)
        mse = mean_squared_error(y_test, y_test_pred)
        if mse < best_mse:
            best_mse = mse
            # serialize model to JSON
            with open("model.json", "w") as json_file:
                json_file.write(model.to_json())
            # serialize weights to HDF5
            model.save_weights("model.hdf5")
            print("Saved model to disk")
```
| github_jupyter |
# Portfolio Management with Amazon SageMaker RL
Portfolio management is the process of constant redistribution of a capital into a set of different financial assets. Given the historic prices of a list of stocks and current portfolio allocation, the goal is to maximize the return while restraining the risk. In this demo, we use a reinforcement learning framework to manage the portfolio by continuously reallocating several stocks. Based on the setup in [1], we use a tensor input constructed from historical price data, and then apply an actor-critic policy gradient algorithm to accommodate the continuous actions (reallocations). The customized environment is constructed using Open AI Gym and the RL agents are trained using Amazon SageMaker.
[1] Jiang, Zhengyao, Dixing Xu, and Jinjun Liang. ["A deep reinforcement learning framework for the financial portfolio management problem." arXiv preprint arXiv:1706.10059 (2017)](https://arxiv.org/abs/1706.10059).
## Problem Statement
We start with $m$ preselected stocks. Without loss of generality, the total investment value is set as 1 dollar at the initial timestamp. At timestamp $t$, letting $v_{m,t}$ denote the closing price of stock $m$, the *price relative vector* is defined as
$$ y_t = ( 1, \frac{v_{1,t}}{v_{1,t-1}}, \frac{v_{2,t}}{v_{2,t-1}}, \dots, \frac{v_{m,t}}{v_{m,t-1}} ). $$
The first element corresponds to the cash we maintain. The cash value doesn't change along time so it is always 1. During training, the investment redistribution at step $t$ is characterized by the portfolio weight vector $\mathbf{\omega} = (\omega_{0,t}, \omega_{1,t}, \dots, \omega_{m,t})$.
1. *Objective:*
The portfolio consists of a group of stocks. We aim to maximize the portfolio value by adjusting the weights of each stock and reallocating the portfolio at the end of each day.
2. *Environment:*
Custom developed environment using Gym.
3. *States:*
Portfolio weight vector from last trading day $\omega_{t-1}$. Historic price tensor constructed using close, open, high, low prices of each stock. For more details, please refer to [1].
4. *Actions:*
New weight vector $\omega_{t}$ satisfying $\sum_{i=0}^{m}\omega_{i,t}=1$.
5. *Reward:*
Average logarithmic cumulated return. Consider a trading cost factor $\mu$, the average logarithmic cumulated return after timestamp $T$ is $$ R := \frac{1}{T} \sum_{t=1}^{T+1} \ln(\mu_{t}y_{t}\cdot\omega_{t-1}).$$
We use the maximum rate at Poloniex and set $\mu=0.25\%$.
## Dataset
In this notebook, we use the dataset generated by [Chi Zhang](https://github.com/vermouth1992/drl-portfolio-management/tree/master/src/utils/datasets). It contains the historic price of 16 target stocks from NASDAQ100, including open, close, high and low prices from 2012-08-13 to 2017-08-11. Specifically, those stocks are: “AAPL”, “ATVI”, “CMCSA”, “COST”, “CSX”, “DISH”, “EA”, “EBAY”, “FB”, “GOOGL”, “HAS”, “ILMN”, “INTC”, “MAR”, “REGN” and “SBUX”.
### Dataset License
This dataset is licensed under a MIT License.
Copyright (c) 2017 Chi Zhang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
## Using reinforcement learning on Amazon SageMaker RL
Amazon SageMaker RL allows you to train your RL agents using an on-demand and fully managed infrastructure. You do not have to worry about setting up your machines with the RL toolkits and deep learning frameworks as there are pre-built RL environments. You can easily switch between many different machines setup for you, including powerful GPU machines that give a big speedup. You can also choose to use multiple machines in a cluster to further speedup training, often necessary for production level loads.
## Pre-requisites
### Roles and permissions
To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.
```
import sagemaker
import boto3
import sys
import os
import glob
import re
import subprocess
from IPython.display import HTML
import time
from time import gmtime, strftime
sys.path.append("common")
from misc import get_execution_role, wait_for_s3_object
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
```
### Set up S3 buckets
Set up the linkage and authentication to the S3 bucket that you want to use for checkpoint and the metadata.
```
# Default SageMaker session and its account/region-default S3 bucket;
# training checkpoints and metadata are written under this prefix.
sage_session = sagemaker.session.Session()
s3_bucket = sage_session.default_bucket()
s3_output_path = 's3://{}/'.format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
```
### Define Variables
We define variables such as the job prefix for the training jobs.
```
# create unique job name
job_name_prefix = 'rl-portfolio-management'  # prefix for all training job names
```
### Configure settings
You can run your RL training jobs on a SageMaker notebook instance or on your own machine. In both of these scenarios, you can run the following in either `local` or `SageMaker` modes. The `local` mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`.
```
# run in local mode?
local_mode = False  # True: train in a local container; False: on SageMaker
```
### Create an IAM role
Either get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running from local machine, use utils method `role = get_execution_role()` to create an execution role.
```
# Get the execution role from the SageMaker notebook environment when
# available; otherwise fall back to the local utility helper.
try:
    role = sagemaker.get_execution_role()
except Exception:  # narrowed from a bare `except:`, which would also swallow SystemExit/KeyboardInterrupt
    role = get_execution_role()

print("Using IAM role arn: {}".format(role))
```
### Install docker for `local` mode
In order to work in `local` mode, you need to have docker installed. When running from your local machine, please make sure that you have docker or docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script to install dependencies.
Note, you can only run a single local notebook at one time.
```
# Run on SageMaker notebook instance
if local_mode:
!/bin/bash ./common/setup.sh
```
## Set up the environment
The environment is defined in a Python file called `portfolio_env.py` and the file is uploaded on `/src` directory.
The environment also implements the `init()`, `step()` and `reset()` functions that describe how the environment behaves. This is consistent with Open AI Gym interfaces for defining an environment.
1. init() - initialize the environment in a pre-defined state
2. step() - take an action on the environment
3. reset()- restart the environment on a new episode
4. [if applicable] render() - get a rendered image of the environment in its current state
```
!pygmentize src/portfolio_env.py
```
## Configure the presets for RL algorithm
The presets that configure the RL training jobs are defined in the `preset-portfolio-management-clippedppo.py` file which is also uploaded on the `/src` directory. Using the preset file, you can define agent parameters to select the specific agent algorithm. You can also set the environment parameters, define the schedule and visualization parameters, and define the graph manager. The schedule presets will define the number of heat up steps, periodic evaluation steps, training steps between evaluations.
These can be overridden at runtime by specifying the `RLCOACH_PRESET` hyperparameter. Additionally, it can be used to define custom hyperparameters.
```
!pygmentize src/preset-portfolio-management-clippedppo.py
```
## Write the Training Code
The training code is written in the file “train-coach.py” which is uploaded in the /src directory.
First import the environment files and the preset files, and then define the `main()` function.
```
!pygmentize src/train-coach.py
```
## Train the RL model using the Python SDK Script mode
If you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.
1. Specify the source directory where the environment, presets and training code is uploaded.
2. Specify the entry point as the training code
3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.
4. Define the training parameters such as the instance count, job name, S3 path for output and job name.
5. Specify the hyperparameters for the RL agent algorithm. The `RLCOACH_PRESET` can be used to specify the RL agent algorithm you want to use.
6. [Optional] Choose the metrics that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks. The metrics are defined using regular expression matching.
```
# Choose where training runs: a local container or a managed SageMaker instance.
if local_mode:
    instance_type = 'local'
else:
    instance_type = "ml.m4.4xlarge"

estimator = RLEstimator(source_dir='src',
                        entry_point="train-coach.py",
                        dependencies=["common/sagemaker_rl"],
                        toolkit=RLToolkit.COACH,
                        toolkit_version='0.11.0',
                        framework=RLFramework.MXNET,
                        role=role,
                        train_instance_count=1,
                        train_instance_type=instance_type,
                        output_path=s3_output_path,
                        base_job_name=job_name_prefix,
                        hyperparameters={
                            "RLCOACH_PRESET": "preset-portfolio-management-clippedppo",
                            "rl.agent_params.algorithm.discount": 0.9,
                            "rl.evaluation_steps:EnvironmentEpisodes": 5
                        })

# takes ~15min
# The log may show KL divergence=[0.]. This is expected because the divergences were not necessarily required for
# Clipped PPO. By default they are not calculated for computational efficiency.
estimator.fit()
```
## Store intermediate training output and model checkpoints
The output from the training job above is either stored in a local directory (`local` mode) or on S3 (`SageMaker`) mode.
```
%%time
job_name=estimator._current_job_name
print("Job name: {}".format(job_name))
s3_url = "s3://{}/{}".format(s3_bucket,job_name)
if local_mode:
output_tar_key = "{}/output.tar.gz".format(job_name)
else:
output_tar_key = "{}/output/output.tar.gz".format(job_name)
intermediate_folder_key = "{}/output/intermediate/".format(job_name)
output_url = "s3://{}/{}".format(s3_bucket, output_tar_key)
intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key)
print("S3 job path: {}".format(s3_url))
print("Output.tar.gz location: {}".format(output_url))
print("Intermediate folder path: {}".format(intermediate_url))
tmp_dir = "/tmp/{}".format(job_name)
os.system("mkdir {}".format(tmp_dir))
print("Create local folder {}".format(tmp_dir))
%%time
wait_for_s3_object(s3_bucket, output_tar_key, tmp_dir)
if not os.path.isfile("{}/output.tar.gz".format(tmp_dir)):
raise FileNotFoundError("File output.tar.gz not found")
os.system("tar -xvzf {}/output.tar.gz -C {}".format(tmp_dir, tmp_dir))
if not local_mode:
os.system("aws s3 cp --recursive {} {}".format(intermediate_url, tmp_dir))
if not os.path.isfile("{}/output.tar.gz".format(tmp_dir)):
raise FileNotFoundError("File output.tar.gz not found")
os.system("tar -xvzf {}/output.tar.gz -C {}".format(tmp_dir, tmp_dir))
print("Copied output files to {}".format(tmp_dir))
if local_mode:
checkpoint_dir = "{}/data/checkpoint".format(tmp_dir)
info_dir = "{}/data/".format(tmp_dir)
else:
checkpoint_dir = "{}/checkpoint".format(tmp_dir)
info_dir = "{}/".format(tmp_dir)
print("Checkpoint directory {}".format(checkpoint_dir))
print("info directory {}".format(info_dir))
```
## Visualization
### Plot rate of learning
We can view the rewards during training using the code below. This visualization helps us understand how the performance of the model, represented by the reward, has improved over time. To keep training time reasonable, we restrict the number of episodes. If the final reward (average logarithmic cumulated return) is still below zero, try a larger number of training steps. The number of steps can be configured in the preset file.
```
%matplotlib inline
import pandas as pd
csv_file_name = "worker_0.simple_rl_graph.main_level.main_level.agent_0.csv"
key = os.path.join(intermediate_folder_key, csv_file_name)
wait_for_s3_object(s3_bucket, key, tmp_dir)
csv_file = "{}/{}".format(tmp_dir, csv_file_name)
df = pd.read_csv(csv_file)
df = df.dropna(subset=['Evaluation Reward'])
# print(list(df))
x_axis = 'Episode #'
y_axis = 'Evaluation Reward'
plt = df.plot(x=x_axis,y=y_axis, figsize=(12,5), legend=True, style='b-')
plt.set_ylabel(y_axis);
plt.set_xlabel(x_axis);
```
### Visualize the portfolio value
We use result of the last evaluation phase as an example to visualize the portfolio value. The following figure demonstrates reward vs date. Sharpe ratio and maximum drawdown are also calculated to help readers understand the return of an investment compared to its risk.
```
import numpy as np
import matplotlib.pyplot as plt
# same as in https://github.com/vermouth1992/drl-portfolio-management/blob/master/src/environment/portfolio.py
def sharpe(returns, freq=30, rfr=0):
    """ Given a set of returns, calculates naive (rfr=0) sharpe. """
    # eps keeps the ratio finite for constant (zero-variance) returns.
    eps = np.finfo(np.float32).eps
    excess = returns - rfr + eps
    return np.sqrt(freq) * np.mean(excess) / np.std(excess)
def max_drawdown(returns):
    """ Max drawdown. See https://www.investopedia.com/terms/m/maximum-drawdown-mdd.asp """
    # Drawdown from the global peak to the lowest value observed after it.
    eps = np.finfo(np.float32).eps
    peak_value = returns.max()
    trough_after_peak = returns[returns.argmax():].min()
    return (trough_after_peak - peak_value) / (peak_value + eps)
# Evaluation-phase trajectory written by the environment during training.
info = info_dir + 'portfolio-management.csv'
df_info = pd.read_csv(info)
df_info['date'] = pd.to_datetime(df_info['date'], format='%Y-%m-%d')
df_info.set_index('date', inplace=True)
# Risk metrics on the per-step rate of return.
mdd = max_drawdown(df_info.rate_of_return + 1)
sharpe_ratio = sharpe(df_info.rate_of_return)
title = 'max_drawdown={: 2.2%} sharpe_ratio={: 2.4f}'.format(mdd, sharpe_ratio)
df_info[["portfolio_value", "market_value"]].plot(title=title, fig=plt.gcf(), rot=30)
```
## Load the checkpointed models for evaluation
Checkpointed data from the previously trained models will be passed on for evaluation / inference in the `checkpoint` channel. In `local` mode, we can simply use the local directory, whereas in the `SageMaker` mode, it needs to be moved to S3 first.
Since TensorFlow checkpoint files contain absolute paths from when they were generated (see [issue](https://github.com/tensorflow/tensorflow/issues/9146)), we need to replace the absolute paths with relative paths. This is implemented within `evaluate-coach.py`.
```
%%time
if local_mode:
checkpoint_path = 'file://{}'.format(checkpoint_dir)
print("Local checkpoint file path: {}".format(checkpoint_path))
else:
checkpoint_path = "s3://{}/{}/checkpoint/".format(s3_bucket, job_name)
if not os.listdir(checkpoint_dir):
raise FileNotFoundError("Checkpoint files not found under the path")
os.system("aws s3 cp --recursive {} {}".format(checkpoint_dir, checkpoint_path))
print("S3 checkpoint file path: {}".format(checkpoint_path))
```
### Run the evaluation step
Use the checkpointed model to run the evaluation step.
```
%%time
# Evaluation estimator: same toolkit/framework as training, but runs
# evaluate-coach.py against the checkpoints passed in the 'checkpoint' channel.
estimator_eval = RLEstimator(role=role,
source_dir='src/',
dependencies=["common/sagemaker_rl"],
toolkit=RLToolkit.COACH,
toolkit_version='0.11.0',
framework=RLFramework.MXNET,
entry_point="evaluate-coach.py",
train_instance_count=1,
train_instance_type=instance_type,
hyperparameters = {}
)
estimator_eval.fit({'checkpoint': checkpoint_path})
```
## Risk Disclaimer (for live-trading)
This notebook is for educational purposes only. Past trading performance does not guarantee future performance. The loss in trading can be substantial, and therefore
**investors should use all trading strategies at their own risk**.
| github_jupyter |
<a href="https://colab.research.google.com/github/Katonokatono/Term-Deposit-Project/blob/Hypothesis-Testing/Term_Deposit_Hypothesis_Testing_Module1_Prj.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#Import right libraries
import scipy.stats as stats
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
#Call required libraries
# To time processes
import time
#To suppress warnings
import warnings
#Data manipulation
import numpy as np
import pandas as pd
#For graphics
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# For scaling dataset
from sklearn.preprocessing import StandardScaler
#For clustering
from sklearn.cluster import KMeans, AgglomerativeClustering, AffinityPropagation
#For GMM clustering
from sklearn.mixture import GaussianMixture
# For os related operations
import os
import sys
#load the dataset
bank=pd.read_csv('/content/raw_data.csv')
#preview
bank
#preview the columns
bank.columns
#preview the data types
bank.dtypes
# visualization styling code
sns.set(rc={'figure.figsize':(13, 7.5)})
sns.set_context('talk')
#Turning off warnings
import warnings
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
bank.count()
```
i. **Normality Test**
```
# Normality test
from scipy.stats import shapiro
def shapiro_test(bank, col_list):
    """Run a Shapiro-Wilk normality test on each named column of *bank* and print the verdict."""
    significance = 0.05  # conventional 5% significance level
    for column in col_list:
        print(column)
        statistic, p_value = shapiro(bank[column])
        print('Statistics=%.3f, p=%.3f' % (statistic, p_value))
        # interpret: high p-value means we cannot reject normality
        if p_value > significance:
            print('Sample looks Gaussian (fail to reject H0)')
        else:
            print('Sample does not look Gaussian (reject H0)')
        print('\n')
#use pp plot to check for nomality in age variable
import matplotlib.pyplot as plt
stats.probplot(bank['age'], plot= plt)
plt.show()
#shapiro Wilk's test
#check nomality test for age column
stat, p = shapiro(bank['age'])
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpreting
alpha = 0.05
if p > alpha:
print('Sample looks Gaussian')
else:
print('Sample does not look Gaussian')
#use pp plot to check for nomality in age variable
import matplotlib.pyplot as plt
stats.probplot(bank['duration'], plot= plt)
plt.show()
#check nomality test for duration column
stat, p = shapiro(bank['duration'])
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpreting
alpha = 0.05
if p > alpha:
print('Sample looks Gaussian')
else:
print('Sample does not look Gaussian')
```
ii. **Sampling and Hypothesis Testing**
**Relationship between term_deposit and marital**
```
#Drop the unknown in marital columns
df_marital=bank.drop(bank.index[bank['marital'] == 'unknown'], inplace = True)
#preview the column for marital
bank['marital']
# Stratified sample
df_marital= bank.groupby('marital', group_keys=False).apply(lambda grouped_subset : grouped_subset.sample(frac=0.1))
#preview
df_marital
table_marital= pd.crosstab(bank['marital'], bank['term_deposit'])
table_marital
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_marital.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
```
p-value less than alpha, thus significant evidence to reject null hypothesis
```
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_marital, annot=True, cmap="YlGnBu")
```
**Relationship between term_deposit and education**
```
#Relationship between Term deposit and Education
#Drop the unknown in marital columns
bank.drop(bank.index[bank['education'] == 'unknown'], inplace = True)
#preview the column for education
bank['education']
# Stratified sample
df_education= bank.groupby('education', group_keys=False).apply(lambda grouped_subset : grouped_subset.sample(frac=0.1))
#preview
df_education
#education vs term_deposit
table_edu= pd.crosstab(bank['education'], bank['term_deposit'])
table_edu
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_edu.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
```
p-value less than alpha, thus significant evidence to reject null hypothesis
```
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_edu, annot=True, cmap="YlGnBu")
```
**Relationship between term_deposit and job**
```
#Drop the unknown in marital columns
bank.drop(bank.index[bank['job'] == 'unknown'], inplace = True)
#preview job column
bank['job']
#job vs term_deposit
table_job= pd.crosstab(bank['job'], bank['term_deposit'])
table_job
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_job.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
```
p-value less than alpha, thus significant evidence to reject null hypothesis
```
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_job, annot=True, cmap="YlGnBu")
```
Relationship between Loan and Term_deposit
```
#Drop the unknown in marital columns
bank.drop(bank.index[bank['loan'] == 'unknown'], inplace = True)
#preview the loan column
bank['loan']
# Stratified sample
df_loan= bank.groupby('job', group_keys=False).apply(lambda grouped_subset : grouped_subset.sample(frac=0.1))
#preview
df_loan
#loan vs term_deposit
table_loan= pd.crosstab(bank['loan'], bank['term_deposit'])
table_loan
# Perform chi-square test of independence between loan status and term_deposit
from scipy.stats import chi2_contingency
from scipy.stats import chi2
# BUG FIX: this cell tests loan vs term_deposit, but previously reused
# table_job from the job section; use the loan contingency table instead.
stat, p, dof, expected = chi2_contingency(table_loan.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
```
p-value less than alpha, thus significant evidence to reject null hypothesis
```
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_loan, annot=True, cmap="YlGnBu")
```
**Relationship between term_deposit and housing**
```
#Drop the unknown in housing columns
bank.drop(bank.index[bank['housing'] == 'unknown'], inplace = True)
#preview the housing column
bank['housing']
#housing vs bank
table_housing= pd.crosstab(bank['housing'], bank['term_deposit'])
table_housing
# Perform chi-square test
from scipy.stats import chi2_contingency
from scipy.stats import chi2
stat, p, dof, expected = chi2_contingency(table_housing.to_numpy())
#Calculate critical value, set significance level = 0.05
prob = 0.95
critical_value = chi2.ppf(prob, dof)
print(f'Propability: {prob}, Critical value: {critical_value}, Test statistic: {stat}')
print(f'Alpha: {1-prob}, p-value: {p}')
```
p-value is greater than alpha, thus we fail to reject the null hypothesis (insufficient evidence against it)
```
#Heat map representation
plt.figure(figsize=(12,8))
sns.heatmap(table_housing, annot=True, cmap="YlGnBu")
```
**Relationship between Term_deposit Account and Age**
```
#sample 40 records in age
table_age= bank[['age', 'term_deposit']].sample(n=40, random_state=1)
#preview the sample
table_age
#convert term_deposit column into numerical
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
label_encoder.fit(bank['term_deposit'])
bank['term_deposit'] = label_encoder.transform(bank[['term_deposit']])
bank
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
label_encoder.fit(table_age['term_deposit'])
table_age['term_deposit'] = label_encoder.transform(table_age['term_deposit'])
table_age
#population mean of age
a = bank['age'].mean()
a
#population mean of term_deposit account
b= bank['term_deposit'].mean()
b
#sample mean of age
c=table_age['age'].mean()
c
#sample mean of term_deposit account
d=table_age['term_deposit'].mean()
d
#sample standard deviation
e= table_age['age'].std()
e
#sample standard deviation
f=table_age['term_deposit'].std()
f
# point estimation
# population.mean() - sample.mean()
bank['age'].mean() - table_age['age'].mean()
# Perform a two sample z test
from statsmodels.stats.weightstats import ztest
zscore, p = ztest(x1 = table_age[table_age['term_deposit']==1]['age'].values, x2=table_age[table_age['term_deposit']==0]['age'].values)
print(f'Test statistic: {zscore}, p-value: {p}')
#interpretation of the p value
# alpha value is 0.05 or 5%
if p < 0.05:
print(" we are rejecting null hypothesis")
else:
print("we fail to reject null hypothesis")
import math
sample_mean = table_age['term_deposit'].mean()
# Get the z-critical value
z_critical = stats.norm.ppf(q = 0.975)
# Check the z-critical value
print("z-critical value:")
print(z_critical)
# Get the population standard deviation
pop_stdev = bank['age'].std()
margin_of_error = z_critical * (pop_stdev/math.sqrt(296))
confidence_interval = (sample_mean - margin_of_error,
sample_mean + margin_of_error)
print("Confidence interval:")
print(confidence_interval)
```
Relationship between term_deposit and call duration
```
#sample 40 records in age
table_duration= bank[['duration', 'term_deposit']].sample(n=40, random_state=1)
#preview the sample
table_duration
#population mean of age
a = bank['duration'].mean()
a
#population mean of term_deposit account
b= bank['term_deposit'].mean()
b
#sample mean of duration
c=table_duration['duration'].mean()
c
#sample mean of term_deposit account
d=table_duration['term_deposit'].mean()
d
#sample standard deviation of call duration
# BUG FIX: this section analyses call duration, but previously reused the
# age sample (table_age['age']); compute the duration sample's std instead.
e= table_duration['duration'].std()
e
#sample standard deviation of term_deposit in the duration sample
f=table_duration['term_deposit'].std()
f
# point estimation: population.mean() - sample.mean()
# BUG FIX: compare duration means, not the age means from the earlier section.
bank['duration'].mean() - table_duration['duration'].mean()
# Perform a two sample z test
from statsmodels.stats.weightstats import ztest
zscore, p = ztest(x1 = table_duration[table_duration['term_deposit']==1]['duration'].values, x2=table_duration[table_duration['term_deposit']==0]['duration'].values)
print(f'Test statistic: {zscore}, p-value: {p}')
#interpretation of the p value
# alpha value is 0.05 or 5%
if p < 0.05:
print(" we are rejecting null hypothesis")
else:
print("we fail to reject null hypothesis")
import math
sample_mean = table_duration['term_deposit'].mean()
# Get the z-critical value
z_critical = stats.norm.ppf(q = 0.975)
# Check the z-critical value
print("z-critical value:")
print(z_critical)
# Get the population standard deviation
pop_stdev = bank['duration'].std()
margin_of_error = z_critical * (pop_stdev/math.sqrt(296))
confidence_interval = (sample_mean - margin_of_error,
sample_mean + margin_of_error)
print("Confidence interval:")
print(confidence_interval)
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
from pathlib import Path
import sys
parent_path = str(Path().joinpath('..').resolve())
sys.path.append(parent_path)
from triple_agent.parsing.replay.get_parsed_replays import get_parsed_replays
from triple_agent.constants.paths import REPLAY_PICKLE_FOLDER
from triple_agent.classes.action_tests import ActionTest
from triple_agent.classes.outcomes import WinType
from triple_agent.classes.missions import Missions
from triple_agent.reports.specific.mission_choices import mission_choices
#from triple_agent.reports.specific.mission_completes import mission_completion, mission_completion_query
from triple_agent.reports.specific.action_tests import action_test_percentages, diff_action_test_percentages
from triple_agent.reports.specific.fingerprints import attempted_fingerprint_sources
from triple_agent.reports.specific.banana_breads import all_banana_bread_percentages, first_banana_bread_percentages, banana_split
from triple_agent.reports.specific.character_selection import (spy_selection,
st_selection,
amba_selection,
double_agent_selection,
determine_character_in_role)
from triple_agent.reports.specific.bug import bug_attempt_timings, bug_success_rate
from triple_agent.reports.specific.time_adds import time_add_times, time_add_times_per_game
from triple_agent.reports.specific.game_outcomes import game_outcomes
from triple_agent.reports.specific.seduce import first_flirt_timing
from triple_agent.constants.events import SCL5_PICK_MODES, SCL5_VENUE_MODES, SCL5_DROPPED_PLAYERS
from triple_agent.reports.specific.stop_talks import stop_talk_in_game_percentage
from triple_agent.reports.specific.microfilm import at_or_direct_mf
from triple_agent.classes.roles import Roles
from triple_agent.reports.generation.common_sort_functions import sort_by_spy_wins
from triple_agent.reports.generation.plot_specs import AxisProperties, DataQueryProperties
division = 'Copper'
div_replays = get_parsed_replays(REPLAY_PICKLE_FOLDER,
lambda g: g.division == division \
and g.event == 'SCL5' \
and g.spy not in SCL5_DROPPED_PLAYERS \
and g.sniper not in SCL5_DROPPED_PLAYERS)
_=game_outcomes(div_replays, axis_properties=AxisProperties(title=f"{division} Game Outcomes"))
_=game_outcomes(
div_replays,
DataQueryProperties(groupby=lambda g: g.venue),
AxisProperties(title=f"{division} Outcomes by Venue")
)
_=game_outcomes(
div_replays,
DataQueryProperties(
groupby=lambda g: g.spy,
percent_normalized_data=True,
primary_order=[WinType.MissionsWin, WinType.CivilianShot, WinType.SpyShot, WinType.TimeOut],
secondary_order=sort_by_spy_wins
),
AxisProperties(title=f"{division} Outcomes by Player (Spy)")
)
_=game_outcomes(
div_replays,
DataQueryProperties(
groupby=lambda g: g.sniper,
percent_normalized_data=True,
primary_order=[WinType.MissionsWin, WinType.CivilianShot, WinType.SpyShot, WinType.TimeOut],
secondary_order=sort_by_spy_wins,
reverse_secondary_order=True
),
AxisProperties(title=f"{division} Outcomes by Player (Sniper)")
)
_=time_add_times_per_game(
div_replays,
DataQueryProperties(
groupby=lambda g: g.spy,
),
AxisProperties(title=f"{division} Time Adds per Game by Spy")
)
```
| github_jupyter |
# idftshift function
### Undoes the effects of dftshift.
HS = idftshift(H).
HS: Image.
H: Image. DFT image with (0,0) in the center.
```
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import sys,os
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
ia636path = os.path.abspath('/home/lotufo/ia636/ia636/')
if ia636path not in sys.path:
sys.path.append(ia636path)
testing = (__name__ == "__main__")
def Copy_of_4_slices(f):
    """Tile the image into a 2x2 periodic mosaic (duplicated along axes 1 then 0)."""
    arr = np.asarray(f)
    side_by_side = np.concatenate([arr, arr], axis=1)
    return np.concatenate([side_by_side, side_by_side], axis=0)
def Copy_of_4_ImagesDepth(f):
    """2x2 periodic mosaic of the image, additionally duplicated along a depth axis."""
    arr = np.asarray(f)
    side_by_side = np.concatenate([arr, arr], axis=1)
    mosaic = np.concatenate([side_by_side, side_by_side], axis=0)
    return np.dstack((mosaic, mosaic))
def ptrans(f, t):
    """Periodic (circular) translation of a 1-D, 2-D or 3-D array.

    Parameters
    ----------
    f : array_like
        Input signal/image/volume.
    t : int or sequence of int
        Translation per axis (scalar for 1-D input). Negative shifts allowed.

    Returns
    -------
    np.ndarray
        Array of the same shape as ``f`` with ``g[i] == f[(i - t) % shape]``,
        i.e. the content moved *forward* by ``t`` with periodic wrap-around.

    Notes
    -----
    Fixes over the previous version: the 3-D branch used ``t[1]`` instead of
    ``t[2]`` for the last axis and read the global ``x3d`` instead of ``f``;
    the 2-D/3-D branches also mutated the caller's ``t`` in place.
    ``np.roll`` implements exactly the inverse-mapping logic of the original.
    """
    f = np.asarray(f)
    # Copy t into a fresh int array so the caller's list/array is never mutated.
    shifts = np.atleast_1d(t).astype(int)
    if shifts.size != f.ndim:
        raise ValueError("t must provide one shift per axis of f")
    return np.roll(f, tuple(shifts), axis=tuple(range(f.ndim)))
if testing:
l = np.array([1,2,3,4,5,6])
x=np.array([[1,2,3],[4,5,6],[7,8,9]])
x3d = x.reshape(1,x.shape[0],x.shape[1])
print('teste 1 d')
%timeit ptrans(l,3)
%timeit ia.ptrans(l,3)
print('teste 2 d')
%timeit ptrans(x,[1,2])
%timeit ia.ptrans(x,[1,2])
print('teste 3 d')
%timeit ptrans(x3d,[1,2,1])
%timeit ia.ptrans(x3d,[1,2,1])
if testing:
f = mpimg.imread('/home/lotufo/ia898/data/cameraman.tif')
g1= ptrans(f, np.array(f.shape)//3)
g2 =ia.ptrans(f, np.array(f.shape)//3)
nb = ia.nbshow(2)
nb.nbshow(g1,title='ptrans')
nb.nbshow(g2,title='ia.ptrans')
nb.nbshow()
print('teste')
%timeit ptrans(f,np.array(f.shape)//3)
%timeit ia.ptrans(f,np.array(f.shape)//3)
def dftshift(f):
    """Translate the DFT array so the (0,0) sample moves to the array center."""
    center = np.array(f.shape) // 2
    return ptrans(f, center)
def idftshift(f):
    """Undo dftshift: translate the centered DFT back so (0,0) is at the corner.

    Fix: ``np.int`` was removed in NumPy 1.24 and raises AttributeError;
    the builtin ``int`` is the documented replacement dtype.
    """
    shift = np.ceil(-np.array(np.shape(f)) / 2).astype(int)
    return ptrans(f, shift)
f = mpimg.imread('/home/lotufo/ia898/data/cameraman.tif')
F = ia.dft(f)
Fs = ia.dftshift(F)
Fs1 = dftshift(F)
iFs1 = idftshift(Fs1)
ia.adshow(ia.dftview(F))
ia.adshow(ia.dftview(Fs))
ia.adshow(ia.dftview(Fs1))
ia.adshow(ia.dftview(iFs1))
```
| github_jupyter |
# Breaking daily ranges
```
import pandas as pd
from datetime import timedelta, date
start_date = date(year=2021, month=9, day=1)
end_date = date(year=2021, month=11, day=1)
d1=pd.date_range(start_date, end_date, freq="W-FRI")
d1
d2=pd.date_range(start_date, end_date, freq="W-MON")
d2
ranges=[]
ranges.append((pd.Timestamp(start_date), d1[0]))
for s,e in zip(d2[:-1], d1[1:]):
ranges.append((s,e))
ranges.append((d2[-1], pd.Timestamp(end_date)))
ranges
import pandas as pd
from datetime import date, timedelta
from itertools import zip_longest
start_date = date(year=2021, month=8, day=30)
end_date = date(year=2021, month=11, day=8)
d1=pd.date_range(start_date, end_date, freq="W-FRI")
d2=pd.date_range(start_date, end_date, freq="W-MON")
ranges=[]
ranges.append((pd.Timestamp(start_date), d1[0]))
for s,e in zip(d2[:-1], d1[1:]):
ranges.append((s,e))
ranges.append((d2[-1], pd.Timestamp(end_date)))
#In this case len(d1) != len(d2) --> zip doesn't work
ranges_corected = []
ranges_corected.append((pd.Timestamp(start_date), d1[0]))
for i, j in zip_longest(d2[:-1], d1[1:]):
ranges_corected.append((i, j))
if start_date.weekday() == 5:
start_date = start_date + timedelta(days=2)
elif start_date.weekday() == 6:
start_date = start_date + timedelta(days=1)
if start_date.weekday() == 0:
l1,l2 = zip(*ranges_corected)
ranges_corected = list(zip(l1[1:],l2))
elif start_date.weekday() == 4:
l1,l2 = zip(*ranges_corected)
t = (l2[0] + timedelta(days=1),)
l2 = t + l2[1:]
ranges_corected = list(zip(l1,l2))
ranges_corected = ranges_corected[:-1]
if end_date.weekday() == 0:
end_date = end_date + timedelta(days=1)
ranges_corected.append((d2[-1], pd.Timestamp(end_date)))
ranges_corected
# Build (Monday, Saturday) timestamp pairs covering the business weeks
# between start_date and end_date.
import pandas as pd
from datetime import date
from itertools import zip_longest
start_date = date(year=2021, month=9, day=7)
end_date = date(year=2021, month=11, day=8)
# FIX: this expression was missing its closing parenthesis (SyntaxError).
len(pd.bdate_range(start_date, end_date))
# FIX: pandas removed the `closed=` keyword in 2.0; `inclusive=` (pandas >= 1.4)
# selects the same right-closed interval.
d1 = pd.date_range(start_date, end_date, freq="W-MON", inclusive="right")
d2 = pd.date_range(start_date, end_date, freq="W-SAT", inclusive="right")
ranges = []
#ranges.append((pd.Timestamp(start_date), d1[0]))
for s, e in zip(d1, d2):
    ranges.append((s, e))
#ranges.append((d2[-1], pd.Timestamp(end_date)))
#if ranges[-1][0] == ranges[-1][1]:
#    ranges[-1] = (ranges[-1][0], ranges[-1][0] + timedelta(days=1))
#if ranges[0][0] == ranges[0][1]:
#    ranges[0] = (ranges[0][0], ranges[0][0] + timedelta(days=1))
d1, d2, ranges
# Split [start_date, end_date] into evenly spaced boundary dates, sized so
# that each resulting interval stays under the data-provider's request limit.
import pandas as pd
from datetime import date
from math import ceil
start_date = date(year=2021, month=9, day=7)
end_date = date(year=2021, month=11, day=8)
# Business days only; weekends carry no market data.
days = len(pd.bdate_range(start_date, end_date))
total_data_points = days * 390 # presumably 390 one-minute bars per trading session -- TODO confirm; can make more accurate later by looking at time-scale
max_data_points_per_load = 5000 # Alpaca limit 5,000, Polygon limit 10,000
# +1 because date_range(periods=n) yields n boundary points, i.e. n-1 intervals.
total_periods = ceil(total_data_points / max_data_points_per_load) +1
ranges = pd.date_range(start_date, end_date, periods=total_periods)
ranges
```
# Breaking hourly ranges
| github_jupyter |
# Midterm #2 Solution
```
import numpy as np
import pandas as pd
import statsmodels.api as sm
data = pd.read_excel('data/assetclass_data_monthly_2009.xlsx',index_col='Dates').loc['2012-01-31':]
exret = (data.subtract(data['Cash'],axis=0)).drop('Cash',axis=1)
exret
# 1.1.a
means = exret.mean()*12
display(means)
stds = exret.std()*12**0.5
display(stds)
# 1.1.b
(means/stds).sort_values()
```
## 1.1(c)
MV optimization maximizes *Portfolio* Sharpe ratio, but that doesn't mean it only pays attention to *individual security* Sharpe ratio.
An individual security can be highly useful to a Portfolio's Sharpe ratio if it has low (or even negative covariances and correlations.) This may be so useful that it makes up for the security having a low Sharpe ratio
```
# 1.1.c
print("If the asset has negative correlation to many other assets, mathematically more weights will be allocated to the asset to utilize the effects of diverfication.")
# 1.1.d
exret.quantile(0.05)
# 1.2.a
sigma_mu = np.linalg.inv(exret.cov())@exret.mean()
w_tangent = sigma_mu / sigma_mu.sum()
w_tangent
# 1.2.b
tan_mean_return = w_tangent@exret.mean()
rf_mean_return = data['Cash'].mean()
delta = 0.01 / tan_mean_return
display(delta)
# 1.3.a
sigma_mu_diag = np.linalg.inv(np.diag(exret.var()))@exret.mean()
w_tangent_diag = sigma_mu_diag / sigma_mu_diag.sum()
w_tangent_diag
```
## 1.3(b)
We are using a biased method because it delivers much smaller variation of estimates. Thus, we have more confidence that this (biased) method will be more useful in out-of-sample data. The classic solution is unbiased, but it varies wildly from in-sample to out-of-sample data, making it less practical.
```
# 2.1.a
res21 = sm.OLS(exret['Foreign Equity'],sm.add_constant(exret['Domestic Equity'])).fit()
display(res21.summary())
print(f"alpha={res21.params[0]}\nbeta={res21.params[1]}\nr-squared={res21.rsquared}")
# 2.1.b
print("It implies a short position, because the significant beta indicates a positive correlation, and they need to be hedged in an opposite direction")
# 2.1.c
info_ratio = res21.params[0]/res21.resid.std()
display(info_ratio)
# 2.2.a
res22a = sm.OLS(exret['Foreign Equity'],sm.add_constant(exret.loc[:,exret.columns!='Foreign Equity'])).fit()
res22a.summary()
# 2.2.b
print("Inflation-Indexed has the smallest beta, while Private Equity has the largest.")
res22a.params[1:].sort_values()
# 2.2.c
print("Single asset regression should give a smaller error out of sample, and multiple asset regression smaller in sample.")
# 3.1.a
print("Smaller (in term of absolute value). When it is a perfect model, there should be no alpha. Therefore, the information ratio should be zero.")
# 3.1.b
print("Not certain. CAPM model does not specify how well the pricing factor explains the variation of an asset. So we have no guesses on this.")
# 3.1.c
print("Treynor ratio will be higher after hedging. becuase the hedging would hedge out the beta to be 0, therefore theoratically the treynor will be large.")
```
| github_jupyter |
# Debug
```
# trying to find a potential bug --> but things look correct
# #!!!
# check that Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked.txt has no redundant ENSP 2 function associations with different Scores
# ENSP = "9606.ENSP00000340944"
# funcName = "GO:0016020" # membrane
# # PTPN11 (ENSP00000340944), is associated with a “Membrane” term with 3 stars.
# # original table
# cond = (df1["ENSP"] == ENSP) & (df1["funcName"] == funcName)
# df1[cond]
# Taxid Etype ENSP funcName Score
# 21974650 9606 -22 9606.ENSP00000340944 GO:0016020 3.067805
# 21975146 9606 -22 9606.ENSP00000340944 GO:0016020 2.145024
# 21975155 9606 -22 9606.ENSP00000340944 GO:0016020 2.081105
alternative_2_current_ID_dict = {}
alternative_2_current_ID_dict.update(cst.get_alternative_2_current_ID_dict(GO_obo_Jensenlab, upk=False))
alternative_2_current_ID_dict.update(cst.get_alternative_2_current_ID_dict(GO_obo, upk=False))
# GOCC not needed yet, lineage_dict has GOCC terms but output file has normal GO terms, conversion happens at second backtracking step
alternative_2_current_ID_dict.update(cst.get_alternative_2_current_ID_dict(BTO_obo_Jensenlab, upk=True))
alternative_2_current_ID_dict.update(cst.get_alternative_2_current_ID_dict(DOID_obo_current, upk=True))
DAG = obo_parser.GODag(obo_file=GO_obo_Jensenlab, upk=False)
DAG.load_obo_file(obo_file=DOID_obo_current, upk=True)
DAG.load_obo_file(obo_file=BTO_obo_Jensenlab, upk=True)
# GO_CC_textmining_additional_etype should always be False here --> replaces "GO" with "GOCC". Not necessary yet since, all terms still -22 not -20.
lineage_dict_direct_parents = cst.get_lineage_dict_for_DOID_BTO_GO(GO_obo_Jensenlab, DOID_obo_current, BTO_obo_Jensenlab, GO_CC_textmining_additional_etype=False, direct_parents_only=True)
# backtracking with smart logic to propagate scores
```
# Imports
```
pwd
cd app/python
import os, sys
import pandas as pd
import numpy as np
import query
import obo_parser
import create_SQL_tables_snakemake as cst
import tools
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
import os, sys, math
from importlib import reload
import random
from itertools import product
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
import math
from matplotlib_venn import venn2, venn2_circles, venn2_unweighted
from matplotlib_venn import venn3, venn3_circles
from matplotlib import pyplot as plt
fn_human_ENSPs = r"/mnt/mnemo5/dblyon/agotool/data/PostgreSQL/tables/9606_proteins_ENSPs_STRINGv11.txt"
human_ENSPs = []
with open(fn_human_ENSPs, "r") as fh_in:
for line in fh_in:
human_ENSPs.append(line.strip())
human_ENSPs = sorted(human_ENSPs)
d = query.get_Secondary_2_Primary_IDs_dict_from_sec(ids_2_map=human_ENSPs)
human_UniProt_entries = sorted(d.values())
reload(query)
lineage_dict = query.get_lineage_dict_hr()
def which_child_has_this_parent(children, parent, lineage_dict):
    """Print every child whose lineage contains *parent*; warn when a child has no lineage entry."""
    for candidate in children:
        if candidate not in lineage_dict:
            print("{} has no lineage #!!!".format(candidate))
            continue
        if parent in lineage_dict[candidate]:
            print("{} has {} as a parent".format(candidate, parent))
%matplotlib inline
DOWNLOADS_DIR = r"/scratch/dblyon/agotool/data/PostgreSQL/downloads"
TABLES_DIR = r"/scratch/dblyon/agotool/data/PostgreSQL/tables"
GO_obo_Jensenlab = os.path.join(DOWNLOADS_DIR, "go_Jensenlab.obo")
GO_obo = os.path.join(DOWNLOADS_DIR, "go-basic.obo")
DOID_obo_current = os.path.join(DOWNLOADS_DIR, "DOID_obo_current.obo") # http://purl.obolibrary.org/obo/doid.obo
BTO_obo_Jensenlab = os.path.join(DOWNLOADS_DIR, "bto_Jensenlab.obo") # static file
Taxid_UniProtID_2_ENSPs_2_KEGGs = os.path.join(TABLES_DIR, "Taxid_UniProtID_2_ENSPs_2_KEGGs.txt")
Protein_2_Function_and_Score_DOID_BTO_GOCC_STS = os.path.join(DOWNLOADS_DIR, "Protein_2_Function_and_Score_DOID_BTO_GOCC_STS.txt.gz")
Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted = os.path.join(TABLES_DIR, "Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted.txt")
Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked = os.path.join(TABLES_DIR, "Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked.txt")
Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked_rescaled = os.path.join(TABLES_DIR, "Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked_rescaled.txt")
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized.txt")
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked.txt")
Protein_2_Function_DOID_BTO_GOCC_UPS = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_UPS.txt")
GO_CC_textmining_additional_etype=True
DAG = obo_parser.GODag(obo_file=GO_obo_Jensenlab, upk=False)
DAG.load_obo_file(obo_file=DOID_obo_current, upk=True)
DAG.load_obo_file(obo_file=BTO_obo_Jensenlab, upk=True)
DAG.load_obo_file(obo_file=GO_obo, upk=False)
lineage_dict_all_parents = cst.get_lineage_dict_for_DOID_BTO_GO(GO_obo_Jensenlab, DOID_obo_current, BTO_obo_Jensenlab, GO_CC_textmining_additional_etype=True, direct_parents_only=False)
secondary_2_primaryTerm_dict, obsolete_terms_set = cst.get_secondary_2_primaryTerm_dict_and_obsolete_terms_set(DAG)
ENSP_2_UniProtID_dict = cst.get_ENSP_2_UniProtID_dict(Taxid_UniProtID_2_ENSPs_2_KEGGs)
!ls -lah {Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked}
def reformat_TextMining_download(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS, Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted):
    """Flatten the (optionally gzipped) TextMining download into a long tab-separated table.

    Each input line holds one ENSP, a string-encoded array of (funcName, score)
    pairs, and an entity type; one output row is written per (ENSP, funcName).
    """
    with open(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted, "w") as out_handle:
        out_handle.write("Taxid\tEtype\tENSP\tfuncName\tScore\n")
        for raw_line in tools.yield_line_uncompressed_or_gz_file(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS):
            ensp, func_score_arr_str, entity_type = raw_line.split("\t")
            entity_type = entity_type.strip()
            taxid = ensp.split(".")[0]  # ENSPs are formatted "taxid.identifier"
            for func_name, score in cst.helper_convert_str_arr_2_nested_list(func_score_arr_str):
                out_handle.write("{}\t{}\t{}\t{}\t{}\n".format(taxid, entity_type, ensp, func_name, score))
reformat_TextMining_download(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS, Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted)
# !zgrep -P "^9606." {Protein_2_Function_and_Score_DOID_BTO_GOCC_STS} | grep "GO:0016020"
# use backtracked and rescaled scores
df1 = pd.read_csv(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked, sep='\t')
df1 = df1[df1["ENSP"].isin(human_ENSPs)]
df1.head(3)
df1["Category"] = ""
df1.loc[df1["Etype"] == -22, "Category"] = "Compartments"
df1.loc[df1["Etype"] == -25, "Category"] = "Tissues"
df1.loc[df1["Etype"] == -26, "Category"] = "Diseases"
unique = ["Compartments", "Tissues", "Diseases"]
sns_plot = sns.displot(df1, x="Score", kind="kde", hue="Category", cut=True, clip=(0, 5), palette=dict(zip(unique, sns.color_palette(n_colors=len(unique)))))
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Score_KDE.pdf")
df1["Category"] = ""
df1.loc[df1["Etype"] == -22, "Category"] = "Compartments"
df1.loc[df1["Etype"] == -25, "Category"] = "Tissues"
df1.loc[df1["Etype"] == -26, "Category"] = "Diseases"
unique = ["Compartments", "Tissues", "Diseases"]
sns_plot = sns.displot(df1, x="Score", kind="kde", hue="Category", cut=True, clip=(0, 5), palette=dict(zip(unique, sns.color_palette(n_colors=len(unique)))))
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Score_KDE.pdf")
dfx = df1.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
# Term-size distribution (log10 proteins per function) per text-mining category.
# NOTE(review): df1, dfx, np, sns, cst come from earlier notebook cells.
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
# map each function name to its log10 protein count
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df1[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
# here -22 is still the pre-remap GOCC etype (later cells use -20 after backtracking)
entityType_2_functionType_dict = {-22: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
unique = ["Compartments", "Tissues", "Diseases"]
# KDE of log10 term sizes, one curve per category, fixed color per category
sns_plot = sns.displot(dfx, x="log_num_ENSPs", kind="kde", hue="category", cut=True, clip=(0, 10), palette=dict(zip(unique, sns.color_palette(n_colors=len(unique)))))
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Log10_TermSize.pdf")
# --- identical cell re-run below (same computation, same output file) ---
dfx = df1.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df1[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
entityType_2_functionType_dict = {-22: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
unique = ["Compartments", "Tissues", "Diseases"]
sns_plot = sns.displot(dfx, x="log_num_ENSPs", kind="kde", hue="category", cut=True, clip=(0, 10), palette=dict(zip(unique, sns.color_palette(n_colors=len(unique)))))
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Log10_TermSize.pdf")
# IPython shell magic: inspect the rescaled/backtracked score file
!head {Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked_rescaled}
# reload the cst helper module after editing it on disk
from importlib import reload
reload(cst)
```
# Compartments
```
### GOCC from UniProt, in UniProt Entry Names, reduced to etype -22 rows;
### builds the "UniProtEN_GOterm" association set used for the Venn comparison below.
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/Function_2_Protein_table_9606_UPS_temp_TM_discretization_comparison_v2.txt"
term_l, uniproten_l = [], []
with open(fn_out, "r") as fh:
    for line in fh:
        term, uniproten, etype = line.strip().split("\t")
        if etype == "-22":  # GOCC rows only
            term_l.append(term)
            uniproten_l.append(uniproten)
# one "UniProtEN_GOterm" string per association
GOCC_uniprot_function_l = []
for uniprot, term in zip(uniproten_l, term_l):
    GOCC_uniprot_function_l.append(uniprot + "_" + term)
print(len(GOCC_uniprot_function_l), len(set(GOCC_uniprot_function_l)))
GOCC_uniprot_function_set = set(GOCC_uniprot_function_l)
# discretization parameters for Compartments (etype -22)
etype = -22
alpha = 0.2
beta = 0.7
### seems good
# alpha_22 = 0.325,
# beta_22 = 1.65,
### use backtracked and rescaled scores --> discretize and backtrack again
df_22 = cst.rescale_scores(df1[df1["Etype"] == etype], alpha=alpha)
# keep confident associations: raw Score >= 1.5 and rescaled score <= beta
df_22 = df_22[(df_22["Score"] >= 1.5) & (df_22["Rescaled_score"] <= beta)]
print(df_22.shape, " backtracked and rescaled scores")
print(df_22[df_22["Score"] == 5].shape, " Score of 5")
###### before final backtracking
### Score ECDF proportion: all associations vs the discretized subset
x1 = df1.loc[df1["Etype"] == etype, "Score"].to_list()
x2 = df_22["Score"].to_list()
x = pd.DataFrame()
x["Score"] = x1 + x2
x["Type"] = len(x1)*["all"] + len(x2)*["discretized"]
sns_plot = sns.displot(x, x="Score", kind="ecdf", hue="Type")
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_ECDF_Score_all_vs_discretized_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta))
### backtracking
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
Protein_2_Function_DOID_BTO_GOCC_UPS_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_UPS_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
DOID_BTO_GOCC_without_lineage_etype = os.path.join(TABLES_DIR, "DOID_BTO_GOCC_without_lineage_{}_temp.txt".format(etype))
# propagate associations up the ontology; GO_CC_textmining_additional_etype=True
# presumably remaps text-mining GOCC from -22 to -20 (the -20 check below relies on it) -- TODO confirm in cst
cst.backtrack_funcNames(df_22, lineage_dict_all_parents, secondary_2_primaryTerm_dict, obsolete_terms_set, ENSP_2_UniProtID_dict, Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, Protein_2_Function_DOID_BTO_GOCC_UPS_etype, DOID_BTO_GOCC_without_lineage_etype, GO_CC_textmining_additional_etype=True)
##### after final backtracking
### Size of terms
dft = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df = dft[dft["ENSP"].isin(human_ENSPs)]
total_num_associations = df.shape[0]
print(total_num_associations, " total_num_associations")
print(len(df["funcName"].unique()), " num unique Compartments")
dfx = df.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
# after the final backtracking text-mining GOCC is typed as -20
entityType_2_functionType_dict = {-20: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
ax = sns.boxenplot(data=dfx, orient="h", x="log_num_ENSPs", y="category")
# term-size counts of the non-text-mining categories, for comparison
fn = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/Function_2_Protein_table_9606_UPS_temp_TM_discretization_comparison_counts.txt"
df2m = pd.read_csv(fn, sep='\t')
cond = df2m["num_UniProtEN"] < 2
df2m = df2m[~cond]
cond_2_exclude = df2m["etype"].isin({-20, -25, -26})
df2m = df2m[~cond_2_exclude]
df2m = df2m.rename(columns={"etype": "Etype", "log_num_UniProtEN": "log10_num_genes_per_function"})
dfx = dfx.rename(columns={"log_num_ENSPs": "log10_num_genes_per_function"})
dfm = pd.concat([dfx[["Etype", "category", "log10_num_genes_per_function"]], df2m[["Etype", "category", "log10_num_genes_per_function"]]])
ax = sns.boxenplot(data=dfm, orient="h", x="log10_num_genes_per_function", y="category")
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Boxenplot_TermSize_comparison_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta)
ax.get_figure().savefig(fn_out)
### Number of associations (compared to other categories)
# hard-coded human association counts of the established categories
d = {# "GOBP": 1191825,
     "GOCC": 348647,
     "GOMF": 237452,
     "UniProtKW": 297363,
     "KEGG": 27998,
     "INTERPRO": 78688,
     "PFAM": 28129,
     # "PMID": 22143978,
     "Reactome": 111639,
     "WikiPathways": 26100}
d["Compartments"] = total_num_associations
df = pd.DataFrame()
df["category"] = d.keys()
df["number of associations"] = d.values()
df["log10"] = df["number of associations"].apply(lambda x: np.log10(x))
df.plot.bar(x="category", y="number of associations")
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Barplot_TotalNumberAssociations_comparison_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta)
plt.savefig(fn_out)
### Term size of associations (compared to other categories)
# collect text-mining GOCC associations (now etype -20) for human (9606)
GOCC_TM_uniprot_function_l = []
with open(Protein_2_Function_DOID_BTO_GOCC_UPS_etype, "r") as fh:
    for line in fh:
        if line.startswith("9606\t") and line.endswith("-20\n"):
            taxid, uniproten, func_arr, etype = line.strip().split("\t")
            # func_arr presumably looks like {"GOCC:...","GOCC:..."}: strip braces/quotes,
            # normalize the prefix so it matches the UniProt-derived set -- TODO confirm format
            for func in func_arr[1:-1].replace('"', "").replace("GOCC:", "GO:").split(","):
                GOCC_TM_uniprot_function_l.append(uniproten + "_" + func)
GOCC_TM_uniprot_function_set = set(GOCC_TM_uniprot_function_l)
# Overlap with GOCC
a = GOCC_uniprot_function_set
b = GOCC_TM_uniprot_function_set
left = len(a - b)
right = len(b - a)
intersection = len(a.intersection(b))
total = len(a.union(b))
plt.figure(figsize=(5, 5))
# venn2 subsets are (left-only, right-only, intersection); labels show count and % of union
v = venn2(subsets = (left, right, intersection), set_labels = ("GOCC UniProt", "GOCC Text Mining"), set_colors=('#377eb8', '#4daf4a'),
          alpha = 0.7, subset_label_formatter = lambda x: str(x) + "\n(" + f"{(x/total):1.0%}" + ")")
venn2_circles(subsets = (left, right, intersection))
plt.title("Comparing human associations")
plt.savefig("/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Venn_diagram_GOCC_UniProt_vs_TextMining_alpha_{}_beta_{}.pdf".format(alpha, beta))
```
# Tissues
```
# Tissues (etype -25): same pipeline as the Compartments section above --
# rescale, discretize, backtrack, then compare term sizes and association counts.
etype = -25
alpha = 0.2
beta = 0.7
# alpha_25 = 0.3,
# beta_25 = 1.4,
### use backtracked and rescaled scores --> discretize and backtrack again
df_22 = cst.rescale_scores(df1[df1["Etype"] == etype], alpha=alpha)
# keep confident associations: raw Score >= 1.5 and rescaled score <= beta
df_22 = df_22[(df_22["Score"] >= 1.5) & (df_22["Rescaled_score"] <= beta)]
print(df_22.shape, " backtracked and rescaled scores")
print(df_22[df_22["Score"] == 5].shape, " Score of 5")
###### before final backtracking
### Score ECDF proportion: all associations vs the discretized subset
x1 = df1.loc[df1["Etype"] == etype, "Score"].to_list()
x2 = df_22["Score"].to_list()
x = pd.DataFrame()
x["Score"] = x1 + x2
x["Type"] = len(x1)*["all"] + len(x2)*["discretized"]
sns_plot = sns.displot(x, x="Score", kind="ecdf", hue="Type")
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_ECDF_Score_all_vs_discretized_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta))
### backtracking
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
Protein_2_Function_DOID_BTO_GOCC_UPS_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_UPS_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
DOID_BTO_GOCC_without_lineage_etype = os.path.join(TABLES_DIR, "DOID_BTO_GOCC_without_lineage_{}_temp.txt".format(etype))
cst.backtrack_funcNames(df_22, lineage_dict_all_parents, secondary_2_primaryTerm_dict, obsolete_terms_set, ENSP_2_UniProtID_dict, Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, Protein_2_Function_DOID_BTO_GOCC_UPS_etype, DOID_BTO_GOCC_without_lineage_etype, GO_CC_textmining_additional_etype=True)
##### after final backtracking
### Size of terms
dft = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df = dft[dft["ENSP"].isin(human_ENSPs)]
total_num_associations = df.shape[0]
print(total_num_associations, " total_num_associations")
print(len(df["funcName"].unique()), " num unique Tissues")
dfx = df.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
entityType_2_functionType_dict = {-20: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
ax = sns.boxenplot(data=dfx, orient="h", x="log_num_ENSPs", y="category")
# term-size counts of the non-text-mining categories, for comparison
fn = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/Function_2_Protein_table_9606_UPS_temp_TM_discretization_comparison_counts.txt"
df2m = pd.read_csv(fn, sep='\t')
cond = df2m["num_UniProtEN"] < 2
df2m = df2m[~cond]
cond_2_exclude = df2m["etype"].isin({-20, -25, -26})
df2m = df2m[~cond_2_exclude]
df2m = df2m.rename(columns={"etype": "Etype", "log_num_UniProtEN": "log10_num_genes_per_function"})
dfx = dfx.rename(columns={"log_num_ENSPs": "log10_num_genes_per_function"})
dfm = pd.concat([dfx[["Etype", "category", "log10_num_genes_per_function"]], df2m[["Etype", "category", "log10_num_genes_per_function"]]])
ax = sns.boxenplot(data=dfm, orient="h", x="log10_num_genes_per_function", y="category")
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Boxenplot_TermSize_comparison_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta)
ax.get_figure().savefig(fn_out)
### Number of associations (compared to other categories)
# hard-coded human association counts of the established categories
d = {# "GOBP": 1191825,
     "GOCC": 348647,
     "GOMF": 237452,
     "UniProtKW": 297363,
     "KEGG": 27998,
     "INTERPRO": 78688,
     "PFAM": 28129,
     # "PMID": 22143978,
     "Reactome": 111639,
     "WikiPathways": 26100}
d["Tissues"] = total_num_associations
df = pd.DataFrame()
df["category"] = d.keys()
df["number of associations"] = d.values()
df["log10"] = df["number of associations"].apply(lambda x: np.log10(x))
df.plot.bar(x="category", y="number of associations")
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Barplot_TotalNumberAssociations_comparison_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta)
plt.savefig(fn_out)
```
# Diseases
```
# Diseases (etype -26): same pipeline as the Compartments/Tissues sections above.
etype = -26
alpha = 0.2
beta = 0.7
# previously tried parameter combinations, kept for reference:
# alpha = 0.15
# beta = 0.75
# alpha = 0.17
# beta = 0.75
# alpha = 0.15
# beta = 0.75
# alpha = 0.175
# beta = 0.8
# alpha = 0.2
# beta = 0.8
# alpha = 0.3
# beta = 1.0
### use backtracked and rescaled scores --> discretize and backtrack again
df_22 = cst.rescale_scores(df1[df1["Etype"] == etype], alpha=alpha)
# keep confident associations: raw Score >= 1.5 and rescaled score <= beta
df_22 = df_22[(df_22["Score"] >= 1.5) & (df_22["Rescaled_score"] <= beta)]
print(df_22.shape, " backtracked and rescaled scores")
print(df_22[df_22["Score"] == 5].shape, " Score of 5")
###### before final backtracking
### Score ECDF proportion: all associations vs the discretized subset
x1 = df1.loc[df1["Etype"] == etype, "Score"].to_list()
x2 = df_22["Score"].to_list()
x = pd.DataFrame()
x["Score"] = x1 + x2
x["Type"] = len(x1)*["all"] + len(x2)*["discretized"]
sns_plot = sns.displot(x, x="Score", kind="ecdf", hue="Type")
sns_plot.savefig(r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_ECDF_Score_all_vs_discretized_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta))
### backtracking
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
Protein_2_Function_DOID_BTO_GOCC_UPS_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_UPS_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
DOID_BTO_GOCC_without_lineage_etype = os.path.join(TABLES_DIR, "DOID_BTO_GOCC_without_lineage_{}_temp.txt".format(etype))
cst.backtrack_funcNames(df_22, lineage_dict_all_parents, secondary_2_primaryTerm_dict, obsolete_terms_set, ENSP_2_UniProtID_dict, Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, Protein_2_Function_DOID_BTO_GOCC_UPS_etype, DOID_BTO_GOCC_without_lineage_etype, GO_CC_textmining_additional_etype=True)
##### after final backtracking
### Size of terms
dft = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df = dft[dft["ENSP"].isin(human_ENSPs)]
total_num_associations = df.shape[0]
print(total_num_associations, " total_num_associations")
print(len(df["funcName"].unique()), " num unique diseases")
dfx = df.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
entityType_2_functionType_dict = {-20: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
# ax = sns.boxenplot(data=dfx, orient="h", x="log_num_ENSPs", y="category") --> not shown, since included in comparison with other etypes
fn = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/Function_2_Protein_table_9606_UPS_temp_TM_discretization_comparison_counts.txt"
df2m = pd.read_csv(fn, sep='\t')
cond = df2m["num_UniProtEN"] < 2
df2m = df2m[~cond]
cond_2_exclude = df2m["etype"].isin({-20, -25, -26})
df2m = df2m[~cond_2_exclude]
df2m = df2m.rename(columns={"etype": "Etype", "log_num_UniProtEN": "log10_num_genes_per_function"})
dfx = dfx.rename(columns={"log_num_ENSPs": "log10_num_genes_per_function"})
dfm = pd.concat([dfx[["Etype", "category", "log10_num_genes_per_function"]], df2m[["Etype", "category", "log10_num_genes_per_function"]]])
ax = sns.boxenplot(data=dfm, orient="h", x="log10_num_genes_per_function", y="category")
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Boxenplot_TermSize_comparison_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta)
ax.get_figure().savefig(fn_out)
### Number of associations (compared to other categories)
# hard-coded human association counts of the established categories
d = {# "GOBP": 1191825,
     "GOCC": 348647,
     "GOMF": 237452,
     "UniProtKW": 297363,
     "KEGG": 27998,
     "INTERPRO": 78688,
     "PFAM": 28129,
     # "PMID": 22143978,
     "Reactome": 111639,
     "WikiPathways": 26100}
d["Diseases"] = total_num_associations
df = pd.DataFrame()
df["category"] = d.keys()
df["number of associations"] = d.values()
df["log10"] = df["number of associations"].apply(lambda x: np.log10(x))
df.plot.bar(x="category", y="number of associations")
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Barplot_TotalNumberAssociations_comparison_etype_{}_alpha_{}_beta_{}.pdf".format(etype, alpha, beta)
plt.savefig(fn_out)
```
# Summary
## Number of associations after each step
```
# Count human associations per Etype after each pipeline step; the hard-coded
# numbers below were transcribed from these notebook outputs.
# !ls -lah {Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted}
# !head {Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted}
df = pd.read_csv(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted, sep='\t') #, names=["Taxid", "Etype", "ENSP", "funcName", "Score"])
df = df[df["ENSP"].isin(human_ENSPs)]
df.groupby("Etype")["ENSP"].count()
# -26 4542678
# -25 6721335
# -22 3709558
df = pd.read_csv(Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked, sep='\t')
df = df[df["ENSP"].isin(human_ENSPs)]
df.groupby("Etype")["ENSP"].count()
# -26 5344975
# -25 7117392
# -22 4510937
df = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized, sep='\t')
df = df[df["ENSP"].isin(human_ENSPs)]
df.groupby("Etype")["ENSP"].count()
# -26 67188
# -25 156394
# -22 212275
df = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked, sep='\t')
df = df[df["ENSP"].isin(human_ENSPs)]
df.groupby("Etype")["ENSP"].count()
# -26 131165
# -25 251709
# -20 423365
### Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_reformatted Taxid 9606 (before backtracking)
# Etype
# -26 4541099
# -25 6720383
# -22 3709147
### --> Score of 5
# -25 1605
# -22 122656
### Protein_2_Function_and_Score_DOID_BTO_GOCC_STS_backtracked Taxid 9606 (after 1. backtracking)
# Etype
# -26 5344821
# -25 7117392
# -22 4510937
### Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized Taxid 9606 (after discretization)
# Etype
# -26 55991
# -25 156394
# -22 212275
### Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked Taxid 9606 (after second backtracking), final
# Etype
# -26 131165
# -25 251709
# -20 423365
# counts per category after each step (values copied from the printouts above)
step = ["unmodified source", "1. backtracking", "discretization", "2. backtracking"]
# reducing to ENSPs in STRING 11 (Taxid 9606 only)
e26 = [4541099, 5344821, 55991, 131165]
e25 = [6720383, 7117392, 156394, 251709]
e22 = [3709147, 4510937, 212275, 423365]
df = pd.DataFrame()
df["step"] = step
df["Compartments"] = e22
df["Diseases"] = e26
df["Tissues"] = e25
df = df.set_index("step")
# float so scientific display formatting below applies
df["Compartments"] = df["Compartments"].astype("float")
df["Diseases"] = df["Diseases"].astype("float")
df["Tissues"] = df["Tissues"].astype("float")
pd.options.display.float_format = '{:.1e}'.format
# pd.reset_option('^display.', silent=True)
df
ax = df.plot.bar()
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Barplot_number_associations_after_each_step.pdf"
# plt.savefig(fn_out)
fig = ax.get_figure()
fig.savefig(fn_out)
# percent of each step's count relative to the previous step: divide the shifted
# series by the original, then rotate so the first entry is 100% (step vs itself)
# NOTE(review): the column name "Compartements_perc" is a typo but is used
# consistently below, so it must not be renamed in isolation
vals = 100* np.append(df["Compartments"].values[1:], df["Compartments"].values[-1]) / df["Compartments"].values
df["Compartements_perc"] = np.append(vals[-1], vals[:-1])
vals = 100* np.append(df["Diseases"].values[1:], df["Diseases"].values[-1]) / df["Diseases"].values
df["Diseases_perc"] = np.append(vals[-1], vals[:-1])
vals = 100* np.append(df["Tissues"].values[1:], df["Tissues"].values[-1]) / df["Tissues"].values
df["Tissues_perc"] = np.append(vals[-1], vals[:-1])
df[['Compartements_perc', 'Diseases_perc', 'Tissues_perc']].plot.bar()
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Barplot_number_associations_after_each_step_in_percent_relative_to_previous_step.pdf"
plt.savefig(fn_out)
df
pd.reset_option('^display.', silent=True)
df
```
## ggplot2 number comparison
```
# Count associations per entity type from the UPS table (human subset via grep).
# older path constants kept for reference:
# Function_2_Protein_table_UPS = os.path.join(TABLES_DIR, "Function_2_Protein_table_UPS.txt")
# Function_2_Protein_table_9606_temp = os.path.join(TABLES_DIR, "Function_2_Protein_table_9606_temp.txt")
# temp_9606_Number_of_proteins_per_function
Protein_2_Function_table_UPS = os.path.join(TABLES_DIR, "Protein_2_Function_table_UPS.txt")
Protein_2_Function_table_UPS_9606_temp = os.path.join(TABLES_DIR, "Protein_2_Function_table_UPS_9606_temp.txt")
Protein_2_Function_table_UPS_9606_subset_STRING_v11_temp = os.path.join(TABLES_DIR, "Protein_2_Function_table_UPS_9606_subset_STRING_v11_temp.txt")
# !grep -P "^9606\t" {Function_2_Protein_table_UPS} > {Function_2_Protein_table_9606_temp}
### grep human proteins
!grep -P "^9606\t" {Protein_2_Function_table_UPS} > {Protein_2_Function_table_UPS_9606_temp}
### reduce to human_UniProt_entries
human_UniProt_entries[:3]
!head {Protein_2_Function_table_UPS_9606_temp}
from collections import defaultdict
# number of terms per row is (number of commas in funcarr) + 1
# NOTE(review): assumes individual terms contain no commas -- confirm file format
etype_2_count_dict = defaultdict(int)
with open(Protein_2_Function_table_UPS_9606_temp, "r") as fh_in:
    for line in fh_in:
        taxid, uniproten, funcarr, etype = line.strip().split("\t")
        # NOTE(review): if human_UniProt_entries is a list this test is O(n) per line -- a set would be faster
        if uniproten not in human_UniProt_entries:
            continue
        etype_2_count_dict[etype] += funcarr.count(",") + 1
etype_2_count_dict
# human-readable category --> internal (negative) entity-type code
functionType_2_entityType_dict = {"Gene Ontology cellular component TEXTMINING": -20,
                                  "Gene Ontology biological process": -21,
                                  "Gene Ontology cellular component": -22,
                                  "Gene Ontology molecular function": -23,
                                  "Brenda Tissue Ontology": -25,
                                  "Disease Ontology": -26,
                                  "UniProt keywords": -51,
                                  "KEGG (Kyoto Encyclopedia of Genes and Genomes)": -52,
                                  "SMART (Simple Modular Architecture Research Tool)": -53,
                                  "INTERPRO": -54,
                                  "PFAM (Protein FAMilies)": -55,
                                  "PMID": -56,
                                  "Reactome": -57,
                                  "WikiPathways": -58}
# R cell (not Python): barplot of association counts per category, with a zoomed facet.
library(ggplot2)
library(ggforce)
## ggplot Barplot number of associations
# counts correspond to the Python summaries above (taxid 9606, STRING v11)
df <- data.frame(category = c('Compartments', 'Tissues', 'Diseases',
                              'GOBP', 'GOCC', 'GOMF',
                              'UniProtKW', 'KEGG', 'INTERPRO',
                              'PFAM', 'PMID', 'Reactome',
                              'WikiPathways'),
                 num_associations = c(417307, 248806, 129730,
                                      1191825, 348647, 237452,
                                      297363, 27998, 78688,
                                      28129, 22210473, 111639,
                                      26494))
# order bars by count, rotate x labels, and zoom into the 0-450k range
p <- ggplot(df) +
    aes(x = reorder(category, num_associations), y = num_associations, fill = reorder(category, num_associations)) +
    geom_bar(stat="identity") + theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
    facet_zoom(ylim = c(0, 450000))
p
ggsave("/Users/dblyon/modules/cpr/agotool/data/PostgreSQL/tables/plots/Barplot_num_associations_of_all_categories_including_TM_discretization_9606_STRINGv11_2p_v3.pdf")
```
## Term size comparison
```
### Load the final (discretized + backtracked) association table per text-mining
### entity type and compute log10 term sizes (df_22 / df_25 / df_26) for the
### combined comparison plot in the next cell.
### NOTE(review): TABLES_DIR, human_ENSPs, pd/np/os come from earlier cells.
# --- Compartments (etype -22; re-typed to -20 in the final table) ---
etype = -22
alpha = 0.325
beta = 1.65
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
df = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df.head()
etype = -22
alpha = 0.325
beta = 1.65
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
df = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df = df[df["ENSP"].isin(human_ENSPs)]
total_num_associations = df.shape[0]
print(total_num_associations, " total_num_associations")
# BUG FIX: label said "num unique diseases" but this block counts Compartments (etype -22)
print(len(df["funcName"].unique()), " num unique Compartments")
dfx = df.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
entityType_2_functionType_dict = {-20: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
df_22 = dfx[["Etype", "category", "log_num_ENSPs"]]
# --- Tissues (etype -25) ---
etype = -25
alpha = 0.3
beta = 1.4
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
df = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df = df[df["ENSP"].isin(human_ENSPs)]
total_num_associations = df.shape[0]
print(total_num_associations, " total_num_associations")
# BUG FIX: label said "num unique diseases" but this block counts Tissues (etype -25)
print(len(df["funcName"].unique()), " num unique Tissues")
dfx = df.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
entityType_2_functionType_dict = {-20: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
df_25 = dfx[["Etype", "category", "log_num_ENSPs"]]
# --- Diseases (etype -26) ---
etype = -26
alpha = 0.15
beta = 0.75
Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype = os.path.join(TABLES_DIR, "Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype_{}_alpha_{}_beta_{}.txt".format(etype, alpha, beta))
df = pd.read_csv(Protein_2_Function_DOID_BTO_GOCC_STS_backtracked_discretized_backtracked_etype, sep="\t")
df = df[df["ENSP"].isin(human_ENSPs)]
total_num_associations = df.shape[0]
print(total_num_associations, " total_num_associations")
print(len(df["funcName"].unique()), " num unique diseases")
dfx = df.groupby("funcName")["ENSP"].count().reset_index().rename(columns={"ENSP": "num_ENSPs"})
dfx["log_num_ENSPs"] = dfx["num_ENSPs"].apply(lambda x: np.log10(x))
funcName_2_logNumENSPs_dict = {funcName_logENSPs[0]: funcName_logENSPs[1] for funcName_logENSPs in zip(dfx["funcName"].values, dfx["log_num_ENSPs"].values)}
dfx = df[["Etype", "funcName"]].drop_duplicates()
dfx["log_num_ENSPs"] = dfx["funcName"].apply(lambda x: funcName_2_logNumENSPs_dict[x])
dfx = dfx[dfx["log_num_ENSPs"] > 0] # more than 1 function per protein per genome
entityType_2_functionType_dict = {-20: "Compartments",
                                  -25: "Tissues",
                                  -26: "Diseases",}
dfx["category"] = dfx["Etype"].apply(lambda x: entityType_2_functionType_dict[x])
df_26 = dfx[["Etype", "category", "log_num_ENSPs"]]
# Merge text-mining term sizes (df_22/df_25/df_26 from the previous cell) with the
# other categories and draw the combined boxenplot; then an example Fisher test
# and (commented out) a debugging session for one ENSP/GO-term pair.
fn = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/Function_2_Protein_table_9606_UPS_temp_TM_discretization_comparison_counts.txt"
df2m = pd.read_csv(fn, sep='\t')
cond = df2m["num_UniProtEN"] < 2
df2m = df2m[~cond]
cond_2_exclude = df2m["etype"].isin({-20, -25, -26})
df2m = df2m[~cond_2_exclude]
df2m = df2m.rename(columns={"etype": "Etype", "log_num_UniProtEN": "log_num_ENSPs"})
df2m = df2m[["Etype", "category", "log_num_ENSPs"]]
dfm = pd.concat([df_22, df_25, df_26, df2m])
dfm = dfm.rename(columns={"log_num_ENSPs": "log10 num genes per function"})
ax = sns.boxenplot(data=dfm, orient="h", x="log10 num genes per function", y="category")
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.subplot.left': 0.1})
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Boxenplot_Comparison_TextMining_vs_rest_log10_num_genes_per_function_v5.pdf"
# NOTE(review): plt.figure() here creates a NEW empty figure; ax.get_figure() still
# saves the boxenplot figure, so the figsize has no effect on the saved PDF
plt.figure(figsize=(20, 6))
ax.get_figure().savefig(fn_out)
# --- identical cell re-run below ---
ax = sns.boxenplot(data=dfm, orient="h", x="log10 num genes per function", y="category")
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.subplot.left': 0.1})
fn_out = r"/scratch/dblyon/agotool/data/PostgreSQL/tables/plots/Human2p_Boxenplot_Comparison_TextMining_vs_rest_log10_num_genes_per_function_v5.pdf"
plt.figure(figsize=(20, 6))
ax.get_figure().savefig(fn_out)
# toy 2x2 Fisher's exact test
from scipy import stats
a = 5
b = 10
c = 10
d = 20000
stats.fisher_exact([[a, b], [c, d]])
### Debugging origin of functional association for given ENSP
# ENSP = "9606.ENSP00000340944"
# funcName = "GO:0016020" # membrane
# # PTPN11 (ENSP00000340944), is associated with a “Membrane” term with 3 stars.
# # original table
# # original Score from Lars is 3.067649
# # backtracking needs to be fixed
# cond = (df1["ENSP"] == ENSP) & (df1["funcName"] == funcName)
# df1[cond]
# df_22 = cst.rescale_scores(df1[df1["Etype"] == etype], alpha=alpha)
# df_22 = df_22[(df_22["Score"] >= 1.5) & (df_22["Rescaled_score"] <= beta)]
# # is any of these terms a child of "membrane"
# funcNames_2_check = sorted(df_22.loc[df_22["ENSP"] == ENSP, "funcName"].to_list())
# funcNames_2_check = [ele.replace("GOCC:", "GO:") for ele in funcNames_2_check]
# funcNames_2_check[:3]
# funcName = "GO:0016020" # membrane
# which_child_has_this_parent(funcNames_2_check, funcName, lineage_dict)
# # GO:0005785 has GO:0016020 as a parent
# # GO:0070618 has GO:0016020 as a parent
# l = ["GO:0070618", "GO:0005785"]
# cond = (df1["ENSP"] == ENSP) & (df1["funcName"].isin(l))
# df1[cond]
# # GO:0005785 has GO:0016020 as a parent
# # GO:0070618 has GO:0016020 as a parent
# l = ["GO:0070618", "GO:0005785", "GO:0016020"]
# cond = (df_22["funcName"].isin(l)) & (df_22["ENSP"] == ENSP)
# df_22[cond]
# etype = -22
# alpha = 0.325
# beta = 1.65
# df_22 = cst.rescale_scores(df1[df1["Etype"] == etype], alpha=alpha)
# l = ["GO:0070618", "GO:0005785", "GO:0016020"]
# ENSP = "9606.ENSP00000340944"
# cond = (df_22["funcName"].isin(l)) & (df_22["ENSP"] == ENSP)
# df_22[cond]
# l = ["GO:0070618", "GO:0005785", "GO:0016020"]
# ENSP = "9606.ENSP00000340944"
# df_22 = df_22[(df_22["Score"] >= 1.5) & (df_22["Rescaled_score"] <= beta)]
# df_22[cond]
# # GO:0005785 has GO:0016020 as a parent
# # GO:0070618 has GO:0016020 as a parent
# l = ["GOCC:0070618", "GOCC:0005785", "GOCC:0016020"]
# cond = (dft["funcName"].isin(l)) & (dft["ENSP"] == ENSP)
# dft[cond]
```
| github_jupyter |
## Train a Scikit-Learn Model using SageMaker Script Mode
#### Bring Your Own Script (BYOS)
### Create Train Script
```
%%file train.py
from sklearn.neighbors import KNeighborsClassifier
from os.path import join
from io import BytesIO
import pandas as pd
import numpy as np
import argparse
import logging
import pickle
import time
import json
import sys
import os
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
if 'SAGEMAKER_METRICS_DIRECTORY' in os.environ:
log_file_handler = logging.FileHandler(join(os.environ['SAGEMAKER_METRICS_DIRECTORY'], "metrics.json"))
log_file_handler.setFormatter("{'time':'%(asctime)s', 'name': '%(name)s', 'level': '%(levelname)s', 'message': '%(message)s'}")
logger.addHandler(log_file_handler)
def model_fn(model_dir):
    """SageMaker inference hook: unpickle and return the model stored as
    ``<model_dir>/model`` (written by train() below)."""
    print('[-------------- INSIDE MODEL FN --------------]')
    print(f'MODEL DIR: {model_dir}')
    # BUG FIX: use a context manager instead of pickle.load(open(...)) so the
    # file handle is closed deterministically (the original leaked it).
    with open(os.path.join(model_dir, 'model'), 'rb') as fh:
        model = pickle.load(fh)
    return model
def input_fn(request_body, request_content_type):
    """SageMaker inference hook: deserialize the request body.

    Only NumPy ``.npy`` payloads (``application/x-npy``) are accepted;
    anything else raises ValueError.
    """
    print('[-------------- INSIDE INPUT FN --------------]')
    print(f'REQUEST BODY: {request_body}')
    print(f'REQUEST CONTENT TYPE: {request_content_type}')
    # guard clause: reject unsupported content types up front
    if request_content_type != 'application/x-npy':
        raise ValueError('Content type must be application/x-npy')
    return np.load(BytesIO(request_body))
def predict_fn(input_data, model):
    """SageMaker inference hook: run the model on one deserialized sample.

    The request carries a single flat feature vector; scikit-learn estimators
    expect a 2-D matrix, so it is reshaped to one row before predicting.
    """
    print('[-------------- INSIDE PREDICT FN --------------]')
    print(f'INPUT DATA: {input_data}')
    print(f'MODEL: {model}')
    features = input_data.reshape(1, -1)
    return model.predict(features)
def output_fn(prediction, content_type):
    """SageMaker inference hook: serialize the prediction for the client.

    Only ``application/x-npy`` is supported; the prediction is returned as
    ``.npy`` bytes together with that content type.
    """
    print('[-------------- INSIDE OUTPUT FN --------------]')
    print(f'PREDICTION: {prediction}')
    print(f'CONTENT TYPE: {content_type}')
    # guard clause: reject unsupported accept headers up front
    if content_type != 'application/x-npy':
        raise ValueError('Accept header must be application/x-npy')
    payload = BytesIO()
    np.save(payload, prediction)
    return payload.getvalue(), 'application/x-npy'
def train():
    """Parse SageMaker arguments, fit a KNN classifier, and evaluate it.

    Reads the train/test CSVs from the channel directories SageMaker mounts
    (SM_CHANNEL_TRAIN / SM_CHANNEL_TEST), saves the fitted model into the
    model directory, and logs test accuracy.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output-data-dir', type=str, default=os.environ.get('SM_OUTPUT_DATA_DIR'))
    parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
    # hyperparameters
    parser.add_argument('--nneighbors', type=int, default=5)
    args = parser.parse_args()

    # ------------------------- YOUR MODEL TRAINING LOGIC STARTS HERE -------------------------
    # Load data from the location specified by args.train (In this case, an S3 bucket)
    print("------- [STARTING TRAINING] -------")
    columns = ['class', 'bmi', 'diastolic_bp_change', 'systolic_bp_change', 'respiratory_rate']
    features = columns[1:]
    train_df = pd.read_csv(os.path.join(args.train, 'train.csv'), names=columns)
    X_train = train_df[features]
    y_train = train_df['class']
    knn = KNeighborsClassifier(n_neighbors=args.nneighbors)
    knn.fit(X_train, y_train)
    # Save the trained Model inside the Container (context manager closes the
    # handle; the original `pickle.dump(..., open(...))` leaked it).
    with open(os.path.join(args.model_dir, 'model'), 'wb') as f:
        pickle.dump(knn, f)
    print("------- [TRAINING COMPLETE!] -------")

    print("------- [STARTING EVALUATION] -------")
    test_df = pd.read_csv(os.path.join(args.test, 'test.csv'), names=columns)
    # BUG FIX: evaluate on the held-out test set; the original built X_test /
    # y_test from train_df, so the reported accuracy was training accuracy.
    X_test = test_df[features]
    y_test = test_df['class']
    acc = knn.score(X_test, y_test)
    print('Accuracy = {:.4f}%'.format(acc * 100))
    logger.info('Test Accuracy: {:.4f}%'.format(acc * 100))
    print("------- [EVALUATION DONE!] -------")


if __name__ == '__main__':
    train()
```
### Imports
```
from sagemaker.sklearn.estimator import SKLearn
from sagemaker import get_execution_role
import pandas as pd
import sagemaker
```
### Essentials
```
# IAM execution role and SageMaker session used by all subsequent API calls.
role = get_execution_role()
session = sagemaker.Session()
```
### Train using SageMaker
```
# Upload the local train/test CSVs to the session's default S3 bucket; the
# return values are the S3 URIs SageMaker mounts as input channels.
WORK_DIRECTORY = '.././DATA'
train_data_s3_pointer = session.upload_data(f'{WORK_DIRECTORY}/train', key_prefix='byos-sklearn/train')
test_data_s3_pointer = session.upload_data(f'{WORK_DIRECTORY}/test', key_prefix='byos-sklearn/test')
# Script-mode estimator: train.py runs inside the prebuilt sklearn container.
estimator = SKLearn(entry_point='train.py',
                    instance_type='ml.m5.large',
                    instance_count=1,
                    framework_version='0.23-1',
                    role=role)
# Channel names 'train'/'test' become SM_CHANNEL_TRAIN / SM_CHANNEL_TEST in train.py.
estimator.fit({'train': train_data_s3_pointer, 'test': test_data_s3_pointer})
```
### Deploy Trained Model as SageMaker Endpoint
```
# Stand up a real-time inference endpoint backed by one ml.m5.large instance.
predictor = estimator.deploy(instance_type='ml.m5.large',
                             initial_instance_count=1)
```
### Test Real-Time Inference
```
# Grab one random row from the test split, drop the label column (column 0),
# and send the raw feature values to the endpoint.
df = pd.read_csv('.././DATA/test/test.csv', header=None)
test_df = df.sample(1)
test_df.drop(test_df.columns[[0]], axis=1, inplace=True)
test_df
test_df.values
prediction = predictor.predict(test_df.values)
prediction
```
| github_jupyter |
# Stiffness in Initial Value Problems
Copyright (C) 2020 Andreas Kloeckner
<details>
<summary>MIT License</summary>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</details>
```
import numpy as np
import matplotlib.pyplot as pt
```
Consider $y'=-100y+100t + 101$.
Exact solution: $y(t)=1+t+ce^{-100t}$.
Exact solution derivative: $y'(t)=1-100ce^{-100t}$.
```
def f(t, y):
    """Right-hand side of the stiff ODE: y' = -100*y + 100*t + 101."""
    slope = -100*y + 100*t + 101
    return slope
# Right endpoint of the integration interval used by every demo below.
t_end = 0.2

def plot_solution(t0, y0):
    """Plot the exact solution through (t0, y0) together with the point itself."""
    # Fit the free constant c so y(t) = 1 + t + c*exp(-100 t) passes
    # through (t0, y0):  c = (y0 - 1 - t0) / exp(-100 t0).
    c = (y0-1-t0)/np.exp(-100*t0)
    t_mesh = np.linspace(t0, t_end, 1000)
    solution = 1+t_mesh+c*np.exp(-100*t_mesh)

    pt.plot(t_mesh, solution, label="exact")
    pt.plot(t0, y0, "ko")

# Several exact trajectories: the transient decays fast, so they all
# collapse rapidly onto y = 1 + t (the source of the stiffness).
plot_solution(t0=0, y0=1)
plot_solution(t0=0, y0=1.2)
plot_solution(t0=0, y0=-0.5)
plot_solution(t0=0.05, y0=-0.5)
```
Here's a helper function that uses a time stepper in the form of a `step_function` to numerically solve an ODE and plot the numerical solution:
```
def integrate_ode(step_function, t0, y0, h):
    """March from (t0, y0) with fixed step h using *step_function*, then plot
    the numerical trajectory (labeled by the stepper's function name)."""
    times = [t0]
    ys = [y0]
    # Keep stepping while the last accepted time is still inside the
    # interval; the 1e-14 tolerance guards against float round-off at t_end.
    while times[-1] <= t_end + 1e-14:
        current_t = times[-1]
        next_y = step_function(current_t, ys[-1], h)
        ys.append(next_y)
        times.append(current_t + h)

    pt.plot(times, ys, label=step_function.__name__)
    pt.xlim([t0, t_end])
    pt.ylim([-1, 2])
    pt.legend(loc="best")
## Using an Explicit Method
First, implement `forward_euler_step(tk, yk, h)`:
```
#clear
def forward_euler_step(tk, yk, h):
    """One explicit (forward) Euler step: y_{k+1} = y_k + h*f(t_k, y_k)."""
    slope = f(tk, yk)
    return yk + h*slope
# Forward Euler on the stiff problem.  For y' = -100 y + ... stability
# requires |1 - 100 h| < 1, i.e. h < 0.02; growing h toward that limit
# makes the numerical solution oscillate and eventually blow up.
t0 = 0.05
y0 = -0.5
h = 0.008  # start this at 0.001, then grow
plot_solution(t0=t0, y0=y0)
integrate_ode(forward_euler_step, t0=t0, y0=y0, h=h)
```
* What's the main challenge here?
## Using an Implicit Method
Next, implement `backward_euler_step(tk, yk, h)`:
```
#clear
def backward_euler_step(tk, yk, h):
    """One implicit (backward) Euler step for y' = -100*y + 100*t + 101.

    Solving y_{k+1} = y_k + h*(-100*y_{k+1} + 100*t_{k+1} + 101) for
    y_{k+1} gives the closed form below.
    """
    t_next = tk + h
    numerator = yk + h*(100*t_next + 101)
    return numerator / (1 + 100*h)
# Backward Euler: the amplification factor 1/(1 + 100 h) is below 1 for any
# h > 0, so even h = 0.05 (far beyond forward Euler's limit) stays stable.
t0 = 0.05
y0 = -0.5
h = 0.05  # start this at 0.001, then grow
plot_solution(t0=t0, y0=y0)
integrate_ode(backward_euler_step, t0=t0, y0=y0, h=h)
pt.xlim([t0, t_end])
pt.ylim([-1, 2])
pt.legend()
```
| github_jupyter |
# Python Text Basics Assessment
Welcome to your assessment! Complete the tasks described in bold below by typing the relevant code in the cells.<br>
You can compare your answers to the Solutions notebook provided in this folder.
## f-Strings
#### 1. Print an f-string that displays `NLP stands for Natural Language Processing` using the variables provided.
```
abbr = 'NLP'
full_text = 'Natural Language Processing'
# Enter your code here:
# First attempt: plain string concatenation (works, but is not an f-string).
standard= " stands for "
abbr+standard+full_text
##Dictionary = [(abbr),(full_text)]
##Dictionary
# f-string solution, as requested by the exercise:
print(f"{abbr} stands for {full_text}")
```
## Files
#### 2. Create a file in the current working directory called `contacts.txt` by running the cell below:
```
%%writefile contacts.txt
First_Name Last_Name, Title, Extension, Email
```
#### 3. Open the file and use .read() to save the contents of the file to a string called `fields`. Make sure the file is closed at the end.
```
pwd
# Write your code here:
# Open contacts.txt in the default read-only text mode.
myfile = open('contacts.txt')
# Run fields to see the contents of contacts.txt:
fields = myfile.read()
print(fields)
# Closing the file
myfile.close()
```
## Working with PDF Files
#### 4. Use PyPDF2 to open the file `Business_Proposal.pdf`. Extract the text of page 2.
```
# Perform import
import PyPDF2
# Open the file as a binary object
Myfile2 = open('Business_Proposal.pdf', mode='rb')
# Use PyPDF2 to read the text of the file
# NOTE(review): PdfFileReader / numPages / getPage / extractText are the
# legacy PyPDF2 1.x API; PyPDF2 3.x renamed them (PdfReader, len(reader.pages),
# pages[i], extract_text) -- confirm the installed version.
PDF_reader = PyPDF2.PdfFileReader(Myfile2)
PDF_reader.numPages
# Get the text from page 2 (CHALLENGE: Do this in one step!)
page_two_text = PDF_reader.getPage(1)  # pages are zero-indexed, so index 1 is page 2
Pg2=page_two_text.extractText()
# Close the file
Myfile2.close()
# Print the contents of page_two_text
print(Pg2)
```
#### 5. Open the file `contacts.txt` in append mode. Add the text of page 2 from above to `contacts.txt`.
#### CHALLENGE: See if you can remove the word "AUTHORS:"
```
import re

# Append the page-2 text to contacts.txt, then rewind and show the result.
Myfile3 = open("contacts.txt", mode="a+")
Myfile3.write(f"{Pg2}")
Myfile3.seek(0)
# BUG FIX: the original did `My3 = print(Myfile3.read())` followed by
# `print(My3)` -- print() returns None, so that emitted a stray "None".
print(Myfile3.read())
Myfile3.close()  # close the handle (the original never closed it)

# CHALLENGE Solution (re-run the %%writefile cell above to obtain an unmodified contacts.txt file):
# "AUTHORS:" is 8 characters, so Pg2[8:] drops that leading word.
with open("contacts.txt", "a+") as f:
    f.write(Pg2[8:])
    f.seek(0)
    print(f.read())
```
## Regular Expressions
#### 6. Using the `page_two_text` variable created above, extract any email addresses that were contained in the file `Business_Proposal.pdf`.
```
import re

# Enter your regex pattern here. This may take several tries!
# BUG FIX: the original pattern "[\w]+@+[\w]+.[\w]+" had two defects:
#   * "@+" matches one OR MORE @ signs instead of exactly one;
#   * the unescaped "." matches ANY character, not a literal dot.
# Raw string with an escaped dot; allows dots/hyphens in names and domains.
pattern = r"[\w.-]+@[\w-]+\.[\w.-]+"
print(Pg2)
print(pattern)

email_list = re.findall(pattern, Pg2)
print(email_list)
```
### Great job!
| github_jupyter |
```
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt

from sqlalchemy import create_engine, inspect

# SQLite database with Hawaii climate observations (measurement + station tables).
engine = create_engine("sqlite:///../Resources/hawaii.sqlite")

#Data inspection: list the tables and each table's column names/types.
inspector = inspect(engine)
print(inspector.get_table_names())
columns = inspector.get_columns("measurement")
for c in columns:
    print(c["name"], c["type"])
columns = inspector.get_columns("station")
for c in columns:
    print(c["name"], c["type"])

# Peek at the station table (LIMIT keeps the result set small).
query = """
Select
    *
from
    station
limit 100
"""
conn = engine.connect()
df = pd.read_sql(query, con=conn)
conn.close()
df.tail()
```
# Precipitation Analysis
```
# Load the last-year precipitation query from an external .sql file.
getLastYearMeasures = ""
with open("getLastYearMeasures.sql", "r") as f:
    getLastYearMeasures = f.read()
getLastYearMeasures

conn = engine.connect()
df = pd.read_sql(getLastYearMeasures, con=conn)
conn.close()

# Parse the date column so it can serve as a datetime index/axis.
df["date"] = pd.to_datetime(df["date"])
df.head()

#Booth's notes from this in office hours
#######
# only had to do if bringing back all rows
# maxDate = df.loc[len(df) - 1, "date"]
# startDate = maxDate - datetime.timedelta(days=365)

# #filter to last year
# df_sub = df.loc[df["date"] >= startDate].reset_index(drop=True)
# df_sub.head()

df.set_index("date").plot()

# Larger standalone precipitation-vs-time plot.
plt.figure(figsize=(10,6))
plt.plot(df.date, df.prcp)
plt.title("Precipitation in Hawaii Over Last 12 Months", fontweight="bold", fontsize=18)
plt.ylabel("Precipitation (in)", fontsize=18)
plt.xlabel("")
plt.show()

# Summary statistics of the precipitation values.
df.describe()
```
# Stations
```
query = """
SELECT
count(*) as station_count
FROM
station
"""
conn = engine.connect()
df = pd.read_sql(query, con=conn)
conn.close()
df
query = """
SELECT
s.station,
count(*) as tot_obs
FROM
station s
JOIN measurement m on s.station = m.station
GROUP BY s.station
ORDER BY
count(*) desc
"""
conn = engine.connect()
df = pd.read_sql(query, con=conn)
conn.close()
df
query = """
SELECT
m.date,
m.prcp,
m.tobs,
s.station,
s.name
FROM
measurement m
join station s on m.station = s.station
WHERE
date >= (
SELECT
date(MAX(date), '-365 day')
FROM
measurement
)
ORDER BY
date
"""
conn = engine.connect()
df = pd.read_sql(query, con=conn)
conn.close()
df
df.groupby("station").size().sort_values(ascending=False)
#most active station
mostActive = df.groupby("station").size().sort_values(ascending=False).index[0]
mostActive
df.loc[df.station == mostActive, "tobs"].reset_index(drop=True).plot(kind="hist")
df_sub = df.loc[df.station == mostActive].reset_index(drop=True)
plt.figure(figsize=(10,6))
plt.hist(df_sub.tobs, bins=12)
plt.title(f"Temperature in Hawaii Over Last 12 Months From Station {mostActive}", fontweight="bold", fontsize=18)
plt.ylabel("Count", fontsize=16)
plt.xlabel("Temperature (F)", fontsize=16)
plt.show()
df_sub.describe()
```
| github_jupyter |
# Interactome Construction and Analysis
Get data from local database and create the interactome
```
#Include libraries
import MySQLdb
import networkx as nx
from matplotlib import pylab as plt
import numpy as np
%matplotlib inline
def get_ppi(lcc):
    '''
    Extract the protein-protein interaction (PPI) network from the local
    GenesGO MySQL database and return it as an undirected networkx Graph
    whose nodes are entrez gene IDs.

    lcc -- if 1, reduce the result to its largest connected component
           (this also removes unconnected single nodes); any other value
           returns the full graph.
    '''
    # Open database connection
    # NOTE(review): host and password are placeholders; fill them in before running.
    db = MySQLdb.connect("<menchelab_server>", "readonly", "<MencheLabPW>", "GenesGO")
    # prepare a cursor object using cursor() method
    cursor = db.cursor()

    # Edge query: keep literature-supported edges (author != ''), drop
    # self-loops, and restrict both endpoints to protein-coding,
    # immunoglobulin, or T cell receptor loci.  The trailing comments record
    # how many links each clause contributes.
    sql = """
    SELECT
        e.entrez_1,
        e.entrez_2,
        g1.Locus_Type,
        g1.Locus_Group,
        g2.Locus_Type,
        g2.Locus_Group
    FROM networks.PPI_hippie2017 e
    INNER JOIN GenesGO.hgnc_complete g1 ON e.entrez_1 = g1.Entrez_Gene_ID_NCBI
    INNER JOIN GenesGO.hgnc_complete g2 ON e.entrez_2 = g2.Entrez_Gene_ID_NCBI
    WHERE
    (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'T cell receptor gene' AND g2.Locus_Type = 'T cell receptor gene') # 0 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'immunoglobulin gene' AND g2.Locus_Type = 'immunoglobulin gene') # 4 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'immunoglobulin gene' AND g2.Locus_Type = 'T cell receptor gene') # 0 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'T cell receptor gene' AND g2.Locus_Type = 'immunoglobulin gene') # 0 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'T cell receptor gene' AND g2.Locus_Type = 'gene with protein product') # 17 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'gene with protein product' AND g2.Locus_Type = 'T cell receptor gene') # 1 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'immunoglobulin gene' AND g2.Locus_Type = 'gene with protein product') # 115 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'gene with protein product' AND g2.Locus_Type = 'immunoglobulin gene') # 295 links
    OR (e.author != '' AND e.entrez_1 != e.entrez_2
    AND g1.Locus_Type = 'gene with protein product' AND g2.Locus_Type = 'gene with protein product') # 309602 links
    """
    try:
        # execute SQL query
        cursor.execute(sql)
        data = cursor.fetchall()
    except:
        # NOTE(review): bare except -- if the query fails, `data` is never
        # assigned and the loop below raises NameError; consider re-raising.
        print('SQL error')
    db.close()

    # Collect the unique node set from both edge endpoints.
    l_nodes = []
    for x in data:
        l_nodes.append(x[0])
        l_nodes.append(x[1])
    l_nodes = list(set(l_nodes))

    # Build the undirected graph; duplicate edges collapse automatically.
    G = nx.Graph()
    G.add_nodes_from(l_nodes)
    for x in data:
        G.add_edge(x[0], x[1])

    print 'PPI All:'
    print 'Number of genes found: %d' %len(G.nodes())
    print 'Number of interactions found: %d' %len(G.edges())

    if lcc == 1:
        Nl_l = sorted(nx.connected_components(G)) # generates list of components node lists
        l = [len(x) for x in Nl_l] # returns list of length of node lists
        idx = l.index(max(l)) # find the index of the maximal length i.e. lcc
        Nlcc = Nl_l[idx] # pin down lcc
        G_lcc = G.subgraph(Nlcc) # extract lcc graph
        G = G_lcc.copy()

        print 'PPI Only Largest Connected Component:'
        print 'Number of genes found: %d' %len(G.nodes())
        print 'Number of interactions found: %d' %len(G.edges())
    else:
        pass

    return G
#1 = get only biggest LCC; Remove single not connected nodes
PPI = get_ppi(1)
# Persist the interactome for the downstream analysis notebooks.
nx.write_gml(PPI, '../results/Interactome_Construction_And_Analysis/Human_Interactome.gml')
```
## Degree Distributions
```
#Extract degrees from the PPI
degrees_PPI = [x[1] for x in nx.degree(PPI)]
#Get the unique PPI degree steps
degrees_PPI_unique = list(set(degrees_PPI))
degrees_PPI_unique.sort()
#degree for x axis (sorted from small to biggest)
degreesPPI = []
#Normal distribution (i.e. P(k = x))
degreeDistributionPPI = []
#Cumulative distribution (i.e. P(k > x))
cumulativedegreeDistributionPPI = []
for degree in degrees_PPI_unique:
degreesPPI.append(degree)
degreeDistributionPPI.append(degrees_PPI.count(degree)/float(len(degrees_PPI)))
cumulativedegreeDistributionPPI.append(len([x for x in degrees_PPI if x >= degree]) / float(len(degrees_PPI)))
```
#### Normal degree distribution (log/log scale)
```
# Degree distribution P(k) on log-log axes (an approximately straight line
# indicates heavy-tailed / scale-free-like behaviour).
plt.scatter(degreesPPI, degreeDistributionPPI, c='#D2323C', alpha=0.4)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Degree', fontsize=14)
plt.ylabel('P(k)', fontsize=14)
plt.xticks(fontsize=12, rotation=0)
plt.yticks(fontsize=12, rotation=0)
plt.ylim(10 ** -4.1, 1)
plt.xlim(0,1000)
#plt.title('Degree Distribution')
plt.savefig('../results/Interactome_Construction_And_Analysis/Scatter_DegreeDistribution_LogLog.pdf',format='pdf')
plt.show()
plt.close()
```
#### Cumulative degree distribution (log/log scale)
```
# Cumulative degree distribution P(k' >= k) on log-log axes; smoother than
# the raw distribution because each point aggregates the tail.
plt.scatter(degreesPPI, cumulativedegreeDistributionPPI, c='#D2323C', alpha=0.4)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Degree', fontsize=14)
plt.ylabel('P(x >= k)', fontsize=14)
plt.xticks(fontsize=12, rotation=0)
plt.yticks(fontsize=12, rotation=0)
plt.ylim(10 ** -4, 1)
#plt.title('Cumulative Degree Distribution')
plt.savefig('../results/Interactome_Construction_And_Analysis/Scatter_CumulativeDegreeDistribution_LogLog.pdf',format='pdf')
plt.show()
plt.close()
```
## Overall PPI stats
```
# Overall summary statistics of the interactome.
# NOTE(review): average_shortest_path_length and diameter traverse all node
# pairs -- expect these two lines to dominate the cell's runtime.
print 'Number of nodes N: %d' %len(PPI.nodes())
print 'Number of edges M: %d' %len(PPI.edges())
print 'Mean degree <k>: %.2f' %np.mean(degrees_PPI)
print 'Mean clustering <c>: %.2f' %nx.average_clustering(PPI)
print 'Mean average shortest path <l>: %.2f' %nx.average_shortest_path_length(PPI)
print 'Diameter dmax: %.2f' %nx.diameter(PPI)
```
| github_jupyter |
## What is Datashader?
**Datashader turns even the largest datasets into images, faithfully preserving the data's distribution.**
Datashader is an [open-source](https://github.com/bokeh/datashader/) Python 2 and 3 library for analyzing and visualizing large datasets. Specifically, Datashader is designed to "rasterize" or "aggregate" datasets into regular grids that can be analyzed further or viewed as images, making it simple and quick to see the properties and patterns of your data. Datashader can plot a billion points in a second or so on a 16GB laptop, and scales up easily to out-of-core, distributed, or GPU processing for even larger datasets.
This page of the getting-started guide will give a simple example to show how it works, and the following page will show how to use Datashader as a standalone library for generating arrays or images directly
([2-Pipeline](2-Pipeline.ipynb)). Next we'll show how to use Datashader as a component in a larger visualization system like [HoloViews](http://holoviews.org) or [Bokeh](http://bokeh.pydata.org) that provides interactive plots with dynamic zooming, labeled axes, and overlays and layouts ([3-Interactivity](3-Interactivity.ipynb)). More detailed information about each topic is then provided in the [User Guide](../user_guide/).
## Example: NYC taxi trips
To illustrate how this process works, we will demonstrate some of the key features of Datashader using a standard "big-data" example: millions of taxi trips from New York City, USA. First let's import the libraries we are going to use and then read the dataset.
```
import datashader as ds, pandas as pd, colorcet as cc
# Load only the two dropoff-coordinate columns (Web Mercator) of the NYC
# taxi dataset; skipping the other columns keeps memory use down.
df = pd.read_csv('../data/nyc_taxi.csv', usecols=['dropoff_x', 'dropoff_y'])
df.head()
```
Here you can see that we have a simple columnar dataset with x and y dropoff locations (in Web Mercator coordinates) for each of the 10 million taxi trips included; other columns were skipped during loading. With Datashader, we can choose what we want to plot on the `x` and `y` axes and see the full data immediately, with no parameter tweaking, magic numbers, subsampling, or approximation, up to the resolution of the display:
```
# Aggregate every dropoff point onto a default-size canvas, then shade the
# per-pixel counts with the 'fire' colormap on a black background.
agg = ds.Canvas().points(df, 'dropoff_x', 'dropoff_y')
ds.tf.set_background(ds.tf.shade(agg, cmap=cc.fire), "black")
```
Here you can immediately see that the data points are aligned to a street grid, that some areas have much more traffic than others, and that the quality of the signal varies spatially (with some areas having blurry patterns that indicate GPS errors, perhaps due to tall buildings). Getting a plot like this with other approaches would take quite a bit of time and effort, but with Datashader it appears in milliseconds without trial and error.
The output above is just a bare image, which is all that Datashader knows how to generate directly. But Datashader can integrate closely with Bokeh, HoloViews, and GeoViews, which makes it simple to allow interactive zooming, axis labeling, overlays and layouts, and complex web apps. For example, making a zoomable interactive overlay on a geographic map requires just a few more lines of code:
```
import holoviews as hv
from holoviews.element.tiles import EsriImagery
from holoviews.operation.datashader import datashade
hv.extension('bokeh')

# Semi-transparent satellite tile layer to overlay the shaded points on.
map_tiles = EsriImagery().opts(alpha=0.5, width=900, height=480, bgcolor='black')
points = hv.Points(df, ['dropoff_x', 'dropoff_y'])
# datashade() re-aggregates on every zoom/pan while a live kernel is attached.
taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cc.fire, width=900, height=480)
# Overlay (*) composes the tiles and the datashaded points into one plot.
map_tiles * taxi_trips
```
You can select the "Wheel Zoom" tool on the right and then do panning and zooming (with the scroll bar). As long as you have a network connection, the maps will update as you zoom, but the datashaded image will only update if you have a live Python process running. If you do have Python "live", each time you zoom in, the data will be re-aggregated at the new zoom level, converted to an image, and displayed embedded on the map data, making it simple to explore and understand the data.
At the most basic level, Datashader can accept scatterplot points (as above), line segments (for time series, and trajectories), areas (for filled-area plots), polygons (for choropleths), or gridded data (rasters, quadmeshes, and trimeshes to be regridded), and can turn each of these into a regularly sampled array or the corresponding pixel-based image. The rest of this getting-started guide shows how to go from your data to either images or interactive plots, as simply as possible. The next [getting-started section](2-Pipeline.ipynb) breaks down each of the steps taken by Datashader, using a synthetic dataset so that you can see precisely how the data relates to the images. The [user guide](../user_guide/) then explains each of the steps in much more detail.
| github_jupyter |
<h1>Understanding the Computation for Alpha and creating a function</h1>
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import fft
import netCDF4 as nc
import cftime
import matplotlib.animation as animation
%matplotlib widget
# Get Data set from mission due to better notes on when in breaking
mission_nc_path = '../microSWIFT_data/mission_53/mission_53.nc'
mission_dataset = nc.Dataset(mission_nc_path, mode='r')

# Define Accelerations (IMU channels; time stays in raw numeric units here)
accel_time = mission_dataset['microSWIFT_31']['IMU']['time']
accel_x = mission_dataset['microSWIFT_31']['IMU']['accel_x']
accel_y = mission_dataset['microSWIFT_31']['IMU']['accel_y']
accel_z = mission_dataset['microSWIFT_31']['IMU']['accel_z']

# Define GPS (time converted to python datetimes; x/y in FRF coordinates)
gps_time = cftime.num2pydate(mission_dataset['microSWIFT_31']['GPS']['time'], units=mission_dataset['microSWIFT_31']['GPS']['time'].units, calendar=mission_dataset['microSWIFT_31']['GPS']['time'].calendar)
xFRF= mission_dataset['microSWIFT_31']['GPS']['x_frf']
yFRF = mission_dataset['microSWIFT_31']['GPS']['y_frf']
# Sort Data into short windows of 32 samples
samples_per_window = 32 # This value is chosen from the Brown 2018 paper
imu_sampling_freq = 12 # Units are Hz
time_per_window = samples_per_window * (1/imu_sampling_freq) # Time per each window
num_samples = len(accel_x)
num_windows = int(num_samples/samples_per_window)
num_samples_to_use = int(num_windows * samples_per_window)

# Reshape the python array to be windowed.
# Fortran ('F') order makes each COLUMN one window of consecutive samples,
# so window n is addressed as arr[:, n] throughout the rest of the notebook.
accel_time_windowed = accel_time[:num_samples_to_use].reshape(samples_per_window, num_windows, order='F')
accel_x_windowed = accel_x[:num_samples_to_use].reshape(samples_per_window, num_windows, order='F')
accel_y_windowed = accel_y[:num_samples_to_use].reshape(samples_per_window, num_windows, order='F')
accel_z_windowed = accel_z[:num_samples_to_use].reshape(samples_per_window, num_windows, order='F')
# Find average GPS location of each acceleration window.
xFRF_windowed = np.empty(num_windows)
yFRF_windowed = np.empty(num_windows)
for n in np.arange(num_windows):
    # Find max and min times in each window
    window_time_min = cftime.num2pydate(np.min(accel_time_windowed[:,n]), units=mission_dataset['microSWIFT_31']['IMU']['time'].units, calendar=mission_dataset['microSWIFT_31']['IMU']['time'].calendar)
    window_time_max = cftime.num2pydate(np.max(accel_time_windowed[:,n]), units=mission_dataset['microSWIFT_31']['IMU']['time'].units, calendar=mission_dataset['microSWIFT_31']['IMU']['time'].calendar)
    # Find GPS time indices that fall inside this window, then average
    # their positions (GPS samples more slowly than the IMU).
    gps_ind_in_window = np.squeeze(np.argwhere((gps_time > window_time_min ) & (gps_time < window_time_max)))
    xFRF_windowed[n] = np.mean(xFRF[gps_ind_in_window])
    yFRF_windowed[n] = np.mean(yFRF[gps_ind_in_window])

# Compute Time of each window as the mean of its sample times
window_time = np.empty(num_windows)
for n in np.arange(num_windows):
    window_time[n] = np.mean(accel_time_windowed[:,n])

# Convert times of windows to datetime objects
window_time_datetime = cftime.num2pydate(window_time, units=mission_dataset['microSWIFT_31']['IMU']['time'].units, calendar=mission_dataset['microSWIFT_31']['IMU']['time'].calendar)
# Set up figure properties
# BUG FIX: plt.subplots() with no arguments returns a SINGLE Axes, so
# unpacking into [ax1, ax2] raises a TypeError; request two rows explicitly
# (only ax2 is used here, matching the original layout intent).
fig, [ax1, ax2] = plt.subplots(2)
ax2.set(ylim=(-25, 25))
ax2.set_xlabel('Sample Number in Window')
ax2.set_ylabel('Acceleration [m/s^2]')
# Plot the first window of each acceleration component.
accel_x_line, = ax2.plot(accel_x_windowed[:, 0], color='r', lw=2, label='X')
accel_y_line, = ax2.plot(accel_y_windowed[:, 0], color='b', lw=2, label='Y')
accel_z_line, = ax2.plot(accel_z_windowed[:, 0], color='g', lw=2, label='Z')
ax2.legend()

# def animate(n):
#     accel_x_line.set_ydata(accel_x_windowed[:, n])
#     accel_y_line.set_ydata(accel_y_windowed[:, n])
#     accel_z_line.set_ydata(accel_z_windowed[:, n])

# anim = animation.FuncAnimation(fig, animate, interval=100, frames=num_windows - 1)
# anim.save('./accel_gif.gif')
# plt.show()

# Full (unwindowed) x-acceleration time series for context.
accel_time_datetime= cftime.num2pydate(accel_time, units=mission_dataset['microSWIFT_31']['IMU']['time'].units, calendar=mission_dataset['microSWIFT_31']['IMU']['time'].calendar)
fig, ax = plt.subplots()
ax.plot(accel_x)
print(accel_x[:20])
# For each window - compute the variance above 2 Hz and add the three
# components together to get the breaker index alpha.
alpha = np.empty(num_windows)
window_time = np.empty(num_windows)
# Frequency axis for a 32-sample FFT at 12 Hz; only bins above 2 Hz count.
fft_freq = fft.fftshift(fft.fftfreq(samples_per_window, d=1/imu_sampling_freq))
ind_freq_greater_than_2hz = np.argwhere(fft_freq > 2)

# Compute Blackman Taper Window function (reduces spectral leakage)
blackman_window = np.blackman(samples_per_window)

for n in np.arange(num_windows):
    # X acceleration: demean, taper, take the scaled FFT magnitude, and sum
    # the energy in bins above 2 Hz.
    accel_x_demean = accel_x_windowed[:,n] - np.mean(accel_x_windowed[:,n])
    blackman_windowed_accel_x = blackman_window * accel_x_demean
    accel_x_fft = fft.fftshift((2/samples_per_window) * np.abs(fft.fft(blackman_windowed_accel_x)))
    accel_x_fft_greater_than_2hz = np.sum(accel_x_fft[ind_freq_greater_than_2hz])

    # Y acceleration (same pipeline)
    accel_y_demean = accel_y_windowed[:,n] - np.mean(accel_y_windowed[:,n])
    blackman_windowed_accel_y = blackman_window * accel_y_demean
    accel_y_fft = fft.fftshift((2/samples_per_window) * np.abs(fft.fft(blackman_windowed_accel_y)))
    accel_y_fft_greater_than_2hz = np.sum(accel_y_fft[ind_freq_greater_than_2hz])

    # Z acceleration (same pipeline)
    accel_z_demean = accel_z_windowed[:,n] - np.mean(accel_z_windowed[:,n])
    blackman_windowed_accel_z = blackman_window * accel_z_demean
    accel_z_fft = fft.fftshift((2/samples_per_window) * np.abs(fft.fft(blackman_windowed_accel_z)))
    accel_z_fft_greater_than_2hz = np.sum(accel_z_fft[ind_freq_greater_than_2hz])

    # Sum all variance greater than 2 Hz values to compute alpha
    alpha[n] = accel_x_fft_greater_than_2hz + accel_y_fft_greater_than_2hz + accel_z_fft_greater_than_2hz
    window_time[n] = np.mean(accel_time_windowed[:,n])
# Plot Alpha Parameters
fig_var, [ax1, ax2] = plt.subplots(2)
# Plot Demeaned accelerations
ax1.plot(accel_time_datetime, accel_x - np.mean(accel_x), label='X (2G Range)')
ax1.plot(accel_time_datetime, accel_y - np.mean(accel_y), label='Y (2G Range)')
ax1.plot(accel_time_datetime, accel_z - np.mean(accel_z), label='Z (2G Range)')
ax1.set_ylabel('Accelerations [m/s^2]')
ax1.legend()
ax1.set_xlabel('Time')
# Plot Alpha Parameter
ax2.plot(window_time_datetime, alpha)
ax2.set_xlabel('Time')
ax2.set_ylabel('alpha [m/s^2]')
# Compute Number of times in this burst that the accelerometer measured at edge of 2G range
max_val_2g = np.max([np.max(np.abs(accel_x[:])), np.max(np.abs(accel_y[:])), np.max(np.abs(accel_z[:]))])
vals_above_2g_x = np.argwhere(np.abs(accel_x[:]) >= max_val_2g).shape[0]
vals_above_2g_y = np.argwhere(np.abs(accel_y[:]) >= max_val_2g).shape[0]
vals_above_2g_z = np.argwhere(np.abs(accel_z[:]) >= max_val_2g).shape[0]
vals_above_2g = vals_above_2g_x + vals_above_2g_y + vals_above_2g_z
print(vals_above_2g)
print(max_val_2g)
# Compute Histogram of each window (per-window acceleration histograms;
# bin edges are recomputed per window by np.histogram)
num_bins = 20
accel_x_windowed_histogram = np.empty((num_bins, num_windows))
accel_y_windowed_histogram = np.empty((num_bins, num_windows))
accel_z_windowed_histogram = np.empty((num_bins, num_windows))
for n in np.arange(num_windows):
    accel_x_windowed_histogram[:,n], _ = np.histogram(accel_x_windowed[:,n], num_bins)
    accel_y_windowed_histogram[:,n], _ = np.histogram(accel_y_windowed[:,n], num_bins)
    accel_z_windowed_histogram[:,n], _ = np.histogram(accel_z_windowed[:,n], num_bins)

# Fixing random state for reproducibility
np.random.seed(19680801)
# Fixing bin edges
HIST_BINS = np.linspace(-4, 4, 100)

# histogram our data with numpy (synthetic demo data for the animation below)
data = np.random.randn(1000)
n, _ = np.histogram(data, HIST_BINS)
def prepare_animation(bar_container):
    """Return a FuncAnimation callback that redraws *bar_container* each frame."""
    def animate(frame_number):
        # simulate new data coming in and re-bin it against the fixed edges
        sample = np.random.randn(1000)
        counts, _ = np.histogram(sample, HIST_BINS)
        for height, patch in zip(counts, bar_container.patches):
            patch.set_height(height)
        return bar_container.patches
    return animate
# Plot Histogram animation: draw the initial histogram, then let the
# prepare_animation() callback update the bar heights for 50 frames.
fig, ax = plt.subplots()
_, _, bar_container = ax.hist(data, HIST_BINS, lw=1,
                              ec="yellow", fc="green", alpha=0.5)
ax.set_ylim(top=55)  # set safe limit to ensure that all data is visible.
ani = animation.FuncAnimation(fig, prepare_animation(bar_container), 50,
                              repeat=False, blit=True)
plt.show()
```
| github_jupyter |
```
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Vertex client library for Python: Custom training image classification model for batch prediction with explanation
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/explainable_ai/gapic-custom_image_classification_batch_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/explainable_ai/gapic-custom_image_classification_batch_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom image classification model for batch prediction with explanation.
### Dataset
The dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck.
### Objective
In this tutorial, you create a custom trained model, with a training pipeline, from a Python script in a Google prebuilt Docker container using the Vertex client library for Python, and then do a batch prediction with explanations on the uploaded model. Alternatively, you can create custom trained models using `gcloud` command-line tool or online using Cloud Console.
The steps performed include:
- Create a Vertex custom job for training a model.
- Train the TensorFlow model.
- Retrieve and load the model artifacts.
- View the model evaluation.
- Set explanation parameters.
- Upload the model as a Vertex `Model` resource.
- Make a batch prediction with explanations.
### Costs
This tutorial uses billable components of Google Cloud (GCP):
* Vertex AI
* Cloud Storage
Learn about [Vertex AI
pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
## Installation
Install the latest version of Vertex client library for Python.
```
import os
import sys
# Google Cloud Notebook: the base environment is read-only for notebook
# users, so packages must be installed with --user there.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""
# NOTE: lines starting with "!" are IPython shell escapes, not Python.
! pip install --upgrade google-cloud-aiplatform $USER_FLAG
```
Install the latest GA version of *google-cloud-storage* library as well.
```
! pip install -U google-cloud-storage $USER_FLAG
# The remaining installs run only under the automated test harness
# (IS_TESTING is set by CI, not by interactive users).
if os.getenv("IS_TESTING"):
    ! pip install --upgrade tensorflow $USER_FLAG
if os.getenv("IS_TESTING"):
    ! apt-get update && apt-get install -y python3-opencv
    ! pip install --upgrade opencv-python $USER_FLAG
```
### Restart the kernel
Once you've installed the Vertex client library for Python and *google-cloud-storage* library, you need to restart the notebook kernel so it can find the packages.
```
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs so freshly installed
    # packages become importable; do_shutdown(True) requests a restart
    # rather than a plain shutdown.
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
```
## Before you begin
### GPU runtime
*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
3. [Enable the Vertex AI APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
4. [The Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
5. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.
```
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
# Fall back to the gcloud default project when no ID was entered above.
# The "!" escape runs a shell command and captures stdout lines as a list.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# Point all subsequent gcloud commands at this project.
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
```
# Region used for all Vertex AI operations in this notebook.
REGION = "us-central1"  # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
```
from datetime import datetime

# Timestamp suffix appended to resource names to avoid collisions between
# concurrent sessions sharing a project.
TIMESTAMP = "{:%Y%m%d%H%M%S}".format(datetime.now())
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
**Click Create service account**.
In the **Service account name** field, enter a name, and click **Create**.
In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
```
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code (the VM's
# service account is already authenticated).
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your Google Cloud
    # account.
    elif not os.getenv("IS_TESTING"):
        # %env sets an environment variable in the kernel process.
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you submit a custom training job using the Vertex client library for Python, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex runs
the code from this package. In this tutorial, Vertex also saves the
trained model that results from your job in the same bucket. You can then
create an `Endpoint` resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
# Default bucket: derive a unique name from the project ID and session
# timestamp. The original concatenated PROJECT_ID and "aip-" with no
# separator ("gs://<project>aip-<ts>"); a hyphen is inserted so the
# generated bucket name is readable and clearly delimited.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
# Create the bucket in the chosen region (fails if the name is taken).
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
# List bucket contents to confirm access.
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
#### Import Vertex client library for Python
Import the Vertex client library for Python into your Python environment.
```
import time
import google.cloud.aiplatform_v1 as aip
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
```
#### Vertex AI constants
Setup up the following constants for Vertex AI:
- `API_ENDPOINT`: The Vertex AI service endpoint for dataset, model, job, pipeline and endpoint services.
- `PARENT`: The Vertex AI location root path for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` resources.
```
# API service endpoint -- Vertex AI endpoints are regional.
API_ENDPOINT = f"{REGION}-aiplatform.googleapis.com"

# Location root path under which every dataset, model and endpoint
# resource of this project lives.
PARENT = f"projects/{PROJECT_ID}/locations/{REGION}"
```
#### Set hardware accelerators
You can set hardware accelerators for training and prediction.
Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
Otherwise specify `(None, None)` to use a container image to run on a CPU.
Learn [which accelerators are available in your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).
*Note*: TF releases before 2.3 for GPU support will fail to load the custom trained model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom trained models, use a container image for TF 2.3 with GPU support.
```
# Training accelerators: the test harness may override the GPU count via
# IS_TESTING_TRAIN_GPU; otherwise default to a single NVIDIA Tesla K80.
_train_gpu_count = os.getenv("IS_TESTING_TRAIN_GPU")
if _train_gpu_count:
    TRAIN_GPU, TRAIN_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(_train_gpu_count),
    )
else:
    TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)

# Deployment accelerators: default is CPU-only unless the harness asks
# for GPUs via IS_TESTING_DEPLOY_GPU.
_deploy_gpu_count = os.getenv("IS_TESTING_DEPLOY_GPU")
if _deploy_gpu_count:
    DEPLOY_GPU, DEPLOY_NGPU = (
        aip.AcceleratorType.NVIDIA_TESLA_K80,
        int(_deploy_gpu_count),
    )
else:
    DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```
#### Set pre-built containers
Set the pre-built Docker container image for training and prediction.
For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers)
```
# TensorFlow version tag for the prebuilt containers; the harness may
# override it, otherwise default to 2.1.
TF = os.getenv("IS_TESTING_TF") or "2-1"

if TF.startswith("2"):
    # TF 2.x: training images are tagged "tf-*", serving images "tf2-*".
    TRAIN_VERSION = ("tf-gpu.{}" if TRAIN_GPU else "tf-cpu.{}").format(TF)
    DEPLOY_VERSION = ("tf2-gpu.{}" if DEPLOY_GPU else "tf2-cpu.{}").format(TF)
else:
    # TF 1.x: both training and serving images use the "tf-*" tags.
    TRAIN_VERSION = ("tf-gpu.{}" if TRAIN_GPU else "tf-cpu.{}").format(TF)
    DEPLOY_VERSION = ("tf-gpu.{}" if DEPLOY_GPU else "tf-cpu.{}").format(TF)

TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)

print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
```
#### Set machine type
Next, set the machine type to use for training and prediction.
- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: The following is not supported for training:*
- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
Learn [which machine types are available for training](https://cloud.google.com/vertex-ai/docs/training/configure-compute) and [for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute)
```
# Compute shape for the training VM; the test harness may override the
# machine family, otherwise use n1-standard with 4 vCPUs.
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") or "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = "-".join([MACHINE_TYPE, VCPU])
print("Train machine type", TRAIN_COMPUTE)

# Compute shape for the serving VM; independent harness override.
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") or "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = "-".join([MACHINE_TYPE, VCPU])
print("Deploy machine type", DEPLOY_COMPUTE)
```
# Tutorial
Now you are ready to start creating your own custom model and training for CIFAR10.
## Set up clients
The Vertex client library for Python works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex AI server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
- Model Service for `Model` resources.
- Endpoint Service for deployment.
- Job Service for batch jobs and custom training.
- Prediction Service for serving.
```
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
```
## Train a model
There are two ways you can train a custom model using a container image:
- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.
- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model.
## Prepare your custom job specification
Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:
- `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed)
- `python_package_spec` : The specification of the Python package to be installed with the pre-built container.
### Prepare your machine specification
Now define the machine specification for your custom training job. This tells Vertex AI what type of machine instance to provision for the training.
- `machine_type`: The type of Google Cloud instance to provision -- e.g., n1-standard-8.
- `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU.
- `accelerator_count`: The number of accelerators.
```
# Hardware spec for each training VM; accelerators are attached only
# when GPU training was configured above.
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
if TRAIN_GPU:
    machine_spec = {
        "machine_type": TRAIN_COMPUTE,
        "accelerator_type": TRAIN_GPU,
        "accelerator_count": TRAIN_NGPU,
    }
```
### Prepare your disk specification
(optional) Now define the disk specification for your custom training job. This tells Vertex AI what type and size of disk to provision in each machine instance for the training.
- `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD.
- `boot_disk_size_gb`: Size of disk in GB.
```
# Boot disk for each training VM.
DISK_TYPE = "pd-ssd"  # one of: pd-ssd (faster), pd-standard (cheaper)
DISK_SIZE = 200  # size in GB

disk_spec = {
    "boot_disk_type": DISK_TYPE,
    "boot_disk_size_gb": DISK_SIZE,
}
```
### Define the worker pool specification
Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:
- `replica_count`: The number of instances to provision of this machine type.
- `machine_spec`: The hardware specification.
- `disk_spec` : (optional) The disk storage specification.
- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.
Let's dive deeper now into the python package specification:
-`executor_image_spec`: This is the docker image which is configured for your custom training job.
-`package_uris`: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the docker image.
-`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task.py` -- note that it was not necessary to append the `.py` suffix.
-`args`: The command line arguments to pass to the corresponding Python module. In this example, you will be setting:
- `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts:
- direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
- indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
- `"--epochs=" + EPOCHS`: The number of epochs for training.
- `"--steps=" + STEPS`: The number of steps (batches) per epoch.
- `"--distribute=" + TRAIN_STRATEGY` : The training distribution strategy to use for single or distributed training.
- `"single"`: single device.
- `"mirror"`: all GPU devices on a single compute instance.
- `"multi"`: all GPU devices on all compute instances.
```
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)

# Mirrored (multi-GPU) training only makes sense with 2+ GPUs attached.
TRAIN_STRATEGY = "mirror" if TRAIN_NGPU and TRAIN_NGPU >= 2 else "single"

EPOCHS = 20
STEPS = 100

# DIRECT: pass the model directory on the command line; otherwise the
# service supplies it via the AIP_MODEL_DIR environment variable.
DIRECT = True

CMDARGS = [
    "--epochs=" + str(EPOCHS),
    "--steps=" + str(STEPS),
    "--distribute=" + TRAIN_STRATEGY,
]
if DIRECT:
    CMDARGS = ["--model-dir=" + MODEL_DIR] + CMDARGS

# One worker pool: a single replica running the prebuilt training
# container with our packaged trainer installed into it.
worker_pool_spec = [
    {
        "replica_count": 1,
        "machine_spec": machine_spec,
        "disk_spec": disk_spec,
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [BUCKET_NAME + "/trainer_cifar10.tar.gz"],
            "python_module": "trainer.task",
            "args": CMDARGS,
        },
    }
]
```
### Assemble a job specification
Now assemble the complete description for the custom job specification:
- `display_name`: The human readable name you assign to this custom job.
- `job_spec`: The specification for the custom job.
- `worker_pool_specs`: The specification for the machine VM instances.
- `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form:
<output_uri_prefix>/model
```
# In DIRECT mode the trainer receives the output path itself; otherwise
# tell the service where to stage artifacts (forwarded as AIP_MODEL_DIR).
job_spec = {"worker_pool_specs": worker_pool_spec}
if not DIRECT:
    job_spec["base_output_directory"] = {"output_uri_prefix": MODEL_DIR}

custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
```
### Examine the training package
#### Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
- \_\_init\_\_.py
- task.py
The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).
#### Package Assembly
In the following cells, you will assemble the training package.
```
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
```
#### Task.py contents
In the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary:
- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.
- Loads CIFAR10 dataset from TF Datasets (tfds).
- Builds a model using TF.Keras model API.
- Compiles the model (`compile()`).
- Sets a training distribution strategy according to the argument `args.distribute`.
- Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps`
- Saves the trained model (`save(args.model_dir)`) to the specified model directory.
```
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10

import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys

tfds.disable_progress_bar()

# Command-line contract matches the CMDARGS assembled in the notebook.
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=0.01, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=10, type=int,
                    help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
                    default=200, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
                    help='distributed training strategy')
args = parser.parse_args()

print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())

# Single Machine, single compute device
if args.distribute == 'single':
    if tf.test.is_gpu_available():
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
    strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))

# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64


def make_datasets_unbatched():
    """Return the unbatched CIFAR10 training split: scaled, cached, repeated."""

    # Scaling CIFAR10 data from (0, 255] to (0., 1.]
    def scale(image, label):
        image = tf.cast(image, tf.float32)
        image /= 255.0
        return image, label

    datasets, info = tfds.load(name='cifar10',
                               with_info=True,
                               as_supervised=True)
    # repeat() lets steps_per_epoch exceed one pass over the data.
    return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()


# Build the Keras model
def build_and_compile_cnn_model():
    """Build and compile a small CNN for 10-class CIFAR classification."""
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    # Sparse loss: labels are integer class ids, not one-hot vectors.
    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
        metrics=['accuracy'])
    return model


# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)

with strategy.scope():
    # Creation of dataset, and model building/compiling need to be within
    # `strategy.scope()`.
    model = build_and_compile_cnn_model()

model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
# Save the trained model to --model-dir (or AIP_MODEL_DIR in indirect mode).
model.save(args.model_dir)
```
#### Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
```
# Package the trainer folder as a gzipped tarball and stage it in the
# bucket under the same object name used in package_uris.
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
```
### Train the model
Now start the training of your custom training job on Vertex AI. Use this helper function `create_custom_job`, which takes the following parameter:
-`custom_job`: The specification for the custom job.
The helper function calls job client service's `create_custom_job` method, with the following parameters:
-`parent`: The Vertex AI location path to `Dataset`, `Model` and `Endpoint` resources.
-`custom_job`: The specification for the custom job.
You will display a handful of the fields returned in `response` object, with the two that are of most interest are:
-`response.name`: The Vertex AI fully qualified identifier assigned to this custom training job. You save this identifier for using in subsequent steps.
-`response.state`: The current state of the custom training job.
```
def create_custom_job(custom_job):
    """Submit the custom training job and echo key response fields.

    Returns the CustomJob response; response.name carries the fully
    qualified job identifier used by later cells.
    """
    response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
    for field in ("name", "display_name", "state", "create_time", "update_time"):
        print(field + ":", getattr(response, field))
    return response


response = create_custom_job(custom_job)
```
Now get the unique identifier for the custom job you created.
```
# The full unique ID for the custom job (projects/.../customJobs/<id>)
job_id = response.name
# The short numeric ID is the final segment of the resource name.
job_short_id = job_id.rsplit("/", 1)[-1]
print(job_id)
```
### Get information on a custom job
Next, use this helper function `get_custom_job`, which takes the following parameter:
- `name`: The Vertex AI fully qualified identifier for the custom job.
The helper function calls the job client service's `get_custom_job` method, with the following parameter:
- `name`: The Vertex AI fully qualified identifier for the custom job.
If you recall, you got the Vertex AI fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and saved the identifier in the variable `job_id`.
```
def get_custom_job(name, silent=False):
    """Fetch the custom job and, unless silent, print its status fields.

    name: fully qualified Vertex AI job identifier (from response.name).
    """
    response = clients["job"].get_custom_job(name=name)
    if not silent:
        for field in ("name", "display_name", "state", "create_time", "update_time"):
            print(field + ":", getattr(response, field))
    return response


response = get_custom_job(job_id)
```
# Deploy the model
Training the above model may take upwards of 20 minutes time.
Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, we will need to know the location of the saved model, which the Python script saved in your local Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`.
```
# Poll once a minute until the job reaches a terminal state.
while True:
    response = get_custom_job(job_id, True)
    if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("Training job has not completed:", response.state)
        model_path_to_deploy = None
        if response.state == aip.JobState.JOB_STATE_FAILED:
            # Terminal failure: stop polling; model_path_to_deploy stays None.
            break
    else:
        # Success: resolve where the model artifacts were written.
        if not DIRECT:
            # Indirect mode: the service writes under <output_uri_prefix>/model.
            MODEL_DIR = MODEL_DIR + "/model"
        model_path_to_deploy = MODEL_DIR
        print("Training Job Time:", response.end_time - response.start_time)
        print("Training Elapsed Time:", response.update_time - response.create_time)
        break
    time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
```
## Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.
```
import tensorflow as tf
# Load the SavedModel the training job wrote to Cloud Storage; tf.keras
# reads gs:// paths directly.
model = tf.keras.models.load_model(MODEL_DIR)
```
## Evaluate the model
Now find out how good the model is.
### Load evaluation data
You will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.
You don't need the training data, and hence why we loaded it as `(_, _)`.
Before you can run the data through evaluation, you need to preprocess it:
`x_test`:
1. Normalize (rescale) the pixel data by dividing each pixel by 255. This replaces each single byte integer pixel with a 32-bit floating point number between 0 and 1.
`y_test`:<br/>
2. The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels. So we don't need to do anything more.
```
import numpy as np
from tensorflow.keras.datasets import cifar10

# Discard the training split; keep only the holdout set for evaluation.
(_, _), (x_test, y_test) = cifar10.load_data()

# Rescale uint8 pixels [0, 255] to float32 in [0, 1], matching training.
x_test = np.asarray(x_test / 255.0, dtype=np.float32)

print(x_test.shape, y_test.shape)
```
### Perform the model evaluation
Now evaluate how well the model in the custom job did.
```
# Reports loss and accuracy on the CIFAR10 holdout set (sparse labels).
model.evaluate(x_test, y_test)
```
## Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
### How does the serving function work
When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.
The serving function consists of two parts:
- `preprocessing function`:
- Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
- Performs the same preprocessing of the data that was done during training the underlying model, including normalizing and scaling.
- `post-processing function`:
- Converts the model output to format expected by the receiving application -- e.g., compresses the output.
- Packages the output for the receiving application, including adding headings, and making JSON objects.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One consideration to keep in mind when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported.
### Serving function for image data
To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model.
To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU).
When you send a prediction or explanation request, the content of the request is base 64 decoded into a TensorFlow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model:
- `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB).
- `image.convert_image_dtype` - Changes integer pixel values to float 32.
- `image.resize` - Resizes the image to match the input shape for the model.
- `resized / 255.0` - Rescales (normalization) the pixel data between 0 and 1.
At this point, the data can be passed to the model (`m_call`).
#### Vertex AI Explainability Signatures
When the serving function is saved back with the underlying model (`tf.saved_model.save`), you specify the input layer of the serving function as the signature `serving_default`.
For Vertex AI Explainability image models, you need to save two additional signatures from the serving function:
- `xai_preprocess`: The preprocessing function in the serving function.
- `xai_model`: The concrete function for calling the model.
```
CONCRETE_INPUT = "numpy_inputs"


def _preprocess(bytes_input):
    """Decode one JPEG and convert it to the model's expected input.

    Mirrors the training pipeline: decode to RGB, scale pixels to [0, 1],
    resize to the model's 32x32 input.
    """
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    # convert_image_dtype already rescales uint8 [0, 255] -> float32 [0, 1].
    # BUG FIX: the original additionally divided by 255.0 afterwards, which
    # double-scaled the pixels into [0, 1/255] and did not match the
    # training preprocessing (a single division by 255).
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    resized = tf.image.resize(decoded, size=(32, 32))
    return resized


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    """Vectorized preprocessing over a batch of decoded-JPEG byte strings."""
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {
        CONCRETE_INPUT: decoded_images
    }  # User needs to make sure the key matches model's input


@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
    """serving_default entry point: preprocess, then invoke the model."""
    images = preprocess_fn(bytes_inputs)
    prob = m_call(**images)
    return prob


# Concrete function bound to the model's input signature; also exported as
# the "xai_model" signature required by Vertex Explainability.
m_call = tf.function(model.call).get_concrete_function(
    [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)

tf.saved_model.save(
    model,
    model_path_to_deploy,
    signatures={
        "serving_default": serving_fn,
        # Required for XAI
        "xai_preprocess": preprocess_fn,
        "xai_model": m_call,
    },
)
```
## Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently.
```
# Reload the saved model to discover the serving signature's layer names.
loaded = tf.saved_model.load(model_path_to_deploy)

# Input layer name of the serving function -- needed to route prediction
# requests to the serving function rather than the model itself.
serving_input = next(
    iter(loaded.signatures["serving_default"].structured_input_signature[1])
)
print("Serving function input:", serving_input)

# Output layer name of the serving function -- used in explanation metadata.
serving_output = next(iter(loaded.signatures["serving_default"].structured_outputs))
print("Serving function output:", serving_output)

# The underlying model's own input/output tensor names, for reference.
input_name = model.input.name
print("Model input name:", input_name)

output_name = model.output.name
print("Model output name:", output_name)
```
### Explanation Specification
To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to a Vertex `Model` resource. These settings are referred to as the explanation metadata, which consists of:
- `parameters`: This is the specification for the explainability algorithm to use for explanations on your model. You can choose between:
- Shapley - *Note*, not recommended for image data -- can be very long running
- XRAI
- Integrated Gradients
- `metadata`: This is the specification for how the algorithm is applied on your custom model.
#### Explanation Parameters
Let's first dive deeper into the settings for the explainability algorithm.
#### Shapley
Assigns credit for the outcome to each feature, and considers different permutations of the features. This method provides a sampling approximation of exact Shapley values.
Use Cases:
- Classification and regression on tabular data.
Parameters:
- `path_count`: This is the number of paths over the features that will be processed by the algorithm. An exact approximation of the Shapley values requires M! paths, where M is the number of features. For the CIFAR10 dataset, M would be 3072 (32*32*3).
For any non-trivial number of features, this is too compute expensive. You can reduce the number of paths over the features to M * `path_count`.
#### Integrated Gradients
A gradients-based method to efficiently compute feature attributions with the same axiomatic properties as the Shapley value.
Use Cases:
- Classification and regression on tabular data.
- Classification on image data.
Parameters:
- `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time.
#### XRAI
Based on the integrated gradients method, XRAI assesses overlapping regions of the image to create a saliency map, which highlights relevant regions of the image rather than pixels.
Use Cases:
- Classification on image data.
Parameters:
- `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time.
In the next code cell, set the variable `XAI` to which explainability algorithm you will use on your custom model.
```
XAI = "ig"  # one of: "shapley", "ig", "xrai"

# Map each supported algorithm to its Vertex AI parameter specification.
# NOTE(review): the original if/elif chain left PARAMETERS undefined for an
# unknown XAI value, producing a confusing NameError later; fail fast instead.
_XAI_PARAMETERS = {
    "shapley": {"sampled_shapley_attribution": {"path_count": 10}},
    "ig": {"integrated_gradients_attribution": {"step_count": 50}},
    "xrai": {"xrai_attribution": {"step_count": 50}},
}

try:
    PARAMETERS = _XAI_PARAMETERS[XAI]
except KeyError:
    raise ValueError(
        f"Unknown XAI algorithm {XAI!r}; expected one of {sorted(_XAI_PARAMETERS)}"
    )

parameters = aip.ExplanationParameters(PARAMETERS)
```
#### Explanation Metadata
Let's first dive deeper into the explanation metadata, which consists of:
- `outputs`: A scalar value in the output to attribute -- what to explain. For example, in a probability output \[0.1, 0.2, 0.7\] for classification, one wants an explanation for 0.7. Consider the following formulae, where the output is `y` and that is what we want to explain.
y = f(x)
Consider the following formulae, where the outputs are `y` and `z`. Since we can only do attribution for one scalar value, we have to pick whether we want to explain the output `y` or `z`. Assume in this example the model is object detection and y and z are the bounding box and the object classification. You would want to pick which of the two outputs to explain.
y, z = f(x)
The dictionary format for `outputs` is:
{ "outputs": { "[your_display_name]":
"output_tensor_name": [layer]
}
}
<div style="margin-left: 25px;">
<ul>
<li>[your_display_name]: A human readable name you assign to the output to explain. A common example is "probability".</li>
<li>"output_tensor_name": The key/value field to identify the output layer to explain. </li>
<li>[layer]: The output layer to explain. In a single task model, like a tabular regressor, it is the last (topmost) layer in the model</li>.
</ul>
</div>
- `inputs`: The features for attribution -- how they contributed to the output. Consider the following formulae, where `a` and `b` are the features. We have to pick which features to explain how they contributed. Assume that this model is deployed for A/B testing, where `a` are the data_items for the prediction and `b` identifies whether the model instance is A or B. You would want to pick `a` (or some subset of) for the features, and not `b` since it does not contribute to the prediction.
y = f(a,b)
The minimum dictionary format for `inputs` is:
{ "inputs": { "[your_display_name]":
"input_tensor_name": [layer]
}
}
<div style="margin-left: 25px;">
<ul>
<li>[your_display_name]: A human readable name you assign to the input to explain. A common example is "features".</li>
<li>"input_tensor_name": The key/value field to identify the input layer for the feature attribution. </li>
<li>[layer]: The input layer for feature attribution. In a single input tensor model, it is the first (bottom-most) layer in the model.</li>
</ul>
</div>
Since the inputs to the model are images, you can specify the following additional fields as reporting/visualization aids:
<div style="margin-left: 25px;">
<ul>
<li>"modality": "image": Indicates the field values are image data.</li>
</ul>
</div>
```
# Random baseline image (32x32x3, values in [0, 1)) for attribution methods.
random_baseline = np.random.rand(32, 32, 3)
# NOTE(review): fixed key typo "number_vaue" -> "number_value"; the misspelled
# key does not match the ExplanationMetadata value schema. `input_baselines`
# is not referenced elsewhere in this chunk -- verify it is wired into the
# input metadata where intended.
input_baselines = [{"number_value": x} for x in random_baseline]

# "modality": "image" marks the input as image data for visualization aids.
INPUT_METADATA = {"input_tensor_name": CONCRETE_INPUT, "modality": "image"}
OUTPUT_METADATA = {"output_tensor_name": serving_output}

input_metadata = aip.ExplanationMetadata.InputMetadata(INPUT_METADATA)
output_metadata = aip.ExplanationMetadata.OutputMetadata(OUTPUT_METADATA)
metadata = aip.ExplanationMetadata(
    inputs={"image": input_metadata}, outputs={"class": output_metadata}
)

explanation_spec = aip.ExplanationSpec(metadata=metadata, parameters=parameters)
```
### Upload the model
Use this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.
The helper function takes the following parameters:
- `display_name`: A human readable name for the `Endpoint` service.
- `image_uri`: The container image for the model deployment.
- `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.
The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:
- `parent`: The Vertex AI location root path for `Dataset`, `Model` and `Endpoint` resources.
- `model`: The specification for the Vertex `Model` resource instance.
Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields:
- `display_name`: A human readable name for the `Model` resource.
- `metadata_schema_uri`: Since your model was built without a Vertex `Dataset` resource, you will leave this blank (`''`).
- `artifact_uri`: The Cloud Storage path where the model is stored in SavedModel format.
- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `explanation_spec`: This is the specification for enabling explainability for your model.
Uploading a model into a Vertex `Model` resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready.
The helper function returns the Vertex AI fully qualified identifier for the corresponding Vertex `Model` instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id.
```
IMAGE_URI = DEPLOY_IMAGE


def upload_model(display_name, image_uri, model_uri):
    """Upload a SavedModel to Vertex AI and return the Model resource id.

    Args:
        display_name: Human readable name for the Model resource.
        image_uri: Container image for serving predictions.
        model_uri: Cloud Storage path of the SavedModel artifacts.

    Returns:
        The fully qualified Vertex AI identifier of the uploaded Model.
    """
    model_spec = aip.Model(
        display_name=display_name,
        metadata_schema_uri="",  # no Vertex Dataset resource was used
        artifact_uri=model_uri,
        container_spec={"image_uri": image_uri},
        explanation_spec=explanation_spec,  # enables explainability
    )
    # upload_model returns a long-running operation; .result() blocks until
    # the Model resource is ready.
    lro = clients["model"].upload_model(parent=PARENT, model=model_spec)
    print("Long running operation:", lro.operation.name)
    upload_result = lro.result(timeout=360)
    print("upload_model_response")
    print(" model:", upload_result.model)
    return upload_result.model


model_to_deploy_id = upload_model(
    "cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
```
### Get `Model` resource information
Now let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:
- `name`: The Vertex AI unique identifier for the `Model` resource.
This helper function calls the Vertex AI `Model` client service's method `get_model`, with the following parameter:
- `name`: The Vertex AI unique identifier for the `Model` resource.
```
def get_model(name):
    """Fetch and display the Vertex Model resource identified by `name`."""
    print(clients["model"].get_model(name=name))


get_model(model_to_deploy_id)
```
## Model deployment for batch prediction
Now deploy the trained Vertex `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for online prediction.
For online prediction, you:
1. Create an `Endpoint` resource for deploying the `Model` resource to.
2. Deploy the `Model` resource to the `Endpoint` resource.
3. Make online prediction requests to the `Endpoint` resource.
For batch-prediction, you:
1. Create a batch prediction job.
2. The job service will provision resources for the batch prediction request.
3. The results of the batch prediction request are returned to the caller.
4. The job service will unprovision the resources for the batch prediction request.
## Send a batch prediction request
Send a batch prediction to your deployed model.
### Get test items
You will use examples out of the test (holdout) portion of the dataset as a test items.
```
# Pull two examples from the test (holdout) split to use as prediction items.
test_image_1 = x_test[0]
test_label_1 = y_test[0]
test_image_2 = x_test[1]
test_label_2 = y_test[1]
# Expected to be (32, 32, 3) for CIFAR10 -- confirm upstream preprocessing.
print(test_image_1.shape)
```
### Prepare the request content
You are going to send the CIFAR10 images as compressed JPG image, instead of the raw uncompressed bytes:
- `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image.
- Denormalize the image data from \[0,1) range back to [0,255).
- Convert the 32-bit floating point values to 8-bit unsigned integers.
```
import cv2

# Denormalize pixel data from [0, 1) back to [0, 255), cast to uint8, and let
# OpenCV compress each image to JPEG on disk.
cv2.imwrite("tmp1.jpg", (test_image_1 * 255).astype(np.uint8))
cv2.imwrite("tmp2.jpg", (test_image_2 * 255).astype(np.uint8))
```
### Copy test item(s)
For the batch prediction, you will copy the test items over to your Cloud Storage bucket.
```
# Copy the compressed test images into the Cloud Storage bucket so the batch
# prediction service can read them (IPython "!" shell escapes).
! gsutil cp tmp1.jpg $BUCKET_NAME/tmp1.jpg
! gsutil cp tmp2.jpg $BUCKET_NAME/tmp2.jpg

# Cloud Storage URIs of the uploaded test items.
test_item_1 = BUCKET_NAME + "/tmp1.jpg"
test_item_2 = BUCKET_NAME + "/tmp2.jpg"
```
### Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
- `input_name`: the name of the input layer of the underlying model.
- `'b64'`: A key that indicates the content is base64 encoded.
- `content`: The compressed JPG image bytes as a base64 encoded string.
Each instance in the prediction request is a dictionary entry of the form:
{serving_input: {'b64': content}}
To pass the image data to the prediction service you encode the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network.
- `tf.io.read_file`: Read the compressed JPG images into memory as raw bytes.
- `base64.b64encode`: Encode the raw bytes into a base64 encoded string.
```
import base64
import json

gcs_input_uri = BUCKET_NAME + "/" + "test.jsonl"

# Build the JSONL batch input file: one {serving_input: {"b64": <content>}}
# entry per line, with the compressed JPEG bytes base64-encoded so the binary
# content survives network transport unmodified.
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
    for test_item in (test_item_1, test_item_2):
        # NOTE(review): renamed the local from `bytes`, which shadowed the
        # builtin; the duplicated per-item body is now a single loop.
        raw_bytes = tf.io.read_file(test_item)
        b64str = base64.b64encode(raw_bytes.numpy()).decode("utf-8")
        data = {serving_input: {"b64": b64str}}
        f.write(json.dumps(data) + "\n")
```
### Compute instance scaling
You have several choices on scaling the compute instances for handling your batch prediction requests:
- Single Instance: The batch prediction requests are processed on a single compute instance.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.
- Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified.
- Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them.
- Auto Scaling: The batch prediction requests are split across a scaleable number of compute instances.
- Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions.
The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
```
# Single-instance scaling: provision exactly one compute node for the batch
# prediction job (min == max == 1).
MIN_NODES = 1
MAX_NODES = 1
```
### Make batch prediction request
Now that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters:
- `display_name`: The human readable name for the prediction job.
- `model_name`: The Vertex AI fully qualified identifier for the `Model` resource.
- `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above.
- `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to.
- `parameters`: Additional filtering parameters for serving prediction results.
The helper function calls the job client service's `create_batch_prediction_job` method, with the following parameters:
- `parent`: The Vertex AI location root path for Dataset, Model and Pipeline resources.
- `batch_prediction_job`: The specification for the batch prediction job.
Let's now dive into the specification for the `batch_prediction_job`:
- `display_name`: The human readable name for the prediction batch job.
- `model`: The Vertex AI fully qualified identifier for the `Model` resource.
- `dedicated_resources`: The compute resources to provision for the batch prediction job.
- `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
- `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
- `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
- `model_parameters`: Additional filtering parameters for serving prediction results. No Additional parameters are supported for custom models.
- `input_config`: The input source and format type for the instances to predict.
- `instances_format`: The format of the batch prediction request file: `csv` or `jsonl`.
- `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests.
- `output_config`: The output destination and format for the predictions.
- `prediction_format`: The format of the batch prediction response file: `csv` or `jsonl`.
- `gcs_destination`: The output destination for the predictions.
This call is an asynchronous operation. You will print from the response object a few select fields, including:
- `name`: The Vertex AI fully qualified identifier assigned to the batch prediction job.
- `display_name`: The human readable name for the prediction batch job.
- `model`: The Vertex AI fully qualified identifier for the Model resource.
- `generate_explanations`: Whether True/False explanations were provided with the predictions (explainability).
- `state`: The state of the prediction job (pending, running, etc).
Since this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`.
```
BATCH_MODEL = "cifar10_batch-" + TIMESTAMP

# Batch request/response file formats. Hoisted before the function definition
# for readability; the original defined them after the def, which worked only
# because they are read at call time.
IN_FORMAT = "jsonl"
OUT_FORMAT = "jsonl"


def create_batch_prediction_job(
    display_name,
    model_name,
    gcs_source_uri,
    gcs_destination_output_uri_prefix,
    parameters=None,
):
    """Create a Vertex AI batch prediction job with explanations enabled.

    Args:
        display_name: Human readable name for the batch prediction job.
        model_name: Fully qualified Vertex AI identifier of the Model resource.
        gcs_source_uri: Cloud Storage path of the JSONL batch input file.
        gcs_destination_output_uri_prefix: Cloud Storage prefix for results.
        parameters: Optional model-serving parameters (custom models: None).

    Returns:
        The service response for the created batch prediction job.
    """
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        # CPU-only deployment.
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "model_parameters": json_format.ParseDict(parameters, Value()),
        "input_config": {
            "instances_format": IN_FORMAT,
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": OUT_FORMAT,
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
        "dedicated_resources": {
            "machine_spec": machine_spec,
            "starting_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
        },
        "generate_explanation": True,  # request explanations with predictions
    }
    response = clients["job"].create_batch_prediction_job(
        parent=PARENT, batch_prediction_job=batch_prediction_job
    )
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    try:
        print(" generate_explanation:", response.generate_explanation)
    except AttributeError:
        # NOTE(review): narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing field is expected here.
        pass
    print(" state:", response.state)
    print(" create_time:", response.create_time)
    print(" start_time:", response.start_time)
    print(" end_time:", response.end_time)
    print(" update_time:", response.update_time)
    print(" labels:", response.labels)
    return response


response = create_batch_prediction_job(
    BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME
)
```
Now get the unique identifier for the batch prediction job you created.
```
# The full unique ID for the batch job
batch_job_id = response.name
# The short numeric ID for the batch job (last path segment of the full ID)
batch_job_short_id = batch_job_id.split("/")[-1]

print(batch_job_id)
```
### Get information on a batch prediction job
Use this helper function `get_batch_prediction_job`, with the following parameter:
- `job_name`: The Vertex AI fully qualified identifier for the batch prediction job.
The helper function calls the job client service's `get_batch_prediction_job` method, with the following parameter:
- `name`: The Vertex AI fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex fully qualified identifier for your batch prediction job -- `batch_job_id`
The helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`.
```
def get_batch_prediction_job(job_name, silent=False):
    """Fetch a batch prediction job and return (output path prefix, state).

    Args:
        job_name: Fully qualified Vertex AI identifier of the batch job.
        silent: If True, skip printing and just return the result tuple.

    Returns:
        Tuple of (Cloud Storage output URI prefix, job state).
    """
    response = clients["job"].get_batch_prediction_job(name=job_name)
    if silent:
        return response.output_config.gcs_destination.output_uri_prefix, response.state
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    try:  # not all data types support explanations
        print(" generate_explanation:", response.generate_explanation)
    except AttributeError:
        # NOTE(review): narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing field is expected here.
        pass
    print(" state:", response.state)
    print(" error:", response.error)
    gcs_destination = response.output_config.gcs_destination
    print(" gcs_destination")
    print(" output_uri_prefix:", gcs_destination.output_uri_prefix)
    return gcs_destination.output_uri_prefix, response.state


predictions, state = get_batch_prediction_job(batch_job_id)
```
### Get the predictions
When the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`.
Finally you view the predictions stored at the Cloud Storage path you set as output. The predictions will be in a JSONL format, which you indicated at the time you made the batch prediction job. The predictions are in a subdirectory starting with the name `prediction`. Within that subdirectory there is a file named `prediction.results-xxxxx-of-xxxxx`.
Now display (cat) the contents. You will see multiple JSON objects, one for each prediction.
Finally you view the explanations stored at the Cloud Storage path you set as output. The explanations will be in a JSONL format, which you indicated at the time you made the batch explanation job. The explanations are in a subdirectory starting with the name `prediction`. Within that subdirectory there is a file named `explanations.results-xxxxx-of-xxxxx`.
Let's display (cat) the contents. You will see a row for each prediction -- in this case, there is just one row. The row is the softmax probability distribution for the corresponding CIFAR10 classes.
```
def get_latest_predictions(gcs_out_dir):
    """Get the latest prediction subfolder using the timestamp in the subfolder name"""
    # IPython "!" shell escape: list the job's output folders in Cloud Storage.
    folders = !gsutil ls $gcs_out_dir
    latest = ""
    for folder in folders:
        subfolder = folder.split("/")[-2]
        if subfolder.startswith("prediction-"):
            # NOTE(review): `latest` stores the full path (folder[:-1]) but is
            # compared against the bare subfolder name; with multiple
            # prediction folders this comparison looks inconsistent -- verify.
            if subfolder > latest:
                latest = folder[:-1]
    return latest


# Poll the job until it succeeds or fails; on success, display the
# explanation results and any prediction errors.
while True:
    predictions, state = get_batch_prediction_job(batch_job_id, True)
    if state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("The job has not completed:", state)
        if state == aip.JobState.JOB_STATE_FAILED:
            break
    else:
        folder = get_latest_predictions(predictions)
        ! gsutil ls $folder/explanation.results*
        print("Results:")
        ! gsutil cat $folder/explanation.results*
        print("Errors:")
        ! gsutil cat $folder/prediction.errors*
        break
    time.sleep(60)  # wait a minute between polls
```
# Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the GCP
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
# Flags controlling which resources to delete; set any to False to keep it.
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True

# Each deletion is wrapped in try/except so a missing or already-deleted
# resource does not abort the rest of the cleanup.

# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
    if delete_dataset and "dataset_id" in globals():
        clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
    print(e)

# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
    if delete_pipeline and "pipeline_id" in globals():
        clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
    print(e)

# Delete the model using the Vertex fully qualified identifier for the model
try:
    if delete_model and "model_to_deploy_id" in globals():
        clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
    print(e)

# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
    if delete_endpoint and "endpoint_id" in globals():
        clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
    print(e)

# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
    if delete_batchjob and "batch_job_id" in globals():
        clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
    print(e)

# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
    if delete_customjob and "job_id" in globals():
        clients["job"].delete_custom_job(name=job_id)
except Exception as e:
    print(e)

# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
    if delete_hptjob and "hpt_job_id" in globals():
        clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
    print(e)

# Remove the Cloud Storage bucket and everything in it (IPython shell escape).
if delete_bucket and "BUCKET_NAME" in globals():
    ! gsutil rm -r $BUCKET_NAME
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import missingno as msno
import matplotlib.pyplot as plt
```
### Load training data (from the starter notebook)
```
# this bit thanks to Brendon Hall
# Load the FORCE 2020 training data (semicolon-separated CSV) straight from S3.
s3_train_csv = 's3://zarr-depot/wells/FORCE: Machine Predicted Lithology/train.csv'
data = pd.read_csv(s3_train_csv, sep=';')
data.sample(10)  # quick visual sanity check of 10 random rows
```
### Inspecting available logs and interpretations (from the starter notebook)
The data contains the metadata columns
* WELL: well name
* DEPTH_MD: measured depth
* X_LOC: UTM X coordinate
* Y_LOC: UTM Y coordinate
* Z_LOC: DEPTH
* GROUP: NPD lithostratigraphy group
* FORMATION: NPD lithostratigraphy formation
The data contains the well log curves. For example the following:
* BS: Bit Size
* CALI: Caliper
* RDEP: Deep Resistivity
* RHOB: Bulk Density
* GR: Raw gamma data
* SGR: Spectral Gamma Ray
* RMED: Medium Resistivity
* ROP: Rate of Penetration
* NPHI: Neutron Porosity
* PEF: Photoelectric Absorption Factor
* RSHA: Shallow Resistivity
* DTS: Sonic (Shear Slowness)
* DTC: Sonic (Compressional Slowness)
See contest page for full list.
as well as the interpretation
* FORCE_2020_LITHOFACIES_LITHOLOGY: lithology class label
* FORCE_2020_LITHOFACIES_CONFIDENCE: confidence in lithology interpretation (1: high, 2: medium, 3: low)
There is a total of 83 wells in the training dataset. The `WELL` column is included so the data can be separated per well. This will be necessary for any "windowed" approaches where we use non-local information in the ML features.
```
#wells = data['WELL'].unique()
#wells
#len(wells)
```
### Missing logs (from the starter notebook)
**Notice that some of the curves are NaN. This is an important aspect of this dataset and of this competition.** The only log columns that are **guaranteed** to be present are WELL, DEPT, and GR. All other logs can (and will) be missing from some parts of the dataset.
```
# Per-column check: True where a column contains at least one missing value.
data.isna().any()
```
The following plot shows how large a percentage of training data wells contain at least some depth interval with the given logs. As you can see a couple of logs like RT and DTS are present in less than half of the training wells. **Remember, the test data will have a similar distribution of available logs**
```
# For each of the 25 log columns (metadata columns sliced off via [2:-2]),
# count how many wells are missing that log entirely.
occurrences = np.zeros(25)
for well in data['WELL'].unique():
    occurrences += data[data['WELL'] == well].isna().all().astype(int).values[2:-2]

fig, ax = plt.subplots(1, 1, figsize=(14, 7))
n_wells = data.WELL.unique().shape[0]
# Bar height = percentage of wells in which the log is present.
ax.bar(x=np.arange(occurrences.shape[0]), height=(n_wells - occurrences) / n_wells * 100.0)
# NOTE(review): tick positions must be set BEFORE labels -- the original
# called set_xticklabels first and set_xticks afterwards, which resets the
# formatter and discards the custom column-name labels.
ax.set_xticks(np.arange(occurrences.shape[0]))
ax.set_xticklabels(data.columns[2:-2], rotation=45)
# NOTE(review): dropped the invalid '\%' escape; plain '%' is correct outside
# of LaTeX-rendered (usetex) labels.
ax.set_ylabel('Well presence (%)')
```
## New visualization
### Interactive visualization of missing logs
```
import ipywidgets as widgets
from ipywidgets import interactive, interact

# Unique well names; used to drive the interactive selection widgets below.
wells = data['WELL'].unique()
wells  # display in the notebook output
```
Using the `@interact` decorator we can get a quick way to choose a single well and inspect its data on the fly by just typing the well name:
```
@interact
def data_well_view(WELL=''):
    """Show the rows of the well whose name is typed into the text box."""
    return data.loc[data['WELL'] == WELL]
```
With `interactive` and a similar logic (`data['WELL'].unique()`) we can, on the other hand, check the selected well's curves completeness, again on the fly.
I am using the library `missingno` to make the visual summary plot.
As explained in the library's documentation, the sparkline at the right of the plot summarizes the general shape of the data completeness and points out the rows with the maximum and minimum nullity in the dataset.
This to me is a much more compelling and informative way to inspect the data as it shows the data range where data is missing.
The plot height is proportional to the well length (in rows), which is also annotated on the bottom left.
```
well_items = data['WELL'].unique()


def plot_missingno(w):
    # missingno matrix: filled bars where data exists, gaps where it's
    # missing; the sparkline on the right summarizes per-row completeness.
    msno.matrix(w, color=(0., 0., 0.45))
    fig = plt.gcf()
    fig.set_size_inches(20, np.round(len(w)/750))  # plot height proportional to selected well's length (in rows)
    plt.show()


def data_missingno_view(WELL=''):
    # Restrict the dataframe to the selected well and plot its completeness.
    return plot_missingno(data.loc[data['WELL']==WELL])


well_select = widgets.Select(options=well_items)
interactive(data_missingno_view, WELL=well_select)
```
### Interactive well plotting based on plotting routine from the starter notebook
We can reuse the mechanism from above to interactively select a well to plot:
```
# Metadata/interpretation columns excluded from the log-curve panels.
_NON_LOG_COLUMNS = {'DEPTH_MD', 'FORCE_2020_LITHOFACIES_LITHOLOGY',
                    'FORCE_2020_LITHOFACIES_CONFIDENCE', 'WELL', 'GROUP', 'FORMATION',
                    'X_LOC', 'Y_LOC', 'Z_LOC'}


def plot_well(w):
    """Plot each log curve of well dataframe `w` against measured depth."""
    fig, axs = plt.subplots(1, len(w.columns) - 9, sharey=True)
    fig.set_size_inches(20, np.round(len(w) / 750))
    # NOTE(review): iterate in the dataframe's column order. The original
    # iterated over a set difference, which yields an arbitrary
    # (hash-dependent) panel order that can change between runs.
    log_columns = [c for c in w.columns if c not in _NON_LOG_COLUMNS]
    for ic, col in enumerate(log_columns):
        axs[ic].plot(w[col], w['DEPTH_MD'])
        axs[ic].set_xlabel(col)
    # Invert the y-axis so depth increases downward.
    axs[0].set_ylim(w['DEPTH_MD'].values[-1], w['DEPTH_MD'].values[0])
    plt.show()


def well_plot_view(WELL=''):
    return plot_well(data.loc[data['WELL'] == WELL])


well_select = widgets.Select(options=well_items)
interactive(well_plot_view, WELL=well_select)
```
### Interactive well plotting based on plotting routine from the Cegal notebook
To finish up, a very nice plot using the Well Plotter from the [Cegal Tools package](https://github.com/cegaltools/cegaltools) showcased [here](https://nbviewer.jupyter.org/urls/gitlab.com/hilde.tveit.haland/public-notebooks/-/raw/master/Force%202020-%20CegalWells%20EDA.ipynb):
```
from cegaltools.plotting import CegalWellPlotter as cwp
# Invert the confidence column — presumably so larger values indicate
# higher confidence in the plot; confirm against the dataset description.
data['FORCE_2020_LITHOFACIES_CONFIDENCE'] = 1/data['FORCE_2020_LITHOFACIES_CONFIDENCE']
def plot_well_cegal(w):
    """Plot selected logs for a single well with the Cegal Well Plotter."""
    indexed = w.set_index('DEPTH_MD')
    cwp.plot_logs(
        df=indexed,
        logs=['GROUP', 'FORMATION', 'RHOB', 'GR', 'NPHI', 'DTC', 'DTS'],
        log_scale_logs=['RMED', 'RDEP'],
        lithology_logs='FORCE_2020_LITHOFACIES_LITHOLOGY',
        lithology_proba_logs='FORCE_2020_LITHOFACIES_CONFIDENCE',
    )
def well_plot_cegal_view(WELL=''):
    return plot_well_cegal(data.loc[data['WELL'] == WELL])
well_select = widgets.Select(options=well_items)
interactive(well_plot_cegal_view, WELL=well_select)
```
| github_jupyter |
# QUESTIONS TO SUBJECT CLASSIFICATION
### Link to the Dataset: [Questions Data](https://www.kaggle.com/mrutyunjaybiswal/iitjee-neet-aims-students-questions-data)
### Importing Libraries
```
import pandas as pd
from sklearn import preprocessing
import nltk
nltk.download('stopwords') # download the stopwords from NLTK
import re # library for regular expression operations
import string # for string operations
from nltk.corpus import stopwords # module for stop words that come with NLTK
from nltk.stem import PorterStemmer # module for stemming
from nltk.tokenize import TweetTokenizer # module for tokenizing strings
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt # library for visualization
import seaborn as sns
```
### Getting our Data
```
df = pd.read_csv(r'subjects-questions.csv')
df
```
### Data Preprocessing
```
# Check for null values (result is displayed by the notebook).
df.isnull().any()
# Label-encode the target: 'Subject' strings -> integer codes.
# (The original comment mentioned a 'Label'/'MSZoning' column; the column
# actually encoded here is 'Subject'.)
label_encoder = preprocessing.LabelEncoder()
df['Subject'] = label_encoder.fit_transform(df['Subject'])
df
```
### Performing steps in NLP
```
def process_q(q):
    """Process question function.

    Tokenizes the question, drops English stopwords and punctuation,
    and stems the remaining tokens.

    Input:
        q: a string containing the question
    Output:
        q_clean: a list of words containing the processed question
    """
    stemmer = PorterStemmer()
    stop_set = stopwords.words('english')
    tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
    kept = []
    for token in tokenizer.tokenize(q):
        # Skip stopwords and punctuation; stem everything else.
        if token in stop_set or token in string.punctuation:
            continue
        kept.append(stemmer.stem(token))
    return kept
# Apply the cleaning pipeline to every question in the 'eng' column:
# 1. Removing stop words
# 2. Tokenization
# 3. Stemming
df['eng'] = [process_q(question) for question in df['eng']]
df
```
### Vectorizing
```
# Bag-of-words vectorization over the processed questions (top 1500 tokens).
cv = CountVectorizer(max_features=1500, analyzer='word', lowercase=False)
# Re-join the token lists into strings, as CountVectorizer expects raw text.
df['eng'] = df['eng'].apply(lambda x: " ".join(x) )
X = cv.fit_transform(df['eng'])  # predictor variable 'X' (sparse matrix)
df
y = pd.DataFrame(df['Subject'])  # response variable 'y'
y.head()
```
### Data Visualization
```
# checking the distribution of outcomes
sns.countplot(x = 'Subject', data = df)
```
### Splitting for Training and Testing
```
# 80/20 train-test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 0) # splitting in the ratio 80:20
```
### Model
```
# Decision tree classifier with default hyperparameters and a fixed seed.
classifier = DecisionTreeClassifier(random_state = 0)
classifier.fit(X_train, y_train)
```
### Making Predictions and Checking Accuracy
```
# Predict on the held-out set and report mean accuracy.
y_pred = classifier.predict(X_test)
classifier.score(X_test, y_test)
```
# Predictions are 84.28% accurate.
### Results' Visualization
```
# Confusion matrix of test-set predictions, rendered as a heatmap
# (rows: actual class, columns: predicted class).
cm = confusion_matrix(y_test, y_pred)
cm
plt.figure(figsize=(6,6))
sns.heatmap(cm, annot=True, fmt=".0f", linewidths=0.5, square = True, cmap = 'Pastel1')
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.show()
```
| github_jupyter |
```
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
```
# Task 1
Данные:
```
urea = np.array([0, 3e-4, 5e-4, 1e-3, 2e-3, 3e-3, 5e-3])  # substrate (urea) concentrations
mid_speed = np.array([0, 0.5, 0.77, 1.2, 1.57, 1.8, 1.9])  # mean ammonia release rates
delta_speed = np.array([0, 0.05, 0.06, 0.08, 0.08, 0.09, 0.2])  # rate measurement uncertainties
```
График зависиомти скорости выделения аммиака от концентрации мочевины при её гидролизе:
```
# Rate vs. substrate concentration, with the measurement-uncertainty band
# shaded between (V - dV) and (V + dV).
plt.plot(urea, mid_speed, "k.-")
plt.fill_between(urea, mid_speed - delta_speed,
                 mid_speed + delta_speed, color="lime")
plt.xlabel("Concentration")
plt.ylabel("Speed")
plt.grid()
plt.show()
```
Воспользуемся графическим методом, будем считать, что $V_{max} = 2.5$
```
# Graphical estimate: assume V_max = 2.5 and find K_m as the substrate
# concentration where the rate reaches V_max / 2, by linear interpolation
# between the two measurements that bracket V_max / 2.
v_max = 2.5
for i, speed in enumerate(mid_speed):
    if speed * 2 > v_max:
        # Interpolate [S] at V = V_max / 2 between points i-1 and i.
        k_m = (urea[i] - urea[i - 1]) /\
            (mid_speed[i] - mid_speed[i - 1]) *\
            (v_max / 2 - mid_speed[i - 1]) + urea[i - 1]
        break
print("V_max = {}, K_m = {}".format(v_max, k_m))
```
Воспользуемся методом Лайнуивера-Берка:
```
# Lineweaver-Burk (double-reciprocal) plot:
#   1/V = 1/V_max + (K_m / V_max) * (1/[S])
# so the fitted intercept a = 1/V_max and the slope b = K_m / V_max.
def trend_line(t, a, b):
    """Straight line a + b * t (a: intercept, b: slope)."""
    return a + b * t
a, b = curve_fit(lambda t, a, b: trend_line(t, a, b), 1 / urea[1:],
                 1 / mid_speed[1:])[0]
plt.errorbar(1 / urea[1:], 1 / mid_speed[1:],
             yerr=1 / mid_speed[1:] - 1 /
             (mid_speed[1:] - delta_speed[1:]), fmt='.')
x = np.linspace(-1500, 3500, 100)
plt.plot(x, trend_line(x, a, b))
plt.grid()
plt.xlabel("1 / [S]")
plt.ylabel("1 / V")
plt.show()
v_max = 1 / a
# Bug fix: b already *is* the slope K_m / V_max, so K_m = b * V_max.
# The previous np.tan(b) wrongly treated the slope as an angle in radians.
k_m = b * v_max
print("V_max = {}, K_m = {}".format(v_max, k_m))
```
Метод Хайнса-Вульфа
```
# Hanes-Woolf plot:
#   [S]/V = K_m / V_max + (1 / V_max) * [S]
# so the fitted intercept a = K_m / V_max and the slope b = 1 / V_max.
def trend_line(t, a, b):
    """Straight line a + b * t (a: intercept, b: slope)."""
    return a + b * t
a, b = curve_fit(lambda t, a, b: trend_line(t, a, b), urea[1:],
                 urea[1:] / mid_speed[1:])[0]
plt.errorbar(urea[1:], urea[1:] / mid_speed[1:],
             yerr=urea[1:] * (1 / mid_speed[1:] - 1 /
             (mid_speed[1:] - delta_speed[1:])), fmt='.')
x = np.linspace(-2e-3, 1e-2, 100)
plt.plot(x, trend_line(x, a, b))
plt.grid()
plt.xlabel("[S]")
plt.ylabel("[S] / V")
plt.show()
# Bug fix: the slope b equals 1 / V_max directly; the previous np.tan(b)
# wrongly treated the slope as an angle in radians.
v_max = 1 / b
k_m = a * v_max
print("V_max = {}, K_m = {}".format(v_max, k_m))
```
Метод Иди-Хофсти
```
# Eadie-Hofstee plot:
#   V = V_max - K_m * (V / [S])
# so the fitted intercept a = V_max and the slope b = -K_m.
def trend_line(t, a, b):
    """Straight line a + b * t (a: intercept, b: slope)."""
    return a + b * t
a, b = curve_fit(lambda t, a, b: trend_line(t, a, b),
                 mid_speed[1:] / urea[1:],
                 mid_speed[1:])[0]
plt.errorbar(mid_speed[1:] / urea[1:], mid_speed[1:],
             xerr=delta_speed[1:] / urea[1:], fmt='.')
x = np.linspace(-100, 2500, 100)
plt.plot(x, trend_line(x, a, b))
plt.grid()
plt.xlabel("V / [S]")
plt.ylabel("V")
plt.show()
v_max = a
# Bug fix: the slope b equals -K_m directly; the previous np.tan(-b)
# wrongly treated the slope as an angle in radians.
k_m = -b
print("V_max = {}, K_m = {}".format(v_max, k_m))
```
Метод Эйзенталя и Корниш-Боудена.
```
# Eisenthal & Cornish-Bowden direct linear plot: for each observation draw
# the line V = v_i + (v_i / [S]_i) * K_m; the lines' common intersection
# gives (K_m, V_max). The values below were read off the plot by eye.
plt.figure(figsize=(8, 8))
for i in [1, 2, 3, 4, 5, 6]:
    x = np.array([0, 0.002])
    plt.plot(x, mid_speed[i] + mid_speed[i] / urea[i] * x)
plt.grid()
plt.show()
k_m = 0.122
v_max = 2.54
print("V_max = {}, K_m = {}".format(v_max, k_m))
```
# Task 2
```
# Measured rates without an inhibitor (speed0) and with two different
# inhibitors (speed1, speed2) at substrate concentrations s.
s = np.array([0.3, 0.5, 1, 3, 9]) * 1e-5
speed0 = np.array([10.4, 14.5, 22.5, 33.8, 40.5])
speed1 = np.array([4.1, 6.4, 11.3, 22.6, 33.8])
speed2 = np.array([2.1, 2.9, 4.5, 6.8, 8.1])
plt.plot(s, speed0, '.-', label="без ингибитора")
plt.plot(s, speed1, '.-', label="ингибитор 1")
plt.plot(s, speed2, '.-', label="ингибитор 2")
plt.xlabel("concentration")
plt.ylabel("speed")
plt.grid()
plt.legend()
plt.show()
# Lineweaver-Burk fits for all three series:
#   1/V = 1/V_max + (K_m / V_max) * (1/[S])
# so intercept a = 1/V_max and slope b = K_m / V_max for each fit.
def trend_line(t, a, b):
    """Straight line a + b * t (a: intercept, b: slope)."""
    return a + b * t
a0, b0 = curve_fit(lambda t, a, b: trend_line(t, a, b), 1 / s,
                   1 / speed0)[0]
a1, b1 = curve_fit(lambda t, a, b: trend_line(t, a, b), 1 / s,
                   1 / speed1)[0]
a2, b2 = curve_fit(lambda t, a, b: trend_line(t, a, b), 1 / s,
                   1 / speed2)[0]
plt.plot(1 / s, 1 / speed0, 'r.', label="без ингибитора")
plt.plot(1 / s, 1 / speed1, 'g.', label="ингибитор 1")
plt.plot(1 / s, 1 / speed2, 'b.', label="ингибитор 2")
x = np.linspace(-100000, 350000, 100)
plt.plot(x, a0 + x * b0, 'r-')
plt.plot(x, a1 + x * b1, 'g-')
plt.plot(x, a2 + x * b2, 'b-')
plt.grid()
plt.xlabel("1 / [S]")
plt.ylabel("1 / V")
plt.legend()
plt.show()
# Bug fix (three places): each slope b already *is* K_m / V_max, so
# K_m = b * V_max. The previous np.tan(b) wrongly treated the slope as an
# angle in radians.
v_max = 1 / a0
k_m = b0 * v_max
print("без ингибитора V_max = {}, K_m = {}".format(v_max, k_m))
v_max = 1 / a1
k_m = b1 * v_max
print("ингибитор 1 V_max = {}, K_m = {}".format(v_max, k_m))
v_max = 1 / a2
k_m = b2 * v_max
print("ингибитор 2 V_max = {}, K_m = {}".format(v_max, k_m))
```
| github_jupyter |
# Time Series Prediction
**Objectives**
1. Build a linear, DNN and CNN model in Keras.
2. Build a simple RNN model and a multi-layer RNN model in Keras.
In this lab we will work with a linear, DNN and CNN model.
Since the features of our model are sequential in nature, we'll next look at how to build various RNN models in Keras. We'll start with a simple RNN model and then see how to create a multi-layer RNN in Keras.
We will be exploring a lot of different model types in this notebook.
```
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install --user google-cloud-bigquery==1.25.0
```
**Note**: Restart your kernel to use updated packages.
Kindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage.
## Load necessary libraries and set up environment variables
```
PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
%env
PROJECT = PROJECT
BUCKET = BUCKET
REGION = REGION
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import bigquery
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, DenseFeatures,
Conv1D, MaxPool1D,
Reshape, RNN,
LSTM, GRU, Bidirectional)
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
# To plot pretty figures
%matplotlib inline
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# For reproducible results.
from numpy.random import seed
seed(1)
tf.random.set_seed(2)
```
## Explore time series data
We'll start by pulling a small sample of the time series data from Big Query and write some helper functions to clean up the data for modeling. We'll use the data from the `percent_change_sp500` table in BigQuery. The `close_values_prior_260` column contains the close values for any given stock for the previous 260 days.
```
%%time
bq = bigquery.Client(project=PROJECT)
bq_query = '''
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
LIMIT
100
'''
```
The function `clean_data` below does three things:
1. First, we'll remove any inf or NA values
2. Next, we parse the `Date` field to read it as a string.
3. Lastly, we convert the label `direction` into a numeric quantity, mapping 'DOWN' to 0, 'STAY' to 1 and 'UP' to 2.
```
def clean_data(input_df):
    """Cleans data to prepare for training.

    Drops rows containing infinite or missing values, re-formats the
    `Date` column as a 'YYYY-MM-DD' string (TF doesn't accept datetimes
    in a DataFrame) and adds a numeric `direction_numeric` label
    (DOWN -> 0, STAY -> 1, UP -> 2).

    Args:
        input_df: Pandas dataframe.
    Returns:
        Pandas dataframe.
    """
    df = input_df.copy()
    # Remove inf/na values. Bug fix: the original `(df == np.inf)` test
    # kept rows containing -inf; check both signs explicitly (elementwise
    # equality is safe on mixed-dtype frames, unlike np.isinf).
    bad_rows = ((df == np.inf) | (df == -np.inf)).any(axis=1)
    df = df[~bad_rows].dropna()
    # TF doesn't accept datetimes in DataFrame.
    df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
    df['Date'] = df['Date'].dt.strftime('%Y-%m-%d')
    # TF requires numeric label.
    df['direction_numeric'] = df['direction'].apply(lambda x: {'DOWN': 0,
                                                               'STAY': 1,
                                                               'UP': 2}[x])
    return df
```
## Read data and preprocessing
Before we begin modeling, we'll preprocess our features by scaling to the z-score. This will ensure that the range of the feature values being fed to the model are comparable and should help with convergence during gradient descent.
```
STOCK_HISTORY_COLUMN = 'close_values_prior_260'
COL_NAMES = ['day_' + str(day) for day in range(0, 260)]
LABEL = 'direction_numeric'
def _scale_features(df):
"""z-scale feature columns of Pandas dataframe.
Args:
features: Pandas dataframe.
Returns:
Pandas dataframe with each column standardized according to the
values in that column.
"""
avg = df.mean()
std = df.std()
return (df - avg) / std
def create_features(df, label_name):
"""Create modeling features and label from Pandas dataframe.
Args:
df: Pandas dataframe.
label_name: str, the column name of the label.
Returns:
Pandas dataframe
"""
# Expand 1 column containing a list of close prices to 260 columns.
time_series_features = df[STOCK_HISTORY_COLUMN].apply(pd.Series)
# Rename columns.
time_series_features.columns = COL_NAMES
time_series_features = _scale_features(time_series_features)
# Concat time series features with static features and label.
label_column = df[LABEL]
return pd.concat([time_series_features,
label_column], axis=1)
```
### Make train-eval-test split
Next, we'll make repeatable splits for our train/validation/test datasets and save these datasets to local csv files. The query below will take a subsample of the entire dataset and then create a 70-15-15 split for the train/validation/test sets.
```
def _create_split(phase):
"""Create string to produce train/valid/test splits for a SQL query.
Args:
phase: str, either TRAIN, VALID, or TEST.
Returns:
String.
"""
floor, ceiling = '2002-11-01', '2010-07-01'
if phase == 'VALID':
floor, ceiling = '2010-07-01', '2011-09-01'
elif phase == 'TEST':
floor, ceiling = '2011-09-01', '2012-11-30'
return '''
WHERE Date >= '{0}'
AND Date < '{1}'
'''.format(floor, ceiling)
def create_query(phase):
"""Create SQL query to create train/valid/test splits on subsample.
Args:
phase: str, either TRAIN, VALID, or TEST.
sample_size: str, amount of data to take for subsample.
Returns:
String.
"""
basequery = """
#standardSQL
SELECT
symbol,
Date,
direction,
close_values_prior_260
FROM
`stock_market.eps_percent_change_sp500`
"""
return basequery + _create_split(phase)
```
## Modeling
For experimentation purposes, we'll train various models using data we can fit in memory using the `.csv` files we created above.
```
# Number of time steps per instance (260 prior close values) and number of
# target classes (DOWN / STAY / UP).
N_TIME_STEPS = 260
N_LABELS = 3
# Load the pre-exported train/validation splits.
Xtrain = pd.read_csv('stock-train.csv')
Xvalid = pd.read_csv('stock-valid.csv')
# Separate the label column and one-hot encode it for softmax training.
ytrain = Xtrain.pop(LABEL)
yvalid = Xvalid.pop(LABEL)
ytrain_categorical = to_categorical(ytrain.values)
yvalid_categorical = to_categorical(yvalid.values)
```
To monitor training progress and compare evaluation metrics for different models, we'll use the function below to plot metrics captured from the training job such as training and validation loss or accuracy.
```
def plot_curves(train_data, val_data, label='Accuracy'):
    """Plot training and validation metrics on single axis.

    Args:
        train_data: list, metrics obtained from training data.
        val_data: list, metrics obtained from validation data.
        label: str, title and label for plot.
    Returns:
        Matplotlib plot.
    """
    # Training points are offset by half an epoch, validation points land
    # on whole epochs.
    train_x = np.arange(len(train_data)) + 0.5
    val_x = np.arange(len(val_data)) + 1
    plt.plot(train_x, train_data, "b.-", label="Training " + label)
    plt.plot(val_x, val_data, "r.-", label="Validation " + label)
    # Force integer ticks on the epoch axis.
    plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
    plt.legend(fontsize=14)
    plt.xlabel("Epochs")
    plt.ylabel(label)
    plt.grid(True)
```
### Baseline
Before we begin modeling in Keras, let's create a benchmark using a simple heuristic. Let's see what kind of accuracy we would get on the validation set if we predict the majority class of the training set.
```
sum(yvalid == ytrain.value_counts().idxmax()) / yvalid.shape[0]
```
Ok. So just naively guessing the most common outcome `UP` will give about 29.5% accuracy on the validation set.
### Linear model
We'll start with a simple linear model, mapping our sequential input to a single fully dense layer.
```
# Linear (softmax-regression) baseline: a single dense softmax layer with
# L1 weight regularization, trained full-batch.
model = Sequential()
model.add(Dense(units=N_LABELS,
                activation='softmax',
                kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# Fix: `lr` is a deprecated alias removed in newer Keras releases; use
# `learning_rate` — the value and behavior are unchanged.
model.compile(optimizer=Adam(learning_rate=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
                    y=ytrain_categorical,
                    batch_size=Xtrain.shape[0],  # full-batch gradient descent
                    validation_data=(Xvalid.values, yvalid_categorical),
                    epochs=30,
                    verbose=0)
plot_curves(history.history['loss'],
            history.history['val_loss'],
            label='Loss')
plot_curves(history.history['accuracy'],
            history.history['val_accuracy'],
            label='Accuracy')
```
The accuracy seems to level out pretty quickly. To report the accuracy, we'll average the accuracy on the validation set across the last few epochs of training.
```
np.mean(history.history['val_accuracy'][-5:])
```
### Deep Neural Network
The linear model is an improvement on our naive benchmark. Perhaps we can do better with a more complicated model. Next, we'll create a deep neural network with Keras. We'll experiment with a two layer DNN here but feel free to try a more complex model or add any other additional techniques to try to improve your performance.
```
# Two-hidden-layer DNN (ReLU) with an L1-regularized softmax output.
dnn_hidden_units = [16, 8]
model = Sequential()
for layer in dnn_hidden_units:
    model.add(Dense(units=layer,
                    activation="relu"))
model.add(Dense(units=N_LABELS,
                activation="softmax",
                kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# NOTE(review): `lr` is a deprecated alias of `learning_rate` in tf.keras.
model.compile(optimizer=Adam(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Full-batch training (batch_size equals the number of training rows).
history = model.fit(x=Xtrain.values,
                    y=ytrain_categorical,
                    batch_size=Xtrain.shape[0],
                    validation_data=(Xvalid.values, yvalid_categorical),
                    epochs=10,
                    verbose=0)
plot_curves(history.history['loss'],
            history.history['val_loss'],
            label='Loss')
plot_curves(history.history['accuracy'],
            history.history['val_accuracy'],
            label='Accuracy')
# Average validation accuracy over the last 5 epochs.
np.mean(history.history['val_accuracy'][-5:])
```
### Convolutional Neural Network
The DNN does slightly better. Let's see how a convolutional neural network performs.
A 1-dimensional convolutional can be useful for extracting features from sequential data or deriving features from shorter, fixed-length segments of the data set. Check out the documentation for how to implement a [Conv1d in Tensorflow](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D). Max pooling is a downsampling strategy commonly used in conjunction with convolutional neural networks. Next, we'll build a CNN model in Keras using the `Conv1D` to create convolution layers and `MaxPool1D` to perform max pooling before passing to a fully connected dense layer.
```
# 1-D CNN: convolution + max-pooling feature extraction, then a dense head.
model = Sequential()
# Convolutional layer
# Reshape flat (260,) rows into (260, 1) sequences for Conv1D.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(Conv1D(filters=5,
                 kernel_size=5,
                 strides=2,
                 padding="valid",
                 input_shape=[None, 1]))
model.add(MaxPool1D(pool_size=2,
                    strides=None,
                    padding='valid'))
# Flatten the result and pass through DNN.
model.add(tf.keras.layers.Flatten())
model.add(Dense(units=N_TIME_STEPS//4,
                activation="relu"))
model.add(Dense(units=N_LABELS,
                activation="softmax",
                kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# NOTE(review): `lr` is a deprecated alias of `learning_rate` in tf.keras.
model.compile(optimizer=Adam(lr=0.01),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
                    y=ytrain_categorical,
                    batch_size=Xtrain.shape[0],  # full-batch training
                    validation_data=(Xvalid.values, yvalid_categorical),
                    epochs=10,
                    verbose=0)
plot_curves(history.history['loss'],
            history.history['val_loss'],
            label='Loss')
plot_curves(history.history['accuracy'],
            history.history['val_accuracy'],
            label='Accuracy')
# Average validation accuracy over the last 5 epochs.
np.mean(history.history['val_accuracy'][-5:])
```
### Recurrent Neural Network
RNNs are particularly well-suited for learning sequential data. They retain state information from one iteration to the next by feeding the output from one cell as input for the next step. In the cell below, we'll build a RNN model in Keras. The final state of the RNN is captured and then passed through a fully connected layer to produce a prediction.
```
# Single-layer LSTM: the final hidden state feeds a softmax classifier.
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
model.add(LSTM(N_TIME_STEPS // 8,
               activation='relu',
               return_sequences=False))  # keep only the final state
model.add(Dense(units=N_LABELS,
                activation='softmax',
                kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# Create the model.
# NOTE(review): `lr` is a deprecated alias of `learning_rate` in tf.keras.
model.compile(optimizer=Adam(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
                    y=ytrain_categorical,
                    batch_size=Xtrain.shape[0],  # full-batch training
                    validation_data=(Xvalid.values, yvalid_categorical),
                    epochs=40,
                    verbose=0)
plot_curves(history.history['loss'],
            history.history['val_loss'],
            label='Loss')
plot_curves(history.history['accuracy'],
            history.history['val_accuracy'],
            label='Accuracy')
# Average validation accuracy over the last 5 epochs.
np.mean(history.history['val_accuracy'][-5:])
```
### Multi-layer RNN
Next, we'll build multi-layer RNN. Just as multiple layers of a deep neural network allow for more complicated features to be learned during training, additional RNN layers can potentially learn complex features in sequential data. For a multi-layer RNN the output of the first RNN layer is fed as the input into the next RNN layer.
```
# Stacked GRUs: every layer except the last returns full sequences so the
# next recurrent layer receives sequence input.
rnn_hidden_units = [N_TIME_STEPS // 16,
                    N_TIME_STEPS // 32]
model = Sequential()
# Reshape inputs to pass through RNN layer.
model.add(Reshape(target_shape=[N_TIME_STEPS, 1]))
for layer in rnn_hidden_units[:-1]:
    model.add(GRU(units=layer,
                  activation='relu',
                  return_sequences=True))
model.add(GRU(units=rnn_hidden_units[-1],
              return_sequences=False))  # final layer emits only the last state
model.add(Dense(units=N_LABELS,
                activation="softmax",
                kernel_regularizer=tf.keras.regularizers.l1(l=0.1)))
# NOTE(review): `lr` is a deprecated alias of `learning_rate` in tf.keras.
model.compile(optimizer=Adam(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=Xtrain.values,
                    y=ytrain_categorical,
                    batch_size=Xtrain.shape[0],  # full-batch training
                    validation_data=(Xvalid.values, yvalid_categorical),
                    epochs=50,
                    verbose=0)
plot_curves(history.history['loss'],
            history.history['val_loss'],
            label='Loss')
plot_curves(history.history['accuracy'],
            history.history['val_accuracy'],
            label='Accuracy')
# Average validation accuracy over the last 5 epochs.
np.mean(history.history['val_accuracy'][-5:])
```
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
# Counterfactual explanations with one-hot encoded categorical variables
Real world machine learning applications often handle data with categorical variables. Explanation methods which rely on perturbations of the input features need to make sure those perturbations are meaningful and capture the underlying structure of the data. This becomes tricky for categorical features. For instance random perturbations across possible categories or enforcing a ranking between categories based on frequency of occurrence in the training data do not capture this structure. Our method captures the relation between categories of a variable numerically through the context given by the other features in the data and/or the predictions made by the model. First it captures the pairwise distances between categories and then applies multi-dimensional scaling. More details about the method can be found in the [documentation](https://docs.seldon.io/projects/alibi/en/latest/methods/CFProto.html). The example notebook illustrates this approach on the *adult* dataset, which contains a mixture of categorical and numerical features used to predict whether a person's income is above or below $50k.
```
import tensorflow as tf
tf.get_logger().setLevel(40) # suppress deprecation messages
tf.compat.v1.disable_v2_behavior() # disable TF2 behaviour as alibi code still relies on TF1 constructs
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn.preprocessing import OneHotEncoder
from time import time
from alibi.datasets import fetch_adult
from alibi.explainers import CounterfactualProto
from alibi.utils.mapping import ohe_to_ord, ord_to_ohe
print('TF version: ', tf.__version__)
print('Eager execution enabled: ', tf.executing_eagerly()) # False
```
## Load adult dataset
The `fetch_adult` function returns a `Bunch` object containing the features, the targets, the feature names and a mapping of the categories in each categorical variable.
```
adult = fetch_adult()
data = adult.data
target = adult.target
feature_names = adult.feature_names
category_map_tmp = adult.category_map
target_names = adult.target_names
```
Define shuffled training and test set:
```
def set_seed(s=0):
    """Seed NumPy and TensorFlow RNGs for reproducible runs."""
    np.random.seed(s)
    tf.random.set_seed(s)
set_seed()
# Shuffle features and target together, then split them back apart.
data_perm = np.random.permutation(np.c_[data, target])
X = data_perm[:,:-1]
y = data_perm[:,-1]
idx = 30000
# NOTE(review): slicing with idx+1 skips row `idx` entirely, so one sample
# belongs to neither split — presumably unintentional, but the same
# convention is used for the X split below, so it is kept consistent here.
y_train, y_test = y[:idx], y[idx+1:]
```
Reorganize data so categorical features come first:
```
X = np.c_[X[:, 1:8], X[:, 11], X[:, 0], X[:, 8:11]]
```
Adjust `feature_names` and `category_map` as well:
```
feature_names = feature_names[1:8] + feature_names[11:12] + feature_names[0:1] + feature_names[8:11]
print(feature_names)
category_map = {}
for i, (_, v) in enumerate(category_map_tmp.items()):
category_map[i] = v
```
Create a dictionary with as keys the categorical columns and values the number of categories for each variable in the dataset:
```
cat_vars_ord = {}
n_categories = len(list(category_map.keys()))
for i in range(n_categories):
cat_vars_ord[i] = len(np.unique(X[:, i]))
print(cat_vars_ord)
```
Since we will apply one-hot encoding (OHE) on the categorical variables, we convert `cat_vars_ord` from the ordinal to OHE format. `alibi.utils.mapping` contains utility functions to do this. The keys in `cat_vars_ohe` now represent the first column index for each one-hot encoded categorical variable. This dictionary will later be used in the counterfactual explanation.
```
cat_vars_ohe = ord_to_ohe(X, cat_vars_ord)[1]
print(cat_vars_ohe)
```
## Preprocess data
Scale numerical features between -1 and 1:
```
# Scale the 4 numerical features (last columns) linearly into [-1, 1]
# using per-column min/max.
X_num = X[:, -4:].astype(np.float32, copy=False)
xmin, xmax = X_num.min(axis=0), X_num.max(axis=0)
rng = (-1., 1.)
X_num_scaled = (X_num - xmin) / (xmax - xmin) * (rng[1] - rng[0]) + rng[0]
# NOTE(review): the idx+1 slice skips row `idx`, consistent with the
# y_train/y_test split above.
X_num_scaled_train = X_num_scaled[:idx, :]
X_num_scaled_test = X_num_scaled[idx+1:, :]
```
Apply OHE to categorical variables:
```
X_cat = X[:, :-4].copy()
ohe = OneHotEncoder(categories='auto')
ohe.fit(X_cat)
X_cat_ohe = ohe.transform(X_cat)
```
Combine numerical and categorical data:
```
X = np.c_[X_cat_ohe.todense(), X_num_scaled].astype(np.float32, copy=False)
X_train, X_test = X[:idx, :], X[idx+1:, :]
print(X_train.shape, X_test.shape)
```
## Train neural net
```
def nn_ohe():
    """Build and compile an MLP classifier over the 57 one-hot-encoded
    input features: three Dense(60, relu) + Dropout(0.2) stages and a
    2-class softmax output."""
    inputs = Input(shape=(57,))
    hidden = inputs
    # Three identical Dense + Dropout stages.
    for _ in range(3):
        hidden = Dense(60, activation='relu')(hidden)
        hidden = Dropout(.2)(hidden)
    outputs = Dense(2, activation='softmax')(hidden)
    network = Model(inputs=inputs, outputs=outputs)
    network.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return network
set_seed()
nn = nn_ohe()
nn.summary()
nn.fit(X_train, to_categorical(y_train), batch_size=256, epochs=30, verbose=0)
```
## Generate counterfactual
Original instance:
```
X = X_test[0].reshape((1,) + X_test[0].shape)
```
Initialize counterfactual parameters. The feature perturbations are applied in the numerical feature space, after transforming the categorical variables to numerical features. As a result, the dimensionality and values of `feature_range` are defined in the numerical space.
```
shape = X.shape
beta = .01
c_init = 1.
c_steps = 5
max_iterations = 500
rng = (-1., 1.) # scale features between -1 and 1
rng_shape = (1,) + data.shape[1:]
feature_range = ((np.ones(rng_shape) * rng[0]).astype(np.float32),
(np.ones(rng_shape) * rng[1]).astype(np.float32))
```
Initialize explainer:
```
def set_seed(s=0):
np.random.seed(s)
tf.random.set_seed(s)
set_seed()
cf = CounterfactualProto(nn,
shape,
beta=beta,
cat_vars=cat_vars_ohe,
ohe=True, # OHE flag
max_iterations=max_iterations,
feature_range=feature_range,
c_init=c_init,
c_steps=c_steps
)
```
Fit explainer. `d_type` refers to the distance metric used to convert the categorical to numerical values. Valid options are `abdm`, `mvdm` and `abdm-mvdm`. `abdm` infers the distance between categories of the same variable from the context provided by the other variables. This requires binning of the numerical features as well. `mvdm` computes the distance using the model predictions, and `abdm-mvdm` combines both methods. More info on both distance measures can be found in the [documentation](https://docs.seldon.io/projects/alibi/en/latest/methods/CFProto.html).
```
cf.fit(X_train, d_type='abdm', disc_perc=[25, 50, 75]);
```
We can now visualize the transformation from the categorical to numerical values for each category. The example below shows that the **Education** feature is ordered from *High School Dropout* to having obtained a *Doctorate* degree. As a result, if we perturb an instance representing a person that has obtained a *Bachelors* degree, the nearest perturbations will result in a counterfactual instance with either a *Masters* or an *Associates* degree.
```
def plot_bar(dist, cols, figsize=(10,4)):
    """Bar-plot the numerical distance of each category, sorted ascending,
    and print the sorted category names."""
    flat = dist.reshape(dist.shape[0])
    order = np.argsort(flat)
    fig, ax = plt.subplots(figsize=figsize)
    plt.bar(cols[order], flat[order])
    print(cols[order])
cat = 'Education'
idx = feature_names.index(cat)
plot_bar(cf.d_abs[idx], np.array(category_map[idx]), figsize=(20,4))
```
Explain instance:
```
explanation = cf.explain(X)
```
Helper function to more clearly describe explanations:
```
def describe_instance(X, explanation, eps=1e-2):
    """Print a human-readable summary of a counterfactual explanation:
    original vs. counterfactual class and probabilities, followed by the
    categorical and numerical feature perturbations.

    Args:
        X: original instance (one row in the OHE feature space).
        explanation: explanation object returned by CounterfactualProto.
        eps: smallest absolute numerical change worth reporting.
    """
    print('Original instance: {} -- proba: {}'.format(target_names[explanation.orig_class],
                                                      explanation.orig_proba[0]))
    print('Counterfactual instance: {} -- proba: {}'.format(target_names[explanation.cf['class']],
                                                            explanation.cf['proba'][0]))
    print('\nCounterfactual perturbations...')
    print('\nCategorical:')
    # Map one-hot encodings back to ordinal category indices.
    X_orig_ord = ohe_to_ord(X, cat_vars_ohe)[0]
    X_cf_ord = ohe_to_ord(explanation.cf['X'], cat_vars_ohe)[0]
    delta_cat = {}
    for i, (_, v) in enumerate(category_map.items()):
        cat_orig = v[int(X_orig_ord[0, i])]
        cat_cf = v[int(X_cf_ord[0, i])]
        # Report only categories that actually changed.
        if cat_orig != cat_cf:
            delta_cat[feature_names[i]] = [cat_orig, cat_cf]
    if delta_cat:
        for k, v in delta_cat.items():
            print('{}: {} --> {}'.format(k, v[0], v[1]))
    print('\nNumerical:')
    # The last 4 columns are the scaled numerical features.
    delta_num = X_cf_ord[0, -4:] - X_orig_ord[0, -4:]
    n_keys = len(list(cat_vars_ord.keys()))
    # NOTE(review): delta_num.shape[1] and delta_num[0, i] only work if the
    # row slice stays 2-D (e.g. np.matrix, as produced upstream via
    # .todense()); with a plain ndarray this would raise IndexError —
    # confirm the input type before reusing this helper elsewhere.
    for i in range(delta_num.shape[1]):
        if np.abs(delta_num[0, i]) > eps:
            print('{}: {:.2f} --> {:.2f}'.format(feature_names[i+n_keys],
                                                 X_orig_ord[0,i+n_keys],
                                                 X_cf_ord[0,i+n_keys]))
describe_instance(X, explanation)
```
By obtaining a higher level of education the income is predicted to be above $50k.
## Change the categorical distance metric
Instead of `abdm`, we now use `mvdm` as our distance metric.
```
set_seed()
cf.fit(X_train, d_type='mvdm')
explanation = cf.explain(X)
describe_instance(X, explanation)
```
The same conclusion hold using a different distance metric.
## Use k-d trees to build prototypes
We can also use *k-d trees* to build class prototypes to guide the counterfactual to nearby instances in the counterfactual class as described in [Interpretable Counterfactual Explanations Guided by Prototypes](https://arxiv.org/abs/1907.02584).
```
use_kdtree = True
theta = 10. # weight of prototype loss term
```
Initialize, fit and explain instance:
```
set_seed()
X = X_test[7].reshape((1,) + X_test[0].shape)
cf = CounterfactualProto(nn,
shape,
beta=beta,
theta=theta,
cat_vars=cat_vars_ohe,
ohe=True,
use_kdtree=use_kdtree,
max_iterations=max_iterations,
feature_range=feature_range,
c_init=c_init,
c_steps=c_steps
)
cf.fit(X_train, d_type='abdm')
explanation = cf.explain(X)
describe_instance(X, explanation)
```
By slightly increasing the age of the person the income would be predicted to be above $50k.
## Use an autoencoder to build prototypes
Another option is to use an autoencoder to guide the perturbed instance to the counterfactual class. We define and train the autoencoder:
```
def ae_model():
    """Build a symmetric dense autoencoder for the 57-feature inputs.

    Returns a tuple ``(autoencoder, encoder, decoder)``; only the
    autoencoder is compiled (Adam, MSE).
    """
    # Encoder: 57 -> 60 -> 30 -> 15 -> 10, linear bottleneck.
    x_in = Input(shape=(57,))
    hidden = x_in
    for width in (60, 30, 15):
        hidden = Dense(width, activation='relu')(hidden)
    encoded = Dense(10, activation=None)(hidden)
    encoder = Model(x_in, encoded)
    # Decoder mirrors the encoder: 10 -> 15 -> 30 -> 60 -> 57, linear output.
    dec_in = Input(shape=(10,))
    hidden = dec_in
    for width in (15, 30, 60):
        hidden = Dense(width, activation='relu')(hidden)
    decoded = Dense(57, activation=None)(hidden)
    decoder = Model(dec_in, decoded)
    # Chain encoder and decoder into the full autoencoder.
    x_out = decoder(encoder(x_in))
    autoencoder = Model(x_in, x_out)
    autoencoder.compile(optimizer='adam', loss='mse')
    return autoencoder, encoder, decoder
# Build and train the autoencoder to reconstruct the training data.
set_seed()
ae, enc, dec = ae_model()
ae.summary()
ae.fit(X_train, X_train, batch_size=128, epochs=100, validation_data=(X_test, X_test), verbose=0)
```
Weights for the autoencoder and prototype loss terms:
```
# Loss-term weights for the prototype counterfactual objective.
beta = .1  # L1 sparsity term
gamma = 10.  # autoencoder reconstruction term
theta = .1  # prototype term
```
Initialize, fit and explain instance:
```
# Counterfactual guided by the trained autoencoder and encoder prototypes.
set_seed()
X = X_test[19].reshape((1,) + X_test[0].shape)
cf = CounterfactualProto(nn,
                         shape,
                         beta=beta,
                         enc_model=enc,
                         ae_model=ae,
                         gamma=gamma,
                         theta=theta,
                         cat_vars=cat_vars_ohe,
                         ohe=True,
                         max_iterations=max_iterations,
                         feature_range=feature_range,
                         c_init=c_init,
                         c_steps=c_steps
                         )
cf.fit(X_train, d_type='abdm')
explanation = cf.explain(X)
describe_instance(X, explanation)
```
## Black box model with k-d trees
Now we assume that we only have access to the model's prediction function and treat it as a black box. The k-d trees are again used to define the prototypes.
```
# Black-box setting also uses k-d tree prototypes.
use_kdtree = True
theta = 10.  # weight of prototype loss term
```
Initialize, fit and explain instance:
```
# Black-box setting: only the model's prediction function is exposed.
set_seed()
X = X_test[24].reshape((1,) + X_test[0].shape)
# define predict function
predict_fn = lambda x: nn.predict(x)
cf = CounterfactualProto(predict_fn,
                         shape,
                         beta=beta,
                         theta=theta,
                         cat_vars=cat_vars_ohe,
                         ohe=True,
                         use_kdtree=use_kdtree,
                         max_iterations=max_iterations,
                         feature_range=feature_range,
                         c_init=c_init,
                         c_steps=c_steps
                         )
cf.fit(X_train, d_type='abdm')
explanation = cf.explain(X)
describe_instance(X, explanation)
```
If the person was younger and worked less, he or she would have a predicted income below $50k.
| github_jupyter |
# {glue:text}`jupyter_github_org`
**Activity from {glue:}`jupyter_start` to {glue:}`jupyter_stop`**
```
from datetime import date
from dateutil.relativedelta import relativedelta
from myst_nb import glue
import seaborn as sns
import pandas as pd
import numpy as np
import altair as alt
from markdown import markdown
from IPython.display import Markdown
from ipywidgets.widgets import HTML, Tab
from ipywidgets import widgets
from datetime import timedelta
from matplotlib import pyplot as plt
import os.path as op
from warnings import simplefilter
simplefilter('ignore')
# Altair config
def author_url(author):
    """Return the GitHub profile URL for *author*."""
    return "https://github.com/" + author
def alt_theme():
    """Altair theme config: enlarge axis label fonts on both axes."""
    axis_style = {'labelFontSize': 15}
    return {
        'config': {
            'axisLeft': dict(axis_style),
            'axisBottom': dict(axis_style),
        }
    }
# Register and activate the custom Altair theme.
alt.themes.register('my_theme', alt_theme)
alt.themes.enable("my_theme")
# Define colors we'll use for GitHub membership
author_types = ['MEMBER', 'CONTRIBUTOR', 'COLLABORATOR', "NONE"]
author_palette = np.array(sns.palettes.blend_palette(["lightgrey", "lightgreen", "darkgreen"], 4)) * 256
author_colors = ["rgb({}, {}, {})".format(*color) for color in author_palette]
# NOTE(review): this maps each type to the raw RGB array, not the formatted
# "rgb(...)" string in `author_colors` — presumably `author_colors` was
# intended; confirm before relying on this dict.
author_color_dict = {key: val for key, val in zip(author_types, author_palette)}
github_org = "jupyterhub"
top_n_repos = 15
n_days = 10
# Parameters — papermill-style overrides of the defaults above.
github_org = "jupyter"
n_days = 90
############################################################
# Variables
stop = date.today()
start = date.today() - relativedelta(days=n_days)
# Strings for use in queries
start_date = f"{start:%Y-%m-%d}"
stop_date = f"{stop:%Y-%m-%d}"
# Glue variables for use in markdown headings elsewhere in the book.
glue(f"{github_org}_github_org", github_org, display=False)
glue(f"{github_org}_start", start_date, display=False)
glue(f"{github_org}_stop", stop_date, display=False)
```
## Load data
Load and clean up the data
```
from pathlib import Path
path_data = Path("../data")
# Load the scraped GitHub activity tables, dropping exact duplicate rows.
comments = pd.read_csv(path_data.joinpath('comments.csv'), index_col=None).drop_duplicates()
issues = pd.read_csv(path_data.joinpath('issues.csv'), index_col=None).drop_duplicates()
prs = pd.read_csv(path_data.joinpath('prs.csv'), index_col=None).drop_duplicates()
# Keep only rows belonging to the configured GitHub organisation.
for idata in [comments, issues, prs]:
    idata.query("org == @github_org", inplace=True)
# What are the top N repos, we will only plot these in the full data plots
top_commented_repos = comments.groupby("repo").count().sort_values("createdAt", ascending=False)['createdAt']
use_repos = top_commented_repos.head(top_n_repos).index.tolist()
```
## Merged Pull requests
Here's an analysis of **merged pull requests** across each of the repositories in the Jupyter
ecosystem.
```
# PRs merged within the reporting window.
merged = prs.query('state == "MERGED" and closedAt > @start_date and closedAt < @stop_date')
# Merged-PR counts per (org, repo) for the bar chart.
prs_by_repo = merged.groupby(['org', 'repo']).count()['author'].reset_index().sort_values(['org', 'author'], ascending=False)
alt.Chart(data=prs_by_repo, title=f"Merged PRs in the last {n_days} days").mark_bar().encode(
    x=alt.X('repo', sort=prs_by_repo['repo'].values.tolist()),
    y='author',
    color='org'
)
```
### Authoring and merging stats by repository
Let's see who has been doing most of the PR authoring and merging. The PR author is generally the
person that implemented a change in the repository (code, documentation, etc). The PR merger is
the person that "pressed the green button" and got the change into the main codebase.
```
# Prep our merging DF
# Per-(repo, author) authored-PR counts, keeping each author's association.
merged_by_repo = merged.groupby(['repo', 'author'], as_index=False).agg({'id': 'count', 'authorAssociation': 'first'}).rename(columns={'id': "authored", 'author': 'username'})
# Per-(repo, merger) merged-PR counts.
closed_by_repo = merged.groupby(['repo', 'mergedBy']).count()['id'].reset_index().rename(columns={'id': "closed", "mergedBy": "username"})
charts = []
title = f"PR authors for {github_org} in the last {n_days} days"
# Aggregate author counts across repos and plot, colored by association.
this_data = merged_by_repo.replace(np.nan, 0).groupby('username', as_index=False).agg({'authored': 'sum', 'authorAssociation': 'first'})
this_data = this_data.sort_values('authored', ascending=False)
ch = alt.Chart(data=this_data, title=title).mark_bar().encode(
    x='username',
    y='authored',
    color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))
)
ch
charts = []
title = f"Merges for {github_org} in the last {n_days} days"
ch = alt.Chart(data=closed_by_repo.replace(np.nan, 0), title=title).mark_bar().encode(
    x='username',
    y='closed',
)
ch
```
## Issues
Issues are **conversations** that happen on our GitHub repositories. Here's an
analysis of issues across the Jupyter organizations.
```
# Issues opened / closed inside the reporting window.
created = issues.query('state == "OPEN" and createdAt > @start_date and createdAt < @stop_date')
closed = issues.query('state == "CLOSED" and closedAt > @start_date and closedAt < @stop_date')
created_counts = created.groupby(['org', 'repo']).count()['number'].reset_index()
created_counts['org/repo'] = created_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)
sorted_vals = created_counts.sort_values(['org', 'number'], ascending=False)['repo'].values
alt.Chart(data=created_counts, title=f"Issues created in the last {n_days} days").mark_bar().encode(
    x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),
    y='number',
)
closed_counts = closed.groupby(['org', 'repo']).count()['number'].reset_index()
closed_counts['org/repo'] = closed_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)
sorted_vals = closed_counts.sort_values(['number'], ascending=False)['repo'].values
alt.Chart(data=closed_counts, title=f"Issues closed in the last {n_days} days").mark_bar().encode(
    x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),
    y='number',
)
# Combine created/closed counts into long format for a grouped bar chart.
created_closed = pd.merge(created_counts.rename(columns={'number': 'created'}).drop(columns='org/repo'),
                          closed_counts.rename(columns={'number': 'closed'}).drop(columns='org/repo'),
                          on=['org', 'repo'], how='outer')
created_closed = pd.melt(created_closed, id_vars=['org', 'repo'], var_name="kind", value_name="count").replace(np.nan, 0)
charts = []
# Pick the top 10 repositories
top_repos = created_closed.groupby(['repo']).sum().sort_values(by='count', ascending=False).head(10).index
ch = alt.Chart(created_closed.query('repo in @top_repos'), width=120).mark_bar().encode(
    x=alt.X("kind", axis=alt.Axis(labelFontSize=15, title="")),
    y=alt.Y('count', axis=alt.Axis(titleFontSize=15, labelFontSize=12)),
    color='kind',
    column=alt.Column("repo", header=alt.Header(title=f"Issue activity, last {n_days} days for {github_org}", titleFontSize=15, labelFontSize=12))
)
ch
# Set to datetime
# NOTE(review): `closed` is a slice produced by .query(); these .loc
# assignments may trigger pandas' SettingWithCopyWarning — consider adding
# .copy() after the query.
for kind in ['createdAt', 'closedAt']:
    closed.loc[:, kind] = pd.to_datetime(closed[kind])
closed.loc[:, 'time_open'] = closed['closedAt'] - closed['createdAt']
closed.loc[:, 'time_open'] = closed['time_open'].dt.total_seconds()
# Median time-to-close per repo, converted from seconds to days.
time_open = closed.groupby(['org', 'repo']).agg({'time_open': 'median'}).reset_index()
time_open['time_open'] = time_open['time_open'] / (60 * 60 * 24)
time_open['org/repo'] = time_open.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)
sorted_vals = time_open.sort_values(['org', 'time_open'], ascending=False)['repo'].values
alt.Chart(data=time_open, title=f"Time to close for issues closed in the last {n_days} days").mark_bar().encode(
    x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),
    y=alt.Y('time_open', title="Median Days Open"),
)
```
## Most-upvoted issues
```
# Render the 25 most-upvoted issues as a markdown bullet list.
thumbsup = issues.sort_values("thumbsup", ascending=False).head(25)
thumbsup = thumbsup[["title", "url", "number", "thumbsup", "repo"]]
bullet_lines = [
    f"- ({irow['thumbsup']}) {irow['title']} - {irow['repo']} - [#{irow['number']}]({irow['url']})"
    for _, irow in thumbsup.iterrows()
]
text = '\n'.join(bullet_lines)
HTML(markdown(text))
```
## Commenters across repositories
These are commenters across all issues and pull requests in the last several days.
These are colored by the commenter's association with the organization. For information
about what these associations mean, [see this StackOverflow post](https://stackoverflow.com/a/28866914/1927102).
```
# Comment counts per (org, repo, author, association) inside the window.
commentors = (
    comments
    .query("createdAt > @start_date and createdAt < @stop_date")
    .groupby(['org', 'repo', 'author', 'authorAssociation'])
    .count().rename(columns={'id': 'count'})['count']
    .reset_index()
    .sort_values(['org', 'count'], ascending=False)
)
n_plot = 50
charts = []
# One bar chart of top commenters per organisation, concatenated horizontally.
for ii, (iorg, idata) in enumerate(commentors.groupby(['org'])):
    title = f"Top {n_plot} commentors for {iorg} in the last {n_days} days"
    idata = idata.groupby('author', as_index=False).agg({'count': 'sum', 'authorAssociation': 'first'})
    idata = idata.sort_values('count', ascending=False).head(n_plot)
    ch = alt.Chart(data=idata.head(n_plot), title=title).mark_bar().encode(
        x='author',
        y='count',
        color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))
    )
    charts.append(ch)
alt.hconcat(*charts)
```
## First responders
First responders are the first people to respond to a new issue in one of the repositories.
The following plots show first responders for recently-created issues.
```
# Find the earliest comment on each issue/PR thread.
# NOTE(review): grouping comments by 'id' assumes `id` identifies the parent
# issue/PR of the comment rather than the comment itself — confirm against
# the scraper's schema.
first_comments = []
for (org, repo, issue_id), i_comments in comments.groupby(['org', 'repo', 'id']):
    ix_min = pd.to_datetime(i_comments['createdAt']).idxmin()
    first_comment = i_comments.loc[ix_min]
    # A duplicated index can make .loc return several rows; keep the first.
    if isinstance(first_comment, pd.DataFrame):
        first_comment = first_comment.iloc[0]
    first_comments.append(first_comment)
first_comments = pd.concat(first_comments, axis=1).T
# Make up counts for viz
first_responder_counts = first_comments.groupby(['org', 'author', 'authorAssociation'], as_index=False).\
    count().rename(columns={'id': 'n_first_responses'}).sort_values(['org', 'n_first_responses'], ascending=False)
n_plot = 50
title = f"Top {n_plot} first responders for {github_org} in the last {n_days} days"
idata = first_responder_counts.groupby('author', as_index=False).agg({'n_first_responses': 'sum', 'authorAssociation': 'first'})
idata = idata.sort_values('n_first_responses', ascending=False).head(n_plot)
ch = alt.Chart(data=idata.head(n_plot), title=title).mark_bar().encode(
    x='author',
    y='n_first_responses',
    color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))
)
ch
```
## Recent activity
### A list of merged PRs by project
Below is a tabbed readout of recently-merged PRs. Check out the title to get an idea for what they
implemented, and be sure to thank the PR author for their hard work!
```
# Build one widget tab per repository listing its recently merged PRs.
tabs = widgets.Tab(children=[])
for ii, ((org, repo), imerged) in enumerate(merged.query("repo in @use_repos").groupby(['org', 'repo'])):
    merged_by = {}  # per-user merge tallies (currently unused after the loop)
    pr_by = {}      # per-user authored tallies (currently unused after the loop)
    issue_md = []
    issue_md.append(f"#### Closed PRs for repo: [{org}/{repo}](https://github.com/{github_org}/{repo})")
    issue_md.append("")
    issue_md.append(f"##### ")
    for _, ipr in imerged.iterrows():
        user_name = ipr['author']
        user_url = author_url(user_name)
        pr_number = ipr['number']
        pr_html = ipr['url']
        pr_title = ipr['title']
        pr_closedby = ipr['mergedBy']
        pr_closedby_url = f"https://github.com/{pr_closedby}"
        # Tally how many PRs each user authored / merged.
        if user_name not in pr_by:
            pr_by[user_name] = 1
        else:
            pr_by[user_name] += 1
        if pr_closedby not in merged_by:
            merged_by[pr_closedby] = 1
        else:
            merged_by[pr_closedby] += 1
        text = f"* [(#{pr_number})]({pr_html}): _{pr_title}_ by **[@{user_name}]({user_url})** merged by **[@{pr_closedby}]({pr_closedby_url})**"
        issue_md.append(text)
    issue_md.append('')
    # Render the markdown and add it as a new tab titled with the repo name.
    markdown_html = markdown('\n'.join(issue_md))
    children = list(tabs.children)
    children.append(HTML(markdown_html))
    tabs.children = tuple(children)
    tabs.set_title(ii, repo)
tabs
```
### A list of recent issues
Below is a list of issues with recent activity in each repository. If they seem of interest
to you, click on their links and jump in to participate!
```
# Add comment count data to issues and PRs
comment_counts = (
    comments
    .query("createdAt > @start_date and createdAt < @stop_date")
    .groupby(['org', 'repo', 'id'])
    .count().iloc[:, 0].to_frame()
)
comment_counts.columns = ['n_comments']
comment_counts = comment_counts.reset_index()
n_plot = 5
# One tab per repo listing its most-commented issues of the window.
tabs = widgets.Tab(children=[])
for ii, (repo, i_issues) in enumerate(comment_counts.query("repo in @use_repos").groupby('repo')):
    issue_md = []
    issue_md.append("")
    issue_md.append(f"##### [{github_org}/{repo}](https://github.com/{github_org}/{repo})")
    top_issues = i_issues.sort_values('n_comments', ascending=False).head(n_plot)
    # Join the counts back onto the issue metadata (title, url, author...).
    top_issue_list = pd.merge(issues, top_issues, left_on=['org', 'repo', 'id'], right_on=['org', 'repo', 'id'])
    for _, issue in top_issue_list.sort_values('n_comments', ascending=False).head(n_plot).iterrows():
        user_name = issue['author']
        user_url = author_url(user_name)
        issue_number = issue['number']
        issue_html = issue['url']
        issue_title = issue['title']
        text = f"* [(#{issue_number})]({issue_html}): _{issue_title}_ by **[@{user_name}]({user_url})**"
        issue_md.append(text)
    issue_md.append('')
    # Render the markdown once and reuse it — previously this rendered the
    # identical markdown a second time when appending to the tab children.
    md_html = HTML(markdown('\n'.join(issue_md)))
    children = list(tabs.children)
    children.append(md_html)
    tabs.children = tuple(children)
    tabs.set_title(ii, repo)
display(Markdown(f"Here are the top {n_plot} active issues in each repository in the last {n_days} days"))
display(tabs)
```
| github_jupyter |
# XOR Prediction Neural Network
#### A simple neural network which will learn the XOR logic gate.
I will provide you with any links necessary so that you can read about the different aspects of this NN(Neural Network).
## Neural Network Info
#### All information regarding the neural network:
- Input Layer Units = 2 (Can be modified)
- Hidden Layer Units = 2 (Can be modified)
- Output Layer Units = 1 (Since this is problem specific, it can't be modified)
- No. of hidden layers = 1
- Learning Algorithm = Backpropagation

Feel free to mess around with it and try out different things.
```
import numpy as np # For matrix math
import matplotlib.pyplot as plt # For plotting
import sys # For printing
```
### Neural Network Implementation
Initially, I was going to approach this in an Object Oriented manner but I think that it would be much easier to read and implement, functionally. So, let's get started.
### Training Data
The XOR logic gate returns true when an odd number of its inputs are true and false otherwise; for two inputs, it is true exactly when the inputs differ. Here is the simple training dataset.
```
# The training data.
X = np.array([
[0, 1],
[1, 0],
[1, 1],
[0, 0]
])
# The labels for the training data.
y = np.array([
[1],
[1],
[0],
[0]
])
outNN =np.zeros([4])
X
y
```
### Additional Parameters
These are just additional parameters which are required by the weights for their dimensions.
```
# Layer sizes for the 2-2-1 network.
num_i_units = 2 # Number of Input units
num_h_units = 2 # Number of Hidden units
num_o_units = 1 # Number of Output units
```
### Neural Network Parameters
These are the parameters required directly by the NN. Comments should describe the variables.
```
# The learning rate for Gradient Descent.
learning_rate = 0.01
# L2 regularization strength; 0 disables regularization.
reg_param = 0
# Maximum iterations for Gradient Descent.
max_iter = 100
# Number of training examples
m = 4
```
### Weights and Biases
These are the numbers the NN needs to learn to make accurate predictions.
For the connections being made from the input layer to the hidden layer, the weights and biases are arranged in the following order: **each row contains the weights for each hidden unit**. Then, the shape of these set of weights is: *(number of hidden units X number of input units)* and the shape of the biases for this connection will be: *(number of hidden units X 1)*.
So, the overall shape of the weights and biases are:
**Weights1(Connection from input to hidden layers)**: num_h_units X num_i_units
**Biases1(Connection from input to hidden layers)**: num_h_units X 1
**Weights2(Connection from hidden to output layers)**: num_o_units X num_h_units
**Biases2(Connection from hidden to output layers)**: num_o_units X 1
### Generating the Weights
The weights here are going to be generated using a [Normal Distribution(Gaussian Distribution)](http://mathworld.wolfram.com/NormalDistribution.html). They will also be seeded so that the outcome always comes out the same.
```
# Seed NumPy so weight initialisation is reproducible across runs.
np.random.seed(1)
# Weights drawn from a standard normal; biases uniform in [0, 1).
W1 = np.random.normal(0, 1, (num_h_units, num_i_units)) # 2x2
W2 = np.random.normal(0, 1, (num_o_units, num_h_units)) # 1x2
B1 = np.random.random((num_h_units, 1)) # 2x1
B2 = np.random.random((num_o_units, 1)) # 1x1
W1
W2
B1
B2
```
### Sigmoid Function
[This](http://mathworld.wolfram.com/SigmoidFunction.html) function maps any input to a value between 0 and 1.

In my implementation, I have added a boolean which if set to true, will return [Sigmoid Prime(the derivative of the sigmoid function)](http://www.ai.mit.edu/courses/6.892/lecture8-html/sld015.htm) of the input value. This will be used in backpropagation later on.
```
def sigmoid(z, derv=False):
    """Logistic sigmoid of z.

    With derv=True, z is assumed to already be a sigmoid activation and
    the derivative z * (1 - z) is returned instead.
    """
    if derv:
        return z * (1 - z)
    return 1 / (1 + np.exp(-z))
```
### Forward Propagation
[This](https://en.wikipedia.org/wiki/Feedforward_neural_network) is how predictions are made. Propagating the input through the NN to get the output.
In my implementation, the forward function only accepts a feature vector as row vector which is then converted to a column vector. Also, the predict boolean, if set to true, only returns the output. Otherwise, it returns a tuple of the outputs of all the layers.
```
def forward(x, predict=False):
    """Propagate one example (row vector x) through the trained network.

    Returns only the output activation when predict=True, otherwise the
    tuple (input column, hidden activations, output activation).
    """
    col = x.reshape(x.shape[0], 1)        # example as a column vector
    hidden = sigmoid(W1.dot(col) + B1)    # 2x2 * 2x1 + 2x1 = 2x1
    out = sigmoid(W2.dot(hidden) + B2)    # 1x2 * 2x1 + 1x1 = 1x1
    if predict:
        return out
    return (col, hidden, out)
```
### Gradients for the Weights and Biases
These variables will contain the gradients for the weights and biases which will be used by gradient descent to update the weights and biases.
Also, creating the vector which will be storing the cost values for each gradient descent iteration to help visualize the cost as the weights and biases are updated.
```
# Accumulators for the parameter gradients (re-initialised each epoch in train()).
dW1 = 0 # Gradient for W1
dW2 = 0 # Gradient for W2
dB1 = 0 # Gradient for B1
dB2 = 0 # Gradient for B2
# Per-epoch training cost, used later to plot the learning curve.
cost = np.zeros((max_iter, 1)) # Column vector to record the cost of the NN after each Gradient Descent iteration.
```
## Training
This is the training function which contains the meat of NN. This contains forward propagation and [Backpropagation](http://neuralnetworksanddeeplearning.com/chap2.html).
### Backpropagation
The process of propagating the error in the output layer, backwards through the NN to calculate the error in each layer. Intuition: It's like forward propagation, but backwards.
Steps(for this NN):
1. Calculate the error in the output layer(dz2).
2. Calculate the error in the weights connecting the hidden layer to the output layer using dz2 (dW2).
3. Calculate the error in the hidden layer(dz1).
4. Calculate the error in the weights connecting the input layer to the hidden layer using dz1 (dW1).
5. The errors in the biases are just the errors in the respective layers.
Afterwards, the gradients (errors) of the weights and biases are used to update the corresponding weights and biases by multiplying them with the negative of the learning rate and scaling them by dividing by the number of training examples.
While iterating over all the training examples, the cost is also calculated simultaneously for each example. Then, a regularization term is added, although for such a small dataset regularization is unnecessary, since to perform well the NN will have to overfit the training data.
```
def train(_W1, _W2, _B1, _B2): # The arguments are to bypass UnboundLocalError error
    """Train the 2-2-1 XOR network with batch gradient descent.

    Runs `max_iter` epochs over the `m` training examples, accumulating
    backpropagated gradients and applying one parameter update per epoch.
    Records each epoch's regularized cross-entropy cost in the module-level
    `cost` vector. Returns the updated (_W1, _W2, _B1, _B2).
    """
    for i in range(max_iter):
        c = 0  # accumulated cross-entropy cost for this epoch
        dW1 = 0
        dW2 = 0
        dB1 = 0
        dB2 = 0
        for j in range(m):
            sys.stdout.write("\rIteration: {} and {}".format(i + 1, j + 1))
            # Forward Prop.
            a0 = X[j].reshape(X[j].shape[0], 1) # 2x1
            z1 = _W1.dot(a0) + _B1 # 2x2 * 2x1 + 2x1 = 2x1
            a1 = sigmoid(z1) # 2x1
            z2 = _W2.dot(a1) + _B2 # 1x2 * 2x1 + 1x1 = 1x1
            a2 = sigmoid(z2) # 1x1
            # Back prop.
            dz2 = a2 - y[j] # output-layer error, 1x1
            dW2 += dz2 * a1.T # 1x1 .* 1x2 = 1x2
            dz1 = np.multiply((_W2.T * dz2), sigmoid(a1, derv=True)) # hidden-layer error, 2x1
            dW1 += dz1.dot(a0.T) # 2x1 * 1x2 = 2x2
            dB1 += dz1 # 2x1
            dB2 += dz2 # 1x1
            c = c + (-(y[j] * np.log(a2)) - ((1 - y[j]) * np.log(1 - a2)))
            sys.stdout.flush() # Updating the text.
        # Gradient-descent step with L2 weight decay.
        # BUG FIX: previously the regularization term was ADDED outside the
        # learning-rate factor (`... - lr*(dW/m) + (reg/m)*W`), which grows
        # the weights instead of decaying them. With the file's
        # reg_param == 0 the numerical results are unchanged.
        _W1 = _W1 - learning_rate * ((dW1 / m) + (reg_param / m) * _W1)
        _W2 = _W2 - learning_rate * ((dW2 / m) + (reg_param / m) * _W2)
        _B1 = _B1 - learning_rate * (dB1 / m)
        _B2 = _B2 - learning_rate * (dB2 / m)
        cost[i] = (c / m) + (
            (reg_param / (2 * m)) *
            (
                np.sum(np.power(_W1, 2)) +
                np.sum(np.power(_W2, 2))
            )
        )
    return (_W1, _W2, _B1, _B2)
```
## Running
Now, let's try out the NN. Here, I have called the train() function. You can make any changes you like and then run all the kernels again. I have also plotted the cost function to visual how the NN performed.
The console printing might be off.
The weights and biases are then shown.
```
# Train the network, then display the learned weights and biases.
W1, W2, B1, B2 = train(W1, W2, B1, B2)
W1
W2
B1
B2
```
### Plotting
Now, let's plot a simple plot showing the cost function with respect to the number of iterations of gradient descent.
```
# Plot the learning curve: cost per gradient-descent iteration.
plt.plot(range(max_iter), cost)
# Labelling the x axis as the iterations axis.
plt.xlabel("Iterations")
# Labelling the y axis as the cost axis.
plt.ylabel("Cost")
# Showing the plot.
plt.show()
```
# Observation
With the initial parameters, the cost function doesn't look that good. It is decreasing, which is a good sign, but it isn't flattening out. I have tried multiple different values, but this set seems like the best fit.
Try out your own values, run the notebook again and see what you get.
```
# Evaluate the trained network on all four XOR input patterns.
coba = np.array([
    [0, 1],
    [1, 0],
    [1, 1],
    [0, 0]
])
coba
# Forward Prop.
for j in range(4):
    a0 = coba[j].reshape(coba[j].shape[0], 1) # 2x1
    z1 = W1.dot(a0) + B1 # 2x2 * 2x1 + 2x1 = 2x1
    a1 = sigmoid(z1) # 2x1
    z2 = W2.dot(a1) + B2 # 1x2 * 2x1 + 1x1 = 1x1
    outNN[j] = sigmoid(z2) # 1x1
outNN
# Overlay targets (blue circles) against predictions (red crosses).
plt.plot(y, 'bo', linewidth=2, markersize=12)
for j in range(4):
    plt.plot(j,outNN[j], 'r+', linewidth=2, markersize=12)
# NOTE(review): this second loop redraws the same target/prediction overlay
# four times — presumably redundant with the plot above.
for j in range(4):
    plt.plot(y, 'bo', j,outNN[j], 'r+', linewidth=2, markersize=12)
```
| github_jupyter |
# Word segmentation of Lao bibliographic data
Install packages not available in Google Colab.
```
#!pip install laonlp
#!pip install pyicu
#!pip install pythainlp
#!pip install botok
import sys
import regex as re
import pandas as pd
from laonlp.tokenize import word_tokenize as lao_wt
from pythainlp.tokenize import word_tokenize as thai_wt
from khmernltk import word_tokenize as khmer_wt
from icu import BreakIterator, Locale
# Whitelists used by segment_words() for input validation.
SUPPORTED_LANGUAGES = ['bo', 'bo_CN', 'bo_IN', 'km', 'km_KH', 'lo', 'lo_LA', 'my', 'my_MM', 'th', 'th_TH']
SUPPORTED_ENGINES = ['icu', 'laonlp', 'thainlp']
# Space, vertical bar, zero-width space. (Name keeps the original spelling.)
SUPPORTED_SEPERATORS = ['\u0020', '\u007C', '\u200B']
def laonlp_tokenise(s, sep):
    """Tokenise Lao text with LaoNLP and join the tokens with *sep*."""
    joined = sep.join(lao_wt(s))
    # Collapse runs of whitespace left by the tokeniser.
    joined = re.sub(r"\s{2,}", " ", joined)
    # Re-attach sentence punctuation that was split off as its own token.
    return re.sub(r'\s([?.!"](?:\s|$))', r'\1', joined)
#def thainlp_tokenise(s, sep):
# s = sep.join(thai_wt(s))
# s = re.sub(r"\s{2,}", " ", s)
# return re.sub(r'\s([?.!"](?:\s|$))', r'\1', s)
def iterate_breaks(text, bi):
    """Yield successive slices of *text* between boundaries found by the
    ICU break iterator *bi*."""
    bi.setText(text)
    start = 0
    while True:
        end = bi.nextBoundary()
        if end == -1:
            return
        yield text[start:end]
        start = end
def icu_tokenise(s, l, sep):
    """Segment *s* into words with ICU's word BreakIterator for language *l*,
    joining the segments with *sep*."""
    # Map bare language codes to full ICU locales. BUG FIX: the original
    # only handled exactly "lo" and "th", so any other validated language
    # (e.g. "km", "my", or even "lo_LA") left `bi` unbound and raised
    # UnboundLocalError. Unknown codes now fall back to the code itself,
    # which ICU resolves to its closest locale.
    locales = {'lo': 'lo_LA', 'th': 'th_TH', 'km': 'km_KH', 'my': 'my_MM', 'bo': 'bo_CN'}
    code = l.lower()[0:2]
    bi = BreakIterator.createWordInstance(Locale(locales.get(code, l)))
    s = sep.join(list(iterate_breaks(s, bi)))
    # Collapse whitespace runs and re-attach split-off punctuation.
    s = re.sub(r"\s{2,}", " ", s)
    s = re.sub(r'\s([?.!"](?:\s|$))', r'\1', s)
    return s
def segment_words(text, engine="icu", lang="", sep="\u0020"):
    """Segment *text* into words using the requested engine and language.

    engine: one of SUPPORTED_ENGINES; lang: a code from SUPPORTED_LANGUAGES
    (hyphens and encoding suffixes are normalised); sep: the token separator,
    one of SUPPORTED_SEPERATORS. Exits the process on invalid input.
    """
    engine = engine.lower()
    # Normalise e.g. "lo-LA.UTF-8" -> "lo_LA".
    lang = lang.replace("-", "_").split('.')[0]
    if engine not in SUPPORTED_ENGINES:
        print("Unsupported tokenisation engine specified", file=sys.stderr)
        sys.exit(1)
    if lang not in SUPPORTED_LANGUAGES:
        print("Unsupported language specified", file=sys.stderr)
        sys.exit(1)
    if sep not in SUPPORTED_SEPERATORS:
        print("Unsupported token seperator", file=sys.stderr)
        sys.exit(1)
    if engine == "icu":
        return icu_tokenise(text, lang, sep)
    if engine == "laonlp" and lang[0:2] == "lo":
        return laonlp_tokenise(text, sep)
    # BUG FIX: valid-but-unhandled combinations (e.g. engine="thainlp",
    # which passes the whitelist above) previously fell through and
    # silently returned None; fail loudly instead, consistent with the
    # other error paths.
    print("Unsupported engine/language combination", file=sys.stderr)
    sys.exit(1)
lao_data =[
["Kō̜n cha mī Mư̄ang Vīang Sai thān thīman kānpativat : bot banthưk khwāmsongcham / Somphō̜n Sīsuvanna.", "ກ່ອນຈະມີເມືອງວຽງໄຊ ຖານທີ່ທີ່ນ ການປະຕິຕິດ : ບົດບັນທຶກຄວາມຊົງຈຳ / ສົມພອນ ສີສີສີນນະ."],
["Lom hāi chai khō̧ng phǣndin / Kom Khāosān Mǣnying Lāo Sūn Kāng Sahāphan Mǣnying Lāo.", "ລົມຫາຍໃຈຂອງແຜ່ນດິນ / ກົມຂ່າວສານແມ່ຍິງລາວ ສູນກາງສະຫະພັນແມ່ມ່ງລາວ."],
["Sēnthāng sū santiphāp / khonkhwā læ hīaphīang, Suli Detvongphan.", "ເສັ້ນທາງສູ່ສັນຕິພາບ / ຄົ້ນຄວ້າ ແລະ ຮຽບຮຽງ, ສຸລິ ເດດວົງພັນ."]
]
# Build a small demo DataFrame of Latin-transliterated and Lao titles.
source_df = pd.DataFrame(lao_data, columns = ['latin', 'lao'])
df = source_df
df
df['lao']
# Segment each Lao title with both engines and store the results.
df['laonlp'] = df['lao'].map(lambda x: segment_words(x, engine="laonlp", lang="lo"))
df['icu'] = df['lao'].map(lambda x: segment_words(x, engine="icu", lang="lo"))
df
# Compare the two engines on a single record, using '|' as the separator.
single_string = df.iloc[0][1]
single_string
segment_words(single_string, engine="icu", lang="lo", sep="|")
segment_words(single_string, engine="laonlp", lang="lo", sep="|")
```
## Khmer
__khmer-nltk__
* [Khmer Natural Language Processing in Python](https://towardsdatascience.com/khmer-natural-language-processing-in-python-c770afb84784)
* https://viblo.asia/p/nlp-khmer-word-segmentation-YWOZrgNNlQ
* https://medium.com/@phylypo/text-classification-with-scikit-learn-on-khmer-documents-1a395317d195
## Tibetan
* https://github.com/topics/tibetan-nlp
* [botok](https://github.com/Esukhia/botok)
* [pybo](https://github.com/Esukhia/pybo)
* [PyTib](https://github.com/Esukhia/PyTib)
## Myanmar
| github_jupyter |
# Predict rating of review using BoardGameGeek Reviews dataset
**The goal of this project is to use the corpus of reviews present in this dataset, learn the reviews and their corresponding rating.**
**Once the model is trained using the review data, we ask the user to input a new review and predict the rating of that review.**
We begin by importing all the basic libraries:
```
!pip install -q pandas numpy nltk scikit-learn matplotlib seaborn wordcloud ipython
import random
import gc
import re
import string
import pickle
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pandas as pd
from IPython.display import display, HTML
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.model_selection import train_test_split, KFold
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.feature_extraction.text import TfidfVectorizer
import seaborn as sns
from matplotlib import pyplot as plt
import nltk
from nltk.corpus import stopwords
nltk.download(['words', 'stopwords', 'wordnet'], quiet=True)
from google.colab import drive
drive.mount('/content/drive')
```
Here we define the paths of the dataset and the pickle files
We will be pickling our dataset after every major step to save time
Due to the sheer amount of data we have, it does take time to do pre-processing
Also, I will be invoking the garbage collector often just to save memory as we need every bit of it due to the volume of input data
```
# Paths for the raw dataset and the intermediate pickle caches.
DATA_DIR = "/content/drive/My Drive/Colab Notebooks/data/"
DATASET = DATA_DIR + "bgg-13m-reviews.csv"
REVIEWS_PICKLE = DATA_DIR + "reviews.pkl"
# Set to True to force re-processing instead of loading cached pickles.
REVIEWS_DATA_CHANGED = False
CLEAN_TEXT_PICKLE = DATA_DIR + "clean_text.pkl"
LEMMATIZED_PICKLE = DATA_DIR + "lemmatized.pkl"
STOPWORDS_PICKLE = DATA_DIR + "stopwords.pkl"
CLEANED_PICKLE = DATA_DIR + "cleaned_reviews.pkl"
MODEL_PICKLE = DATA_DIR + "model.pkl"
VOCABULARY_PICKLE = DATA_DIR + "vocab.pkl"
```
## Loading the data
Now that we've got the formalities out of our way, let's start exploring our dataset and see how we can solve this problem.
First, we need to load the dataset like so:
```
reviews = pd.DataFrame()
# Load the cached pickle only when it exists AND the raw data is unchanged;
# otherwise (re)parse the CSV and refresh the cache.
# BUG FIX: the original condition was `isfile(...) or REVIEWS_DATA_CHANGED`,
# which loaded the stale pickle precisely when the data HAD changed. With
# the default REVIEWS_DATA_CHANGED = False behavior is identical.
if os.path.isfile(REVIEWS_PICKLE) and not REVIEWS_DATA_CHANGED:
    print('Pickled reviews found. Loading from pickle file.')
    reviews = pd.read_pickle(REVIEWS_PICKLE, compression="gzip")
else:
    print("Reading reviews csv")
    reviews = pd.read_csv(DATASET, usecols=['comment', 'rating'])
    print("Pickling dataset")
    reviews.to_pickle(REVIEWS_PICKLE, compression="gzip")
# Keep only the needed columns, shuffle deterministically, reset the index.
reviews = reviews.reindex(columns=['comment', 'rating'])
reviews = reviews.sample(frac=1, random_state=3).reset_index(drop=True)
gc.collect()
display(reviews)
```
## Dropping rows with empty reviews
Great! We've got our dataset loaded.
Also, consecutive runs should be faster because we've pickled the data
As we can see, we've got several rows with no review and just a rating
We will be removing these rows as we cannot use them
```
# Count and drop rows whose review text is missing.
empty_reviews = reviews.comment.isna().sum()
print("Dropping {} rows from dataset".format(empty_reviews))
reviews.dropna(subset=['comment'], inplace=True)
reviews = reviews.reset_index(drop=True)
print("We now have {} reviews remaining".format(reviews.shape[0]))
gc.collect()
display(reviews)
```
## Cleaning the reviews
Wow! That was a huge difference. We've shed off about 10 million rows!
Now, we need to begin cleaning up these reviews as they have a lot of redundant information and context.
For this dataset, we will be removing the following patterns from the text:
- Words less than 3 and more than 15 characters
- Special characters
- URLs
- HTML tags
- All kinds of redundant whitespaces
- Words that have numbers between them
- Words that begin or end with a number
Once all this is done, we tokenize the reviews and save them in the dataframe
```
def clean_text(text):
    """Normalize a raw review string and return its word tokens.

    Lower-cases, keeps only 3-15 character words, strips bracketed text,
    URLs, HTML tags, punctuation and digit-containing words, then tokenizes
    with the module-level `tokenizer` (a RegexpTokenizer built in the cell
    below).
    """
    text = text.lower().strip()
    # Keep words of 3..15 characters (this also collapses all whitespace,
    # including newlines, so the later regexes see single-space separators).
    text = " ".join([w for w in text.split() if 3 <= len(w) <= 15])
    text = re.sub(r'\[.*?\]', '', text)                # [bracketed] fragments
    text = re.sub(r'https?://\S+|www\.\S+', '', text)  # URLs
    text = re.sub(r'<.*?>+', '', text)                 # HTML tags
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'\w+\d+\w*', '', text)              # words containing digits
    text = re.sub(r'\d+\w+\d*', '', text)              # words starting with digits
    # Fix: collapse runs of non-word characters to a single space instead of
    # deleting them. The original re.sub('\W+', '', text) removed the spaces
    # too, fusing every remaining word into one giant token and defeating
    # the \w+ tokenizer.
    text = re.sub(r'\W+', ' ', text).strip()
    return tokenizer.tokenize(text)
# Clean the review text, using the cached result when it is still valid.
# Fix: reprocess (don't load the cache) when REVIEWS_DATA_CHANGED is set;
# the original `or` condition did the opposite.
if os.path.isfile(CLEAN_TEXT_PICKLE) and not REVIEWS_DATA_CHANGED:
    print("Pickled clean text found. Loading from pickle file.")
    reviews = pd.read_pickle(CLEAN_TEXT_PICKLE, compression="gzip")
else:
    print("Cleaning reviews")
    # Module-level tokenizer used by clean_text(): one token per \w+ run.
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
    reviews['comment'] = reviews['comment'].apply(clean_text)
    print("Pickling cleaned dataset")
    reviews.to_pickle(CLEAN_TEXT_PICKLE, compression="gzip")
gc.collect()
display(reviews)
```
## Calculating unique words
We've gotten rid of most of the oddities in our text data.
But, when it comes to such huge models text is present in many forms of speech so it's better to lemmatize them. But, before we do that, let's see how many unique words we currently have.
```
# Count distinct tokens before lemmatizing; `uniq_words` accumulates the
# vocabulary size after each cleaning stage for the plot further down.
uniq_words = []
unique_words = reviews.explode('comment').comment.nunique()
uniq_words.append(unique_words)
print("Unique words before lemmatizing {}".format(unique_words))
```
## Lemmatizing
Wow, that's a lot of words. Let's see what happens after lemmatizing our reviews.
```
def lemmatize_data(text):
    """Lemmatize every token in `text` via the module-level `lemmatizer`."""
    return list(map(lemmatizer.lemmatize, text))
# Lemmatize the tokens, using the cached result when it is still valid.
# Fix: `and not REVIEWS_DATA_CHANGED` -- the original `or` loaded a stale
# cache precisely when a refresh was requested.
if os.path.isfile(LEMMATIZED_PICKLE) and not REVIEWS_DATA_CHANGED:
    print("Pickled lemmatized reviews found. Loading from pickle file.")
    reviews = pd.read_pickle(LEMMATIZED_PICKLE, compression="gzip")
else:
    print("Lemmatizing reviews")
    # Module-level lemmatizer used by lemmatize_data().
    lemmatizer = nltk.stem.WordNetLemmatizer()
    reviews['comment'] = reviews['comment'].apply(lemmatize_data)
    print("Pickling lemmatized reviews")
    reviews.to_pickle(LEMMATIZED_PICKLE, compression="gzip")
display(reviews)
gc.collect()
# Record the vocabulary size after lemmatizing.
unique_words = reviews.explode('comment').comment.nunique()
uniq_words.append(unique_words)
print("Unique words after lemmatizing {}".format(unique_words))
```
## Removing stopwords and non-english words
Now let's remove the stopwords as well as they are mostly redundant for our training and mainly help us understand context.
We do have quite a fair number of custom words to remove and I've identified some custom words that I've added to the list as well.
```
def remove_stopwords(text):
    """Filter a token list: drop stopwords, keep only dictionary words or
    non-alphabetic tokens, then deduplicate and drop tokens of <= 2 chars.

    Relies on the module-level `stop_words` and `words_corpus` sets built in
    this cell. Note that `set(words)` removes duplicates and loses the
    original token order.
    """
    # Fix: parenthesize the boolean. `and` binds tighter than `or`, so the
    # original expression kept every non-alphabetic token unconditionally,
    # even when it was in the stopword list.
    words = [w for w in text
             if w not in stop_words and (w in words_corpus or not w.isalpha())]
    words = list(filter(lambda word: len(word) > 2, set(words)))
    return words
# Build the stopword set (curated JSON list + NLTK English stopwords +
# domain-specific words) and strip it from the reviews, caching the result.
# Fix: `and not REVIEWS_DATA_CHANGED` -- the original `or` loaded the stale
# cache precisely when a data refresh was requested.
if os.path.isfile(STOPWORDS_PICKLE) and not REVIEWS_DATA_CHANGED:
    print("Reading stopwords pickle")
    reviews = pd.read_pickle(STOPWORDS_PICKLE, compression="gzip")
else:
    # English dictionary used by remove_stopwords() to drop non-words.
    words_corpus = set(nltk.corpus.words.words())
    stop_words_json = {"en": ["a", "a's", "able", "about", "above", "according", "accordingly", "across", "actually", "after", "afterwards", "again", "against", "ain't", "aint", "all", "allow", "allows", "almost", "alone", "along", "already", "also", "although", "always", "am", "among", "amongst", "an", "and", "another", "any", "anybody", "anyhow", "anyone", "anything", "anyway", "anyways", "anywhere", "apart", "appear", "appreciate", "appropriate", "are", "aren't", "arent", "around", "as", "aside", "ask", "asking", "associated", "at", "available", "away", "awfully", "b", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being", "believe", "below", "beside", "besides", "best", "better", "between", "beyond", "both", "brief", "but", "by", "c", "c'mon", "cmon", "cs", "c's", "came", "can", "can't", "cannot", "cant", "cause", "causes", "certain", "certainly", "changes", "clearly", "co", "com", "come", "comes", "concerning", "consequently", "consider", "considering", "contain", "containing", "contains", "corresponding", "could", "couldn't", "course", "currently", "d", "definitely", "described", "despite", "did", "didn't", "different", "do", "does", "doesn't", "doesn", "doing", "don't", "done", "down", "downwards", "during", "e", "each", "edu", "eg", "eight", "either", "else", "elsewhere", "enough", "entirely", "especially", "et", "etc", "even", "ever", "every", "everybody", "everyone", "everything", "everywhere", "ex", "exactly", "example", "except", "f", "far", "few", "fifth", "first", "five", "followed", "following", "follows", "for", "former", "formerly", "forth", "four", "from", "further", "furthermore", "g", "get", "gets", "getting", "given", "gives", "go", "goes", "going", "gone", "got", "gotten", "greetings", "h", "had", "hadn't", "hadnt", "happens", "hardly", "has", "hasn't", "hasnt", "have", "haven't", "havent", "having", "he", "he's", "hes", "hello", "help", "hence", "her", "here", "here's", "heres", "hereafter",
                              "hereby", "herein", "hereupon", "hers", "herself", "hi", "him", "himself", "his", "hither", "hopefully", "how", "howbeit", "however", "i", "i'd", "id", "i'll", "i'm", "im", "i've", "ive", "ie", "if", "ignored", "immediate", "in", "inasmuch", "inc", "indeed", "indicate", "indicated", "indicates", "inner", "insofar", "instead", "into", "inward", "is", "isn't", "isnt", "it", "it'd", "itd", "it'll", "itll", "it's", "its", "itself", "j", "just", "k", "keep", "keeps", "kept", "know", "known", "knows", "l", "last", "lately", "later", "latter", "latterly", "least", "less", "lest", "let", "let's", "lets", "like", "liked", "likely", "little", "look", "looking", "looks", "ltd", "m", "mainly", "many", "may", "maybe", "me", "mean", "meanwhile", "merely", "might", "more", "moreover", "most", "mostly", "much", "must", "my",
                              "myself", "n", "name", "namely", "nd", "near", "nearly", "necessary", "need", "needs", "neither", "never", "nevertheless", "new", "next", "nine", "no", "nobody", "non", "none", "noone", "nor", "normally", "not", "nothing", "novel", "now", "nowhere", "o", "obviously", "of", "off", "often", "oh", "ok", "okay", "old", "on", "once", "one", "ones", "only", "onto", "or", "other", "others", "otherwise", "ought", "our", "ours", "ourselves", "out", "outside", "over", "overall", "own", "p", "particular", "particularly", "per", "perhaps", "placed", "please", "plus", "possible", "presumably", "probably", "provides", "q", "que", "quite", "qv", "r", "rather", "rd", "re", "really", "reasonably", "regarding", "regardless", "regards", "relatively", "respectively", "right", "s", "said", "same", "saw", "say", "saying", "says", "second", "secondly", "see", "seeing", "seem", "seemed", "seeming", "seems", "seen", "self", "selves", "sensible", "sent", "serious", "seriously", "seven", "several", "shall", "she", "should", "shouldn't", "shouldnt", "since", "six", "so", "some", "somebody", "somehow", "someone", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry", "specified", "specify", "specifying", "still", "sub", "such", "sup", "sure", "t", "t's", "ts", "take", "taken", "tell", "tends", "th", "than", "thank", "thanks", "thanx", "that", "that's", "thats", "the", "their", "theirs", "them", "themselves", "then", "thence", "there", "there's", "theres", "thereafter", "thereby", "therefore", "therein", "theres", "thereupon", "these", "they", "they'd", "theyd", "they'll", "theyll", "they're", "theyre", "theyve", "they've", "think", "third", "this", "thorough", "thoroughly", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "took", "toward", "towards", "tried", "tries", "truly", "try", "trying", "twice", "two", "u", "un", "under", "unfortunately", "unless", "unlikely", "until", "unto", "up", "upon", "us", "use", "used",
                              "useful", "uses", "using", "usually", "uucp", "v", "value", "various", "very", "via", "viz", "vs", "w", "wa", "want", "wants", "was", "wasn't", "wasnt", "way", "we", "we'd", "we'll", "we're", "we've", "weve", "welcome", "well", "went", "were", "weren't", "werent", "what", "what's", "whats", "whatever", "when", "whence", "whenever", "where", "wheres", "where's", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "who's", "whos", "whoever", "whole", "whom", "whose", "why", "will", "willing", "wish", "with", "within", "without", "won't", "wont", "wonder", "would", "wouldn't", "wouldny", "x", "y", "yes", "yet", "you", "you'd", "youd", "you'll", "youll", "you're", "youre", "you've", "youve", "your", "yours", "yours", "yourself", "yourselves", "z", "zero"]}
    stop_words_json_en = set(stop_words_json['en'])
    stop_words_nltk_en = set(stopwords.words('english'))
    # Board-game-domain words that are frequent but carry no sentiment.
    custom_stop_words = ["rating", "ish", "havn", "dice", "end", "set", "doesnt", "give", "find", "doe", "system", "tile", "table", "deck", "box", "made", "part", "based", "worker", "wife", "put", "havent", "game", "play",
                         "player", "one", "two", "card", "ha", "wa", "dont", "board", "time", "make", "rule", "thing", "version", "mechanic", "year", "theme", "rating", "family", "child", "money", "edition", "collection", "piece", "wasnt", "didnt"]
    stop_words = stop_words_nltk_en.union(
        stop_words_json_en, custom_stop_words)
    print("Removing {} stopwords from text".format(len(stop_words)))
    print()
    reviews['comment'] = reviews['comment'].apply(remove_stopwords)
    print("Pickling stopwords data")
    reviews.to_pickle(STOPWORDS_PICKLE, compression="gzip")
gc.collect()
print("After removing stopwords:")
display(reviews)
```
Let's see what the word count is now
```
# Vocabulary size after stopword removal (third data point for the plot below).
unique_words = reviews.explode('comment').comment.nunique()
uniq_words.append(unique_words)
print(unique_words)
```
Wow! That a marked difference considering what we started off with. This will definitely help us during our training phase as it reduces training time and computations as well.
Moreover, this helps improve our accuracy as we will be targeting important keywords that contribute to a particular sentiment.
Let's plot how the unique word count dropped across these cleaning stages.
```
# Plot how the vocabulary shrank across the three cleaning stages.
# NOTE(review): the y values are hard-coded snapshots of earlier runs rather
# than the `uniq_words` list collected above -- presumably to keep the plot
# stable across partial re-runs; verify they match the current data.
x = [1, 2, 3]
y = [313013, 300678, 37633]
fig, ax = plt.subplots(figsize=(10, 8))
ax.plot(x, y, color="#663399")
ax.set_ylabel('Unique Words')
ax.set_xlabel('Bins')
plt.show()
```
## Finishing up and saving progress
In the process of all this cleaning, there would be several rows that have become empty.
Let's remove them and then pickle our dataset so that we don't have to do all this cleaning again.
```
def drop_empty_reviews(df):
    """Return `df` without the rows whose `comment` value is empty (falsy)."""
    keep_mask = df.comment.astype(bool)
    return df.drop(df[~keep_mask].index)
# Drop now-empty reviews and cache the fully cleaned dataset.
# Fix: `and not REVIEWS_DATA_CHANGED` -- the original `or` loaded a stale
# cache precisely when a refresh was requested.
if os.path.isfile(CLEANED_PICKLE) and not REVIEWS_DATA_CHANGED:
    print("Reading cleaned reviews pickle")
    reviews = pd.read_pickle(CLEANED_PICKLE, compression="gzip")
else:
    reviews = drop_empty_reviews(reviews)
    print("Pickling cleaned reviews data")
    reviews.to_pickle(CLEANED_PICKLE, compression="gzip")
    REVIEWS_DATA_CHANGED = False  # every cache is fresh from this point on
gc.collect()
display(reviews)
```
## Exploring our dataset
Now that we've completed our cleaning, we need to assess what kind of model we should use to predict our ratings.
But first, let's see how many distinct values we have in the ratings column.
```
# Number of distinct raw rating values (continuous scores -> thousands).
reviews.rating.nunique()
```
We've got 3k+ unique values. This would definitely help us in a Regression model.
But, since we will be predicting single digit ratings, I'm going to round off the column. This will allow us to perform both regression and classification and choose what gives us a better result.
Do note that rounding off generally isn't recommended as it leads to loss of information and will affect the outcome.
I'm only doing this due to computational constraints. You should see better results in Regression models without rounding the values.
```
# Round the continuous ratings to integers so the task can be treated as
# 11-class classification (0..10) as well as regression.
reviews.rating = reviews.rating.round().astype(int)
reviews
```
We should have 11 distinct values (0 to 10) after this. Let's see how many ratings of each category we have.
```
# Class distribution of the rounded ratings.
reviews.rating.value_counts()
```
We can see that we have less than 50 reviews with a zero rating. Their contribution is fairly negligible so we're going to remove them.
```
# Drop rating classes with fewer than 50 examples (e.g. the zero-star
# class); they are too rare to learn from.
class_distrib = reviews.rating.value_counts(ascending=True)
classes_to_drop = class_distrib[class_distrib < 50].index.values
for rating in classes_to_drop:
    rows_to_drop = reviews[reviews.rating == rating].index.to_list()
    reviews.drop(rows_to_drop, inplace=True)
# Fix: report the row count -- the original printed the whole (rows, cols)
# shape tuple inside a "We have {} reviews" message.
print("We have {} reviews remaining".format(reviews.shape[0]))
```
Let's visualize the distribution of ratings a bit better by using a histogram
```
# Bar plot of the rating distribution (roughly bell-shaped, centred on 6-9).
fig, ax = plt.subplots(figsize=(10, 8))
sns.barplot(reviews.rating.value_counts().index,
            reviews.rating.value_counts(), color="#663399")
ax.set_xlabel("Rating")
ax.set_ylabel("Count")
fig.show()
```
## Splitting dataset for training, validation and testing
Looks like the data is following somewhat of a normal distribution. Most of the reviews lie between 6 to 9. This will definitely bias the model as the dataset is imbalanced.
Now that we have a vague idea about our course of action, let's split the dataset for training and testing. We will be using around 10% of this data mainly due to the volume and computational constraints.
```
# Carve out 10% of the data for training and 10% for evaluation
# (computational budget), then split the evaluation half-and-half into
# dev and test sets.
# NOTE(review): no random_state or stratify is passed, so the split is
# neither reproducible nor class-balanced; given the imbalance discussed
# above, consider train_test_split(..., stratify=reviews.rating,
# random_state=...) -- confirm before changing.
X_train, X_test, y_train, y_test = train_test_split(
    reviews.comment, reviews.rating, train_size=0.1, test_size=0.1)
X_dev, X_test, y_dev, y_test = train_test_split(X_test, y_test, train_size=0.5)
# Re-join the token lists into plain strings for TfidfVectorizer.
X_train = X_train.apply(' '.join)
X_dev = X_dev.apply(' '.join)
X_test = X_test.apply(' '.join)
gc.collect()
print("Number of records chosen for training: {}".format(X_train.shape[0]))
print("Number of records chosen for development: {}".format(X_dev.shape[0]))
print("Number of records chosen for testing: {}".format(X_test.shape[0]))
```
Great! We've got that sorted. Since we have 10 unique values in ratings and we're dealing with text, let's start off with a Multinomial Naive Bayes Classifier.
But, before we do that, we need to calculate the TF-IDF values for our reviews.
TF is the term-frequency for every word in the review i.e. the number of times a word has appeared in a review. This is fairly straightforward to calculate by using a counter per review.
IDF (Inverse Document Frequency) is slightly more trickier. Essentially, IDF is the weight of a word across all reviews. It is a measure of how common or rare a word is across the corpus. This helps us understand which words need to be prioritized over others. The closer IDF is to 0, the more common it is and the lesser it will contribute to the model. We need to target words that have a weight closer to 1 as they contribute the most.
This can be calculated by taking the total number of reviews, dividing it by the number of reviews that contain a word, and calculating the logarithm just to balance large numbers that can arise. This is what the final formula looks like:
$tfidf_{i,d} = tf_{i,d} \cdot idf_{i}$
```
# Fit the TF-IDF vocabulary and IDF weights on the training set only, then
# project the dev and test sets into that same feature space.
vectorizer = TfidfVectorizer()
X_train_vec = vectorizer.fit_transform(X_train)
X_dev_vec = vectorizer.transform(X_dev)
X_test_vec = vectorizer.transform(X_test)
```
## Selecting a model
We will be testing the following models
- Multinomial Naive Bayes (Works well with text data)
- Linear Regression (Target column is a number so maybe it will work)
- Logistic Regression (Works well with categorical data)
First let's give Naive Bayes a shot mainly due to the flexibility we get with text data. Naive Bayes methods are a set of supervised learning algorithms based on applying Bayes’ theorem with the “naive” assumption of conditional independence between every pair of features given the value of the class variable.
This is what the final classification rule looks like:
$\hat{y} = \arg\max_y P(y) \prod_{i=1}^{n} P(x_i \mid y)$
```
# Baseline model: Multinomial Naive Bayes on the TF-IDF features.
accuracy_log = []  # dev-set accuracy of each model, in training order
mnbc = MultinomialNB()
mnbc.fit(X_train_vec, y_train)
mnbc_predicted = mnbc.predict(X_dev_vec)
acc_score = accuracy_score(mnbc_predicted, y_dev)
mnbc_accuracy = round(acc_score * 100, 2)
# zero_division=False compares equal to 0: report 0 (no warning) for
# classes with no predicted samples.
mnbc_report = classification_report(
    y_dev, mnbc_predicted, digits=4, zero_division=False)
gc.collect()
print("Accuracy for Multinomial Naive Bayes = {}%".format(mnbc_accuracy))
print()
accuracy_log.append(acc_score)
print("Classification Report for Multinomial Naive Bayes")
print(mnbc_report)
print("Confusion Matrix for Multinomial Naive Bayes:")
# normalize='all' expresses each cell as a fraction of all predictions.
mnb_cm = confusion_matrix(y_dev, mnbc_predicted, normalize='all')
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(mnb_cm, annot=True, linewidths=0.5, ax=ax,
            cmap="Purples", linecolor="#663399", fmt="f", square=True)
fig.show()
```
Let's see how well our model works for a few real-world reviews
```
def check_real_review_predictions(model, vectorizer):
    """Predict ratings for a few hand-written reviews with `model`.

    `vectorizer` must be the TF-IDF vectorizer the model was trained with.
    Returns a 2-column array pairing each review with its predicted rating.
    """
    real_reviews = ["This game is absolutely pathetic. Horrible story and characters. Will never play this game again.",
                    "Worst.",
                    "One of the best games I've ever played. Amazing story and characters. Recommended.",
                    "good."]
    # Fix: reuse the already-fitted vectorizer. The original built a fresh
    # TfidfVectorizer from its vocabulary and called fit_transform, which
    # recomputed the IDF weights from just these four documents and thus fed
    # the model features from a different space than it was trained on.
    vectorized_review = vectorizer.transform(real_reviews)
    predicted = model.predict(vectorized_review)
    combined = np.vstack((real_reviews, predicted)).T
    return combined


preds = check_real_review_predictions(mnbc, vectorizer)
preds
```
Well, this is exactly what we feared. All our ratings are between 6 to 8.
Looks like our model is being biased by our input data. I've got a feeling that some form of regression would work slightly better here. Let's give Linear Regression a shot.
```
# Linear regression on the TF-IDF features; its predictions are continuous,
# so round them to the nearest integer rating before scoring accuracy.
# NOTE(review): rounded predictions can fall outside the valid 1..10 range;
# consider clipping -- confirm against the label set first.
linreg = LinearRegression(n_jobs=4)
linreg.fit(X_train_vec, y_train)
linreg_predicted = linreg.predict(X_dev_vec)
acc_score = accuracy_score(np.round(linreg_predicted), y_dev)
linreg_accuracy_round = round(acc_score * 100, 2)
gc.collect()
print("Accuracy for Linear Regression = {}%".format(linreg_accuracy_round))
accuracy_log.append(acc_score)
```
The accuracy is a little worse but let's check with some of our reviews before we dismiss this idea.
```
# Sanity-check the regression model on the hand-written reviews.
preds = check_real_review_predictions(linreg, vectorizer)
preds
```
Perfect. Looks like we are on the right track. Since this is mainly a classification problem, Logistic Regression would be a better fit here. Let's try that:
```
# Logistic regression with the default iteration budget (max_iter=100);
# convergence is checked in the next cell.
logreg = LogisticRegression(n_jobs=4)
logreg.fit(X_train_vec, y_train)
logreg_predicted = logreg.predict(X_dev_vec)
# Fix: compute acc_score from *this* model's predictions. The original
# never reassigned acc_score here, so it appended the stale linear
# regression score to accuracy_log below.
acc_score = accuracy_score(logreg_predicted, y_dev)
logreg_accuracy = round(acc_score * 100, 2)
logreg_report = classification_report(
    y_dev, logreg_predicted, digits=4, zero_division=False)
gc.collect()
print("Accuracy for Logistic Regression = {}%".format(logreg_accuracy))
print()
accuracy_log.append(acc_score)
print("Classification Report for Logistic Regression")
print(logreg_report)
```
While our accuracy is better, our solution hasn't fully converged. Let's increase the max iterations to a high value and see where it converges.
We can then use that and see how our model does. The default value is 100 so we will be increasing it to 1000.
```
# Retrain with a ten-fold larger iteration budget (default is 100) so the
# solver can actually converge on this feature space.
logreg = LogisticRegression(max_iter=1000, n_jobs=4)
logreg.fit(X_train_vec, y_train)
logreg_predicted = logreg.predict(X_dev_vec)
acc_score = accuracy_score(logreg_predicted, y_dev)
logreg_accuracy = round(acc_score * 100, 2)
logreg_report = classification_report(
    y_dev, logreg_predicted, digits=4, zero_division=False)
gc.collect()
print("Accuracy for Logistic Regression = {}%".format(logreg_accuracy))
print()
accuracy_log.append(acc_score)
print("Classification Report for Logistic Regression")
print(logreg_report)
# n_iter_[-1]: the number of iterations the solver actually used.
print("Solution converged at iteration = {}".format(logreg.n_iter_[-1]))
```
Let's have a look at the confusion matrix and see if things got any better.
```
# Normalized confusion matrix for the converged logistic model.
print("Confusion Matrix for Logistic Regression:")
logreg_cm = confusion_matrix(y_dev, logreg_predicted, normalize='all')
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(logreg_cm, annot=True, linewidths=0.5, ax=ax,
            cmap="Purples", linecolor="#663399", fmt="f", square=True)
fig.show()
```
While we did lose some accuracy, our solution has converged and the confusion matrix looks slightly better. Let's see how it classifies our real world reviews.
```
# Sanity-check the converged logistic model on the hand-written reviews.
preds = check_real_review_predictions(logreg, vectorizer)
preds
```
Perfect! These ratings correspond somewhat to what we were expecting.
But before we move ahead, we will ensure the consistency of these metrics by using Stratified K-Fold cross validation.
Since we are dealing with imbalanced data, a Stratified K-Fold is necessary to maintain the distribution of the input dataset.
```
# Cross-validate the logistic model.
# Fixes: (1) use StratifiedKFold, as the surrounding text requires for this
# imbalanced dataset (plain KFold ignores class balance); (2) index the
# folds positionally with .iloc -- kfold.split yields positional indices,
# while the original `X_train.index.isin(train_index)` compared them
# against the shuffled DataFrame's *labels*, selecting essentially
# arbitrary rows for each fold.
from sklearn.model_selection import StratifiedKFold
kfold = StratifiedKFold(n_splits=3)
fold = 1
for train_index, test_index in kfold.split(X_train, y_train):
    X_tr, X_te = X_train.iloc[train_index], X_train.iloc[test_index]
    y_tr, y_te = y_train.iloc[train_index], y_train.iloc[test_index]
    # Refit TF-IDF on each fold's training part to avoid leakage.
    vectorizer = TfidfVectorizer()
    X_tr_vec = vectorizer.fit_transform(X_tr)
    X_te_vec = vectorizer.transform(X_te)
    logreg.fit(X_tr_vec, y_tr)
    logreg_predicted = logreg.predict(X_te_vec)
    acc_score = accuracy_score(logreg_predicted, y_te)
    logreg_accuracy = round(acc_score * 100, 2)
    gc.collect()
    print("Accuracy for Logistic Regression Fold {} = {}%".format(
        fold, logreg_accuracy))
    print()
    fold += 1
```
Looks like Logistic Regression is performing fairly well when compared to its Linear counterpart or even Naive Bayes for that matter. Let's see how the model is performing on the unseen test data.
```
# Final evaluation: refit on the full training split and score the
# held-out test set.
logreg.fit(X_train_vec, y_train)
final_predicted = logreg.predict(X_test_vec)
acc_score = accuracy_score(final_predicted, y_test)
logreg_accuracy = round(acc_score * 100, 2)
print("Final Accuracy for test data with Logistic Regression = {}%".format(
    logreg_accuracy))
```
While this isn't the accuracy we were expecting, it looks like our accuracy is plateauing. This could be due to the inherent bias in the dataset.
Due to this, I will be training the model using around 50% of the data and pickling it so that we can use it in our online Flask application to predict any new reviews that a user can input.
```
# Train the deployable model on ~50% of the data. test_size=0.01 only
# satisfies train_test_split's API; that 1% slice is discarded.
X_train, _, y_train, _ = train_test_split(
    reviews.comment, reviews.rating, train_size=0.5, test_size=0.01)
X_train = X_train.apply(' '.join)
print("Number of records chosen for training the final model = {}".format(
    X_train.shape[0]))
# Refit the vectorizer on the larger training set before the final fit.
vectorizer = TfidfVectorizer()
X_train_vec = vectorizer.fit_transform(X_train)
logreg.fit(X_train_vec, y_train)
gc.collect()
```
We will pickle this model and the vocabulary of the TF-IDF vectorizer. This will be used in the Flask application that we will be building.
```
# Replace any previously pickled model/vocabulary before writing new ones.
# Fixes: (1) MODEL_PICKLE and VOCABULARY_PICKLE already contain DATA_DIR,
# so the original os.remove(DATA_DIR + ...) pointed at a doubled path;
# (2) only remove when the file actually exists -- the original
# `or REVIEWS_DATA_CHANGED` made os.remove raise FileNotFoundError when
# the pickle was absent; (3) close the files via `with`.
if os.path.isfile(MODEL_PICKLE):
    print('Pickled model already present')
    os.remove(MODEL_PICKLE)
if os.path.isfile(VOCABULARY_PICKLE):
    print('Pickled vocab already present')
    os.remove(VOCABULARY_PICKLE)
print("Pickling model")
with open(MODEL_PICKLE, "wb") as model_file:
    pickle.dump(logreg, model_file)
print("Pickling vocabulary")
with open(VOCABULARY_PICKLE, "wb") as vocab_file:
    pickle.dump(vectorizer.vocabulary_, vocab_file)
```
## My contribution
Built a Flask application that predicts the rating of any review. Please visit the page [here](https://karanr.dev/rating-predictor) and enter a review. You should see the prediction for that review. This application has been deployed on Heroku and the form is available on my Portfolio website that is built using GatsbyJS. The code for this application is available on my [GitHub](https://github.com/karanrajpal14/rating-predictor).
You can also see a demo of this application on [YouTube](https://www.youtube.com/watch?v=97LN1FVY6gU).
A copy of the dataset and it's pickle files are available [here](https://drive.google.com/open?id=1uW3E6lwW0A9ay30Q6LLcoUAHYBE9pQUk) and the IPython Notebook can be downloaded from [here](https://www.karanr.dev/rating_predictor.ipynb). **(Right-click and Save As to download the file)**
| github_jupyter |
## 疫情数据分析和预测
疫情数据分析和预测是医学和流行病学应对大范围流行病时的重要判断手段,在医治隔离、预防响应、物资生产调配等抗疫措施上起到参考作用。
以下将通过已知模型尝试寻找合适拟合模型并对目前全球疫情发展作出一定程度的预测。
### 一、逻辑斯蒂模型(Logistic)
(1)模型描述:当一个物种迁入到一个新生态系统中后,其数量会发生变化。假设该物种的起始数量小于环境的最大容纳量,则数量会增长。该物种在此生态系统中有天敌、食物、空间等资源也不足(非理想环境),则增长函数满足逻辑斯谛方程,图像呈S形,此方程是描述在资源有限的条件下种群增长规律的一个最佳数学模型。
(2)一般疾病的传播是S型增长的过程,因为疾病传播的过程中会受到一定的阻力(医治、切断传播途径等措施)。
此处采用最小二乘法,对logistic增长函数进行拟合。以下将检验最小二乘法拟合的逻辑斯蒂模型是否能贴合实际。
```
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def logistic_increase_function(t, K, P0, r):
    """Logistic growth P(t) with fixed onset t0 = 11 and fixed rate 0.45.

    The `r` argument is accepted (so curve_fit can pass it) but overridden
    by the hand-tuned constant below; only K and P0 are effectively fitted.
    t: time, t0: initial time, P0: initial value, K: capacity,
    r: growth rate.
    """
    t0 = 11
    # Hand-tuned growth rate (0.05/0.55/0.65 were also tried).
    r = 0.45
    growth = np.exp(r * (t - t0))
    numerator = K * growth * P0
    denominator = K + (growth - 1) * P0
    return numerator / denominator
'''
1.11日41例
1.18日45例
1.19日62例
1.20日291例
1.21日440例
1.22日571例
1.23日835例
1.24日1297例
1.25日1985例
1.26日2762例
1.27日4535例
'''
# Dates (day of January 2020) and cumulative confirmed infections.
t=[11,18,19,20 ,21, 22, 23, 24, 25, 26, 27]
t=np.array(t)
P=[41,45,62,291,440,571,835,1297,1985,2762,4535]
P=np.array(P)
# Least-squares fit of the logistic curve.
# Check the fit against the observed data.
popt1, pcov1 = curve_fit(logistic_increase_function, t, P)
# popt1 holds the fitted coefficients.
print("K:capacity P0:initial_value r:increase_rate t:time")
print(popt1)
# P values predicted by the fitted curve.
P_predict = logistic_increase_function(t,popt1[0],popt1[1],popt1[2])
# Long-range forecast.
future=[11,18,19,20 ,21, 22, 23, 24, 25, 26, 27,28,29,30,31,41,51,61,71,81,91,101]
future=np.array(future)
future_predict=logistic_increase_function(future,popt1[0],popt1[1],popt1[2])
# Near-term forecast.
tomorrow=[28,29,30,32,33,35,37,40]
tomorrow=np.array(tomorrow)
tomorrow_predict=logistic_increase_function(tomorrow,popt1[0],popt1[1],popt1[2])
# Plotting.
plot1 = plt.plot(t, P, 's',label="confimed infected people number")
plot2 = plt.plot(t, P_predict, 'r',label='predict infected people number')
plot3 = plt.plot(tomorrow, tomorrow_predict, 's',label='tomorrow predict infected people number')
plt.xlabel('time')
plt.ylabel('confimed infected people number')
plt.legend(loc=0)  # loc=0: let matplotlib pick the best legend position
print(logistic_increase_function(np.array(28),popt1[0],popt1[1],popt1[2]))
print(logistic_increase_function(np.array(29),popt1[0],popt1[1],popt1[2]))
plt.show()
```
本次拟合采用了1月11日到1月27日的累计确诊病例数据作为原始数据,采用最小二乘法拟合逻辑斯蒂曲线,最后经过对逻辑斯蒂模型中R值(增长速率,到达K值的速度)的拟合调整,发现在0.45附近得到的曲线比较贴合我国1月至2月疫情实际情况。2月9日的预测值在4万左右,与实际情况十分贴近,也证明了模型的一定可靠性。
将本模型推广,进行全球范围内典型新冠肺炎爆发国家的疫情拟合与未来疫情预测,同时将通过R值的大小反应出该国疫情应对的有效程度。
```
# --- Japan: logistic fit with hand-tuned r = 0.08 ---
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def logistic_increase_function(t,K,P0,r):
    """Logistic growth with fixed t0=11; the r argument is overridden below."""
    t0=11
    # Hand-tuned growth rate for this country.
    r = 0.08
    # t:time t0:initial time P0:initial_value K:capacity r:increase_rate
    exp_value=np.exp(r*(t-t0))
    return (K*exp_value*P0)/(K+(exp_value-1)*P0)
# Dates and infection counts.
n = "../dataSets\\countrydata.csv"
data = pd.read_csv(n)
# Change the country name here to fit a different country's curve.
data = data[data['countryName'] == '日本']
date_list = list(data['dateId'])
date_list = list(map(lambda x:str(x),date_list))
confirm_list = list(data['confirmedCount'])
time_array = np.array(range(19,len(date_list)+19))
long_time_array = np.array(range(19,len(date_list)+190))
confirm_array = np.array(confirm_list)
# Least-squares fit of the logistic curve to the observed data.
popt, pcov = curve_fit(logistic_increase_function, time_array, confirm_array)
# popt holds the fitted coefficients.
print("K:capacity P0:initial_value r:increase_rate t:time")
print(popt)
# P values predicted by the fitted curve.
P_predict = logistic_increase_function(long_time_array,popt[0],popt[1],popt[2])
# Long-range forecast.
# Near-term forecast.
# Plotting.
plot1 = plt.plot(time_array, confirm_array, 's',label="confimed infected people number")
plot2 = plt.plot(long_time_array, P_predict, 'r',label='predict infected people number')
plt.xlabel('time')
plt.ylabel('confimed infected people number')
plt.legend(loc=0)  # loc=0: let matplotlib pick the best legend position
print(logistic_increase_function(np.array(28),popt[0],popt[1],popt[2]))
print(logistic_increase_function(np.array(29),popt[0],popt[1],popt[2]))
plt.show()
# --- United States: logistic fit with hand-tuned r = 0.05 ---
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def logistic_increase_function(t,K,P0,r):
    """Logistic growth with fixed t0=11; the r argument is overridden below."""
    t0=11
    # r per country: China 0.25, US 0.05, UK 0.08, Italy 0.08, Germany 0.09, South Korea 0.11
    r = 0.05
    # t:time t0:initial time P0:initial_value K:capacity r:increase_rate
    exp_value=np.exp(r*(t-t0))
    return (K*exp_value*P0)/(K+(exp_value-1)*P0)
# Dates and infection counts.
n = "../dataSets\\countrydata.csv"
data = pd.read_csv(n)
# Change the country name here to fit a different country's curve.
data = data[data['countryName'] == '美国']
date_list = list(data['dateId'])
date_list = list(map(lambda x:str(x),date_list))
confirm_list = list(data['confirmedCount'])
time_array = np.array(range(19,len(date_list)+19))
long_time_array = np.array(range(19,len(date_list)+190))
confirm_array = np.array(confirm_list)
# Least-squares fit of the logistic curve to the observed data.
popt, pcov = curve_fit(logistic_increase_function, time_array, confirm_array)
# popt holds the fitted coefficients.
print("K:capacity P0:initial_value r:increase_rate t:time")
print(popt)
# P values predicted by the fitted curve.
P_predict = logistic_increase_function(long_time_array,popt[0],popt[1],popt[2])
# Long-range forecast.
# Near-term forecast.
# Plotting.
plot1 = plt.plot(time_array, confirm_array, 's',label="confimed infected people number")
plot2 = plt.plot(long_time_array, P_predict, 'r',label='predict infected people number')
plt.xlabel('time')
plt.ylabel('confimed infected people number')
plt.legend(loc=0)  # loc=0: let matplotlib pick the best legend position
print(logistic_increase_function(np.array(28),popt[0],popt[1],popt[2]))
print(logistic_increase_function(np.array(29),popt[0],popt[1],popt[2]))
plt.show()
# --- Germany: logistic fit with hand-tuned r = 0.09 ---
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def logistic_increase_function(t,K,P0,r):
    """Logistic growth with fixed t0=11; the r argument is overridden below."""
    t0=11
    # Hand-tuned growth rate for this country.
    r = 0.09
    # t:time t0:initial time P0:initial_value K:capacity r:increase_rate
    exp_value=np.exp(r*(t-t0))
    return (K*exp_value*P0)/(K+(exp_value-1)*P0)
# Dates and infection counts.
n = "../dataSets\\countrydata.csv"
data = pd.read_csv(n)
# Change the country name here to fit a different country's curve.
data = data[data['countryName'] == '德国']
date_list = list(data['dateId'])
date_list = list(map(lambda x:str(x),date_list))
confirm_list = list(data['confirmedCount'])
time_array = np.array(range(19,len(date_list)+19))
long_time_array = np.array(range(19,len(date_list)+190))
confirm_array = np.array(confirm_list)
# Least-squares fit of the logistic curve to the observed data.
popt, pcov = curve_fit(logistic_increase_function, time_array, confirm_array)
# popt holds the fitted coefficients.
print("K:capacity P0:initial_value r:increase_rate t:time")
print(popt)
# P values predicted by the fitted curve.
P_predict = logistic_increase_function(long_time_array,popt[0],popt[1],popt[2])
# Long-range forecast.
# Near-term forecast.
# Plotting.
plot1 = plt.plot(time_array, confirm_array, 's',label="confimed infected people number")
plot2 = plt.plot(long_time_array, P_predict, 'r',label='predict infected people number')
plt.xlabel('time')
plt.ylabel('confimed infected people number')
plt.legend(loc=0)  # loc=0: let matplotlib pick the best legend position
print(logistic_increase_function(np.array(28),popt[0],popt[1],popt[2]))
print(logistic_increase_function(np.array(29),popt[0],popt[1],popt[2]))
plt.show()
# --- South Korea: logistic fit with hand-tuned r = 0.11 ---
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def logistic_increase_function(t,K,P0,r):
    """Logistic growth with fixed t0=11; the r argument is overridden below."""
    t0=11
    # r per country: China 0.25, US 0.05, UK 0.08, Italy 0.08, Germany 0.09, South Korea 0.11
    r = 0.11
    # t:time t0:initial time P0:initial_value K:capacity r:increase_rate
    exp_value=np.exp(r*(t-t0))
    return (K*exp_value*P0)/(K+(exp_value-1)*P0)
# Dates and infection counts.
n = "../dataSets\\countrydata.csv"
data = pd.read_csv(n)
# Change the country name here to fit a different country's curve.
data = data[data['countryName'] == '韩国']
date_list = list(data['dateId'])
date_list = list(map(lambda x:str(x),date_list))
confirm_list = list(data['confirmedCount'])
time_array = np.array(range(19,len(date_list)+19))
long_time_array = np.array(range(19,len(date_list)+190))
confirm_array = np.array(confirm_list)
# Least-squares fit of the logistic curve to the observed data.
popt, pcov = curve_fit(logistic_increase_function, time_array, confirm_array)
# popt holds the fitted coefficients.
print("K:capacity P0:initial_value r:increase_rate t:time")
print(popt)
# P values predicted by the fitted curve.
P_predict = logistic_increase_function(long_time_array,popt[0],popt[1],popt[2])
# Long-range forecast.
# Near-term forecast.
# Plotting.
plot1 = plt.plot(time_array, confirm_array, 's',label="confimed infected people number")
plot2 = plt.plot(long_time_array, P_predict, 'r',label='predict infected people number')
plt.xlabel('time')
plt.ylabel('confimed infected people number')
plt.legend(loc=0)  # loc=0: let matplotlib pick the best legend position
print(logistic_increase_function(np.array(28),popt[0],popt[1],popt[2]))
print(logistic_increase_function(np.array(29),popt[0],popt[1],popt[2]))
plt.show()
```
关于R值的补充说明:逻辑斯蒂模型中R值代表的增长速率不是传统意义上理解的种群增长速度,而是接近种群数量达到环境承载力K值的速度。强烈的人为干预可以**大幅度降低K值**,使得种群数量快速达到最大值附近,疫情扩散得以控制。所以本模型在预测各国最终累计感染人数的功能之外,拟合过程中R值的大小可以反映某个国家面对新冠肺炎采取措施的**有效性和效率**。一般来说,R值越大,该国防疫措施越有效。
国家 | 中国 |美国 |英国 |德国 |意大利 |韩国| 日本
- | :-: | :-: | :-: | :-: | :-: | :-: | -:
R |0.25 | 0.05 |0.08 |0.09 |0.08 |0.11 |0.08
### 二、SEITR模型
(1)模型简介:SEITR模型是基于动力学SEIR模型不断调试模拟的结果,能够比较合理贴合传染病传播的一般规律。
1)模型中的4类人群:N为总人数
SUSCEPTIBLES: 用S表示,为易感者, 潜在的可感染人群
EXPOSED:用E表示,为潜伏者, 已经被感染但是没有表现出来的人群
INFECTIVES: 用I表示,为感染者, 表现出感染症状的人
RESISTANCES: 用R表示,为抵抗者, 感染者痊愈后获得抗性的人
2)模型中的3种参数:
αβ:易感人群(S) 被感染人员(I) 传染的传染率,相当于单人次易感者接触感染者而被感染的几率(β)与易感者单位时间内接触的感染者人数(α)的乘积
γ:感染人群(I) 以固定平均速率恢复(R) 或死亡的恢复率
Ω:潜伏人群(E) 变为感染者的平均速率,通常数值取潜伏期的倒数
3)增加修正的参数:
“T”:已被感染且正处于接受治疗时期的人群,主要特征表现为已被感染,已过潜伏期,但不会进行传染,且正在被治疗。
同时也将I人群严格定义为被感染,已过潜伏期但未被医院收治无法接受治疗的人群。
δ,表示I变为T的速率,主要受医院接诊速率及收治能力影响,也受发病后及时就医的时间影响。
以下使用SEITR模型对美国疫情基本得到控制的时间进行预测。
```
import scipy.integrate as spi
import numpy as np
import matplotlib.pyplot as plt
# N: total population (the US population is roughly 330 million)
N =330000000
# beta: transmission-rate coefficient (the true US value is probably a bit higher)
beta = 0.19
# gamma: recovery-rate coefficient
gamma = 0.15
# δ: treatment-rate coefficient (hospital admission rate)
δ = 0.3
# Te: incubation period of the disease, in days
Te = 14
# I_0: initial number of infected people not yet hospitalised
I_0 = 1
# E_0: initial number of exposed (incubating) people
E_0 = 0
# R_0: initial number of recovered people
R_0 = 0
# T_0: initial number of people under treatment
T_0 = 0
# S_0: initial number of susceptible people
S_0 = N - I_0 - E_0 - R_0 - T_0
# T: length of the simulation, in days
T = 250
# INI: initial state vector, ordered (S, E, I, R, T)
INI = (S_0,E_0,I_0,R_0,T_0)
def funcSEIR(inivalue, _):
    """Right-hand side of the SEITR ODE system for scipy.integrate.odeint.

    inivalue: current state (S, E, I, R, T_treated); the second argument is
    the time point required by odeint's callback signature (unused).
    Relies on the module-level parameters N, beta, gamma, δ and Te.
    """
    S, E, I, R, T_treated = inivalue
    # New exposures: both incubating (E) and symptomatic untreated (I)
    # individuals are treated as infectious in this formulation.
    new_exposed = (beta * S * (I + E)) / N
    dS = -new_exposed                 # susceptibles decrease
    dE = new_exposed - E / Te         # exposed become infectious after Te days
    dI = E / Te - δ * I               # infectious are admitted at rate δ
    dR = gamma * T_treated            # only treated patients recover
    dT = δ * I - gamma * T_treated    # under-treatment pool
    return np.array([dS, dE, dI, dR, dT])
# Integrate the SEITR system over days 0..T and plot every compartment.
T_range = np.arange(0,T + 1)
RES = spi.odeint(funcSEIR,INI,T_range)
# Columns of RES follow the state ordering (S, E, I, R, T_treated).
plt.plot(RES[:,0],color = 'darkblue',label = 'Susceptible',marker = '.')
plt.plot(RES[:,1],color = 'orange',label = 'Exposed',marker = '.')
plt.plot(RES[:,2],color = 'red',label = 'Infection',marker = '.')
plt.plot(RES[:,3],color = 'green',label = 'Recovery',marker = '.')
plt.plot(RES[:,4],color = 'purple',label = 'Under Treatment',marker = '.')
plt.title('“SEITR” Model')
plt.legend()
plt.xlabel('Day')
plt.ylabel('Number')
plt.show()
```
模型拟合评价:
(1)参数的设置:
1)传染率系数与人与人之间的社交距离和社交频率息息相关,美国在疫情早期未及时向民众宣传保持社交距离和戴口罩、减少出行的建议,导致传染率系数会比参数设置的更高;
2)治疗系数与当地医疗水平、卫生设施数量、医疗物资等息息相关,疫情中期各州的医疗设备全面告急,医护人员感染率上升,同时中产阶级及以下家庭因为无法支付高昂医疗费选择在家隔离,错过最佳治疗期,使得治疗系数要低于已经有雷神山火神山的武汉对应时期的治疗系数;
(2)结果分析:
主要的预测在于感染人数逐渐趋于0的时间节点,本次预测得到的结果是今年秋季美国的疫情能够基本得到控制。
(3)拟合分析;
本模型在尝试同时拟合现有病例(正在接受治疗人群)和治愈人数曲线时,发现无法做到相对同时拟合的比较贴合实际的结果。分析可知,(1)中的参数对拟合结果的影响非常大,而模型参数的选择需要结合美国实际疫情情况才能推算,目前使用的计算手段过于粗糙;同时该模型的假设条件是,美国的0号病人出现在今年1月11日,但是目前的报告陆续显示早在2019年美国就有社区性传播,因此本模型的可靠性大大下降。由于具体的时间目前国际上无法追溯,所以进一步的研究很难继续进行。
| github_jupyter |
# Leave-K-Studies-Out Analysis
- This jupyter notebook is available on-line at:
- https://github.com/spisakt/RPN-signature/blob/master/notebooks/4_leave-k-studies-out.ipynb
- Input data for the notebook and non-standard code (PAINTeR library) is available in the repo:
- https://github.com/spisakt/RPN-signature
- Raw MRI-data from study-centers 1 and 2 are available on OpenNeuro:
- https://openneuro.org/datasets/ds002608/versions/1.0.1
- https://openneuro.org/datasets/ds002609/versions/1.0.3
- Raw data from center 3 is available upon reasonable request.
## Imports
```
import sys
sys.path.append('../')
from PAINTeR import connectivity # in-house lib used for the RPN-signature
from PAINTeR import plot # in-house lib used for the RPN-signature
from PAINTeR import model # in-house lib used for the RPN-signature
import numpy as np # hi old friend
import pandas as pd
from sklearn.preprocessing import StandardScaler
from nilearn.connectome import ConnectivityMeasure
from matplotlib.colors import ListedColormap
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
from sklearn.linear_model import ElasticNet, Ridge
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.model_selection import LeaveOneOut, KFold, GroupKFold, LeavePGroupsOut
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score, explained_variance_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from neurocombat_sklearn import CombatModel
import scipy.stats as stats
import joblib
```
## Load and merge behavioral data for all three centers (after exclusions)
```
# Per-center behavioural tables (after subject exclusions).
df_bochum = pd.read_csv("../res/bochum_sample_excl.csv")
df_essen = pd.read_csv("../res/essen_sample_excl.csv")
df_szeged = pd.read_csv("../res/szeged_sample_excl.csv")
# Tag each row with its study center so rows stay identifiable after merging;
# 'study' is later used as the CV grouping variable.
df_bochum['study']='bochum'
df_essen['study']='essen'
df_szeged['study']='szeged'
# Stack the three samples into one table with a fresh 0..n-1 index.
df=pd.concat((df_bochum, df_essen, df_szeged), sort=False)
df=df.reset_index()
df
```
## Load preprocessed and scrubbed timeseries data
```
timeseries = []
perc_scrubbed = []
for i, f in enumerate(df['ts_file']):
    # Re-anchor the stored path below the repository root.
    f = '..' + f.split('/..')[1]
    f_scrub = f.split('.tsv')[0] + '-scrubbed.tsv'
    ts = pd.read_csv(f_scrub).iloc[:, 1:]  # here we can omit global signal...
    fd_file = df["fd_file"].values[i]
    fd_file = '..' + fd_file.split('/..')[1]
    fd = pd.read_csv(fd_file).values.ravel().tolist()
    fd = [0] + fd  # framewise displacement is undefined for the first volume
    # Percentage of volumes removed by scrubbing.
    # BUGFIX: the original used len(ts.shape), which is always 2 for a
    # DataFrame; the retained-volume count is the number of rows, len(ts).
    perc_scrubbed.append(100 - 100 * len(ts) / len(fd))
    timeseries.append(ts.values)
# double check visually
sub_idx = 10  # modify this for a different subject
# modify below for different regions (see the next cell for region names)
pd.DataFrame(timeseries[sub_idx], columns=ts.columns.values).loc[:, ['AINS_pd', 'AINS_v', 'PINS_v']].plot()
```
## Load region and module names for the MIST122 atlas
```
# Region labels come from the header of the last loaded timeseries file.
labels=ts.columns.values
# Module assignment per region from the relabeled MIST122 atlas, with an
# extra leading entry for the global-signal column.
l = pd.read_csv('../data/atlas_relabeled.tsv', sep="\t")
modules=np.insert(l['modules'].values, 0, "GlobSig")
print('Region names:\n', labels)
```
## Calculate functional connectivity as partial correlation
```
# Vectorized upper-triangle partial correlations serve as the features.
correlation_measure = ConnectivityMeasure(kind='partial correlation', vectorize=True, discard_diagonal=True)
X = correlation_measure.fit_transform(timeseries) # these are the features
# double-check the mean matrix visually
mat=correlation_measure.mean_
#mat=mat[1:, 1:] #fisrt row and column is global signal
mat[range(mat.shape[0]), range(mat.shape[0])] = 0 # zero diag
plot.plot_matrix(mat, labels, modules)
```
## Pain sensitivity as target variable
```
# Target: composite pain-sensitivity score from quantitative sensory testing.
y = df.mean_QST_pain_sensitivity
```
## Define model to be trained
```
# Factory for the model to be trained: feature selection -> scaling -> elastic net.
def pipe_scale_fsel_elnet(scaler=preprocessing.RobustScaler(),
                          fsel=SelectKBest(f_regression),
                          model=ElasticNet(max_iter=100000),
                          p_grid={'fsel__k': [25, 50, 100, 1000, 3000],
                                  'model__alpha': [.001, .005, .01, .05, .1, .5, 1, 5],
                                  'model__l1_ratio': [0.0001, .25, .5, .75, 0.9999]
                                  }):
    """Return (pipeline, hyper-parameter grid) for the nested cross-validation.

    The pipeline applies univariate feature selection first, then robust
    scaling, then elastic-net regression; p_grid spans the number of selected
    features and the elastic-net penalty.
    """
    steps = [('fsel', fsel),
             ('scaler', scaler),
             ('model', model)]
    return Pipeline(steps), p_grid

model, p_grid = pipe_scale_fsel_elnet()
```
## Nested leave-two-studies-out cross validation (L2SO)
I.e. training on one center only
```
# possible leakage, but can't do the combat transformation on unseen centers
# ComBat harmonisation removes study-center batch effects from the features,
# keeping sex (Male) and age as biological covariates.
comb = CombatModel()
X_combat = comb.fit_transform(X,
np.array([df.study.astype("category").cat.codes.values]).transpose(),
np.array([df.Male.values]).transpose(),
np.array([df.Age.values]).transpose()
)
# Outer loop holds out two centers at a time (i.e. trains on a single
# center); the inner loop tunes hyper-parameters with leave-one-out.
outer_cv = LeavePGroupsOut(2) # LeaveTwoStudiesOut
inner_cv = LeaveOneOut() # LeaveOneOut, to maximize training sample
clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv,
scoring="neg_mean_squared_error", verbose=True, return_train_score=False,
n_jobs=-1)
# Collect the winning estimator, its parameters, and the held-out
# predictions and scores for every outer split.
all_models = []
best_params = []
predicted = np.zeros(len(y))
nested_scores_train = np.zeros(outer_cv.get_n_splits(X_combat, groups=df.study))
nested_scores_test = np.zeros(outer_cv.get_n_splits(X_combat, groups=df.study))
print("model\tinner_cv mean score\touter vc score")
i=0
for train, test in outer_cv.split(X_combat, y, groups=df.study):
clf.fit(X_combat[train], y[train])
print('cv:', i, str(clf.best_params_) + " " + str(clf.best_score_) + " " + str(clf.score(X_combat[test], y[test])))
all_models.append(clf.best_estimator_)
best_params.append(clf.best_params_)
predicted[test] = clf.predict(X_combat[test])
nested_scores_train[i] = clf.best_score_
nested_scores_test[i] = clf.score(X_combat[test], y[test])
i = i+1
```
### Print out the unbiased, nested L2SO estimates and plot the (unbiased) nested-cv predictions
```
# Baseline: negative MSE of always predicting the sample mean.
print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))
print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean()))
print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test.mean()))
# Explained variance relative to the mean-as-model baseline.
print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) ))
print("Correlation: " + str(np.corrcoef(y, predicted)[0,1]))
plot.plot_prediction(y, predicted, sd=True, covar=[])
```
### Save the nested-cv predictions and the models
```
# save nested L2SO predictions
# NOTE(review): the file is named ..._L1SO.csv although it holds the L2SO
# predictions; the analysis cell below reads it back under the same swapped
# name, so renaming here alone would break that cell.
np.savetxt("../res/multi-center/nested_cv_pred_L1SO.csv", predicted, delimiter=",")
# szeged -> bochum + essen
joblib.dump(all_models[0], '../res/multi-center/model_trained_on_szeged.joblib')
# essen -> bochum + szeged
joblib.dump(all_models[1], '../res/multi-center/model_trained_on_essen.joblib')
# bochum -> essen + szeged
joblib.dump(all_models[2], '../res/multi-center/model_trained_on_bochum.joblib') # RPN-signature
```
## Nested leave-one-study-out cross validation (L1SO)
I.e. training on one center only
```
# possible leakage, but can't do the combat transformation on unseen centers
# ComBat harmonisation, identical to the L2SO cell above.
comb = CombatModel()
X_combat = comb.fit_transform(X,
np.array([df.study.astype("category").cat.codes.values]).transpose(),
np.array([df.Male.values]).transpose(),
np.array([df.Age.values]).transpose()
)
# Outer loop holds out one center at a time (i.e. trains on the other two);
# the inner loop tunes hyper-parameters with leave-one-out.
outer_cv = LeavePGroupsOut(1) # LeaveOneStudyOut
inner_cv = LeaveOneOut() # LeaveOneOut, to maximize training sample
clf = GridSearchCV(estimator=model, param_grid=p_grid, cv=inner_cv,
scoring="neg_mean_squared_error", verbose=True, return_train_score=False,
n_jobs=-1)
# Collect the winning estimator, its parameters, and the held-out
# predictions and scores for every outer split.
all_models = []
best_params = []
predicted = np.zeros(len(y))
nested_scores_train = np.zeros(outer_cv.get_n_splits(X_combat, groups=df.study))
nested_scores_test = np.zeros(outer_cv.get_n_splits(X_combat, groups=df.study))
print("model\tinner_cv mean score\touter vc score")
i=0
for train, test in outer_cv.split(X_combat, y, groups=df.study):
clf.fit(X_combat[train], y[train])
print('cv:', i, str(clf.best_params_) + " " + str(clf.best_score_) + " " + str(clf.score(X_combat[test], y[test])))
all_models.append(clf.best_estimator_)
best_params.append(clf.best_params_)
predicted[test] = clf.predict(X_combat[test])
nested_scores_train[i] = clf.best_score_
nested_scores_test[i] = clf.score(X_combat[test], y[test])
i = i+1
```
### Print out the unbiased, nested estimates and plot the (unbiased) nested-cv predictions
```
# Baseline: negative MSE of always predicting the sample mean.
print("*** Score on mean as model:\t" + str(-mean_squared_error(np.repeat(y.mean(), len(y)), y)))
print("** Mean score in the inner crossvaludation (inner_cv):\t" + str(nested_scores_train.mean()))
print("** Mean Nested Crossvalidation Score (outer_cv):\t" + str(nested_scores_test.mean()))
# Explained variance relative to the mean-as-model baseline.
print("Explained Variance: " + str( 1- nested_scores_test.mean()/-mean_squared_error(np.repeat(y.mean(), len(y)), y) ))
print("Correlation: " + str(np.corrcoef(y, predicted)[0,1]))
plot.plot_prediction(y, predicted, sd=True, covar=[])
```
### Save the nested-cv predictions and the models
```
# save nested L1SO predictions
# NOTE(review): this cell saves the L1SO predictions but the file is named
# ..._L2SO.csv (naming swapped with the cell above); the analysis cell below
# reads the same swapped name, so the notebook is internally consistent.
np.savetxt("../res/multi-center/nested_cv_pred_L2SO.csv", predicted, delimiter=",")
# essen+szeged -> bochum
joblib.dump(all_models[0], '../res/multi-center/model_trained_on_essen+szeged.joblib')
# bochum+szeged -> essen
joblib.dump(all_models[1], '../res/multi-center/model_trained_on_bochum+szeged.joblib')
# bochum+essen -> szeged
joblib.dump(all_models[2], '../res/multi-center/model_trained_on_bochum+essen.joblib')
```
## Analyze L2SO
```
# L2SO analysis: each subject was predicted by a model trained on a single
# center. (The file is named L1SO due to the naming mix-up in the cell that
# wrote it; it really holds the leave-two-studies-out predictions.)
predicted_l2so = np.genfromtxt("../res/multi-center/nested_cv_pred_L1SO.csv", delimiter=",")
from mlxtend.evaluate import permutation_test

# For every possible training center, evaluate the pooled predictions of the
# two held-out centers: explained variance, correlation, an approximate
# permutation test of the correlation, and a prediction plot.
for train_center in ('bochum', 'essen', 'szeged'):
    held_out = df.study != train_center
    y_true = y[held_out]
    y_hat = predicted_l2so[held_out]
    print("Explained Variance: ", explained_variance_score(y_true, y_hat))
    print("Correlation: ", str(np.corrcoef(y_true, y_hat)[0,1]))
    p_corr = permutation_test(y_true, y_hat,
                              func=lambda x, y: np.corrcoef(x, y)[0,1],
                              method='approximate',
                              num_rounds=8000,
                              seed=42)
    print('p =', p_corr)
    plot.plot_prediction(y_true, y_hat, sd=True, covar=[])

# Permutation test of the correlation on the whole pooled sample.
p_corr = permutation_test(y, predicted_l2so,
                          func=lambda x, y: np.corrcoef(x, y)[0,1],
                          method='approximate',
                          num_rounds=8000,
                          seed=42)
print('p =', p_corr)
```
## Analyze L1SO
```
# L1SO analysis: each center was predicted by a model trained on the other
# two centers. (The file is named L2SO — same naming mix-up as above.)
predicted_l1so = np.genfromtxt("../res/multi-center/nested_cv_pred_L2SO.csv", delimiter=",") # typo in name
from mlxtend.evaluate import permutation_test

# Evaluate each held-out center separately: explained variance, correlation,
# an approximate permutation test of the correlation, and a prediction plot.
for test_center in ('bochum', 'essen', 'szeged'):
    held_out = df.study == test_center
    y_true = y[held_out]
    y_hat = predicted_l1so[held_out]
    print("Explained Variance: ", explained_variance_score(y_true, y_hat))
    print("Correlation: ", str(np.corrcoef(y_true, y_hat)[0,1]))
    p_corr = permutation_test(y_true, y_hat,
                              func=lambda x, y: np.corrcoef(x, y)[0,1],
                              method='approximate',
                              num_rounds=8000,
                              seed=42)
    print('p =', p_corr)
    plot.plot_prediction(y_true, y_hat, sd=True, covar=[])
```
| github_jupyter |
# Taylor Problem 16.14 version A
We'll plot at various times a wave $u(x,t)$ that is defined by its initial shape at $t=0$ from $x=0$ to $x=L$, using a Fourier sine series to write the result at a general time t:
$\begin{align}
u(x,t) = \sum_{n=1}^{\infty} B_n \sin(k_n x)\cos(\omega_n t)
\;,
\end{align}$
with $k_n = n\pi/L$ and $\omega_n = k_n c$, where $c$ is the wave speed. Here the coefficients are given by
$\begin{align}
B_n = \frac{2}{L}\int_0^L u(x,0) \sin\frac{n\pi x}{L} \, dx
\;.
\end{align}$
* Created 28-Mar-2019. Last revised 30-Mar-2019 by Dick Furnstahl (furnstahl.1@osu.edu).
* This version sums only over odd n, which are called $m = 2n + 1$.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from IPython.display import HTML
```
First define functions for the $t=0$ wave function form (here a triangle) and for the subsequent shape at any time $t$ based on the wave speed `c_wave`.
```
def B_coeff(m):
    """Fourier coefficient B_n of the triangular wave for n = 2*m + 1.

    Only odd harmonics contribute to this waveform; the sign alternates
    with m and the magnitude falls off as 1/n^2.
    """
    sign = 1. if m % 2 == 0 else -1.
    return sign * 8. / ((2. * m + 1) * np.pi) ** 2
def k(m, L):
    """Wave number k_n = n*pi/L of the standing wave, with n = 2*m + 1."""
    odd_n = 2. * m + 1.
    return odd_n * np.pi / L
def u_triangular(x_pts, t, m_max=20, c_wave=1., L=1.):
    """Evaluate the standing wave u(x, t) by summing the first m_max odd
    Fourier modes (n = 2*m + 1) with coefficients from B_coeff and wave
    numbers from k."""
    total = np.zeros(len(x_pts))
    for m in range(m_max):
        k_m = k(m, L)
        total += B_coeff(m) * np.sin(k_m * x_pts) * np.cos(k_m * c_wave * t)
    return total
```
First look at the initial ($t=0$) wave form.
```
# Wave and summation parameters.
L = 1.
m_max = 20
c_wave = 1
# Fundamental angular frequency and the corresponding period tau.
omega_1 = np.pi * c_wave / L
tau = 2.*np.pi / omega_1
# Set up the array of x points (whatever looks good)
x_min = 0.
x_max = L
delta_x = 0.01
x_pts = np.arange(x_min, x_max, delta_x)
# Make a figure showing the initial wave.
t_now = 0.
fig = plt.figure(figsize=(6,4), num='Standing wave')
ax = fig.add_subplot(1,1,1)
ax.set_xlim(x_min, x_max)
gap = 0.1
ax.set_ylim(-1. - gap, 1. + gap)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$u(x, t=0)$')
ax.set_title(rf'$t = {t_now:.1f}$')
line, = ax.plot(x_pts,
u_triangular(x_pts, t_now, m_max, c_wave, L),
color='blue', lw=2)
fig.tight_layout()
```
Next make some plots at an array of time points.
```
# Snapshots at nine times spanning one full period: t = 0, tau/8, ..., tau.
t_array = tau * np.arange(0., 1.125, .125)
fig_array = plt.figure(figsize=(12,12), num='Standing wave')
# One panel of a 3x3 grid per time point.
for i, t_now in enumerate(t_array):
ax_array = fig_array.add_subplot(3, 3, i+1)
ax_array.set_xlim(x_min, x_max)
gap = 0.1
ax_array.set_ylim(-1. - gap, 1. + gap)
ax_array.set_xlabel(r'$x$')
ax_array.set_ylabel(r'$u(x, t)$')
ax_array.set_title(rf'$t = {t_now/tau:.3f}\tau$')
ax_array.plot(x_pts,
u_triangular(x_pts, t_now, m_max, c_wave, L),
color='blue', lw=2)
fig_array.tight_layout()
fig_array.savefig('Taylor_Problem_16p14.png',
bbox_inches='tight')
```
Now it is time to animate!
```
# Set up the t mesh for the animation. The maximum value of t shown in
# the movie will be t_min + delta_t * frame_number
t_min = 0. # You can make this negative to see what happens before t=0!
t_max = 2.*tau # animate two full periods
delta_t = t_max / 100. # 100 steps across the animated interval
t_pts = np.arange(t_min, t_max + delta_t, delta_t)
```
We use the cell "magic" `%%capture` to keep the figure from being shown here. If we didn't, the animated version below would be blank.
```
%%capture
# Build the animation figure once; each frame only updates the line data.
fig_anim = plt.figure(figsize=(6,3), num='Triangular wave')
ax_anim = fig_anim.add_subplot(1,1,1)
ax_anim.set_xlim(x_min, x_max)
gap = 0.1
ax_anim.set_ylim(-1. - gap, 1. + gap)
# By assigning the first return from plot to line_anim, we can later change
# the values in the line.
line_anim, = ax_anim.plot(x_pts,
u_triangular(x_pts, t_min, m_max, c_wave, L),
color='blue', lw=2)
fig_anim.tight_layout()
def animate_wave(i):
"""This is the function called by FuncAnimation to create each frame,
numbered by i. So each i corresponds to a point in the t_pts
array, with index i.
"""
t = t_pts[i]
y_pts = u_triangular(x_pts, t, m_max, c_wave, L)
line_anim.set_data(x_pts, y_pts) # overwrite line_anim with new points
return (line_anim,) # this is needed for blit=True to work
frame_interval = 80. # time between frames, in milliseconds
frame_number = 101 # number of frames to include (index of t_pts)
anim = animation.FuncAnimation(fig_anim,
animate_wave,
init_func=None,
frames=frame_number,
interval=frame_interval,
blit=True,
repeat=False)
HTML(anim.to_jshtml()) # animate using javascript
```
| github_jupyter |
```
import ipywidgets as widgets
from ipywidgets import Accordion, HBox
from ipywidgets import FileUpload, Button
from ipyfilechooser import FileChooser
import json
import html
# Accumulates all widget values destined for the metadata export.
metadata={}
# Observer: mirror the electrode-configuration widget value into the
# metadata dict and refresh the HTML preview with pretty-printed,
# HTML-escaped JSON.
def _observe_elec_config(change):
print('_observe_elec_config')
metadata[ widget_elec_config.description] = widget_elec_config.value
metadata_json_raw = json.dumps(metadata, indent=4)
export.value = "<pre>{}</pre>".format(
html.escape(metadata_json_raw))
# HTML widget that shows the live JSON preview of the metadata.
export = widgets.HTML()
# "Metadata" tab content: heading plus the preview.
vbox_metadata = widgets.VBox(
[
widgets.HTML('''
<h4>Preview of metadata export:</h4>
<hr style="height:1px;border-width:0;color:black;background-color:gray">
'''),
export
]
)
# Minimal acquisition-metadata input widgets.
widget_elec_spacing = widgets.Text(
description='Electrode spacing:')
widget_elec_config = widgets.RadioButtons(
options=['1D', '2D','3D'],
default='1D',
description='Econfig',
disabled=False)
widget_instrument = widgets.Text(
description='Instrument')
widget_xy_coords_file = FileChooser(use_dir_icons=True, title='upload dataset')
vbox_minreq = widgets.VBox(
[widget_elec_spacing, widget_elec_config]
)
# Accordion with two panes: minimal metadata and the file chooser.
accordion = widgets.Accordion(children=[vbox_minreq,widget_xy_coords_file], titles=('Min meta', 'Upload'))
#accordion = widgets.Accordion(children=[widget_elec_config,widget_xy_coords_file], titles=('Min meta', 'Upload'))
#accordion = widgets.Accordion(children=[widget_instrument,widget_xy_coords_file], titles=('Min meta', 'Upload'))
accordion.set_title(0,'Min meta')
accordion.set_title(1,'Upload')
TL_widget_elec_config = []
TL_accordion_minmeta=[]
tab_nest = widgets.Tab()
tab_nest.children = [accordion]
#tab_nest.titles = ('An accordion', 'Copy of the accordion')
# Number of time-lapse steps; changing it rebuilds the per-step tabs below.
widget_nb_files_TL = widgets.IntText(
description='Nb of steps',
value=1
)
#metadata_tab = widgets.Text(description='preview metadata')
# Root tab: "Home" (nested accordion) and the metadata preview.
tab = widgets.Tab()
tab.children = [tab_nest, vbox_metadata]
tab.set_title(0,'Home')
tab.set_title(1,'Metadata')
print(accordion.children)
import numpy as np
# Rebuild the accordion/tab hierarchy (now with the first pane pre-selected)
# and display the root tab as the cell output.
accordion = widgets.Accordion(children=[vbox_minreq,widget_xy_coords_file], titles=('Min meta', 'Upload'), selected_index = 0)
#accordion3 = widgets.Accordion(children=[vbox_minreq,widget_xy_coords_file], titles=('Min meta', 'Upload'))
tab_nest.children = [accordion]
tab_nest.titles = ('An accordion')
tab = widgets.Tab()
tab.children = [tab_nest, vbox_metadata]
tab.set_title(0,'Home')
tab.set_title(1,'Metadata')
tab
# Rebuilds the per-step ("time-lapse") tabs whenever the step count changes:
# clones the minimal-metadata widgets and the file chooser once per step and
# swaps them into the root tab. NOTE(review): indentation was lost in this
# export, so the exact nesting of the helpers below could not be verified.
def add_children_test(change): # add children tabs to existing root tab
print('add children')
vboxTL_min_req = []
TL_upload = []
accordionTL = []
TL_tab = widgets.Tab()
TL_names = []
# Handler for the "Remove selected TL tab" button (mostly disabled
# experiments kept as commented-out code).
def rmv_children_tab(change):
print('rmv')
#id = TL_tab.selected_index
#TL_tab.selected_index = None # Warning : this will emit a change event
# TL_tab.selected_index = id
#print(TL_tab.selected_index)
def rmv_selected(change):
print(change)
print('change')
#tab_nest['selected_index']
#TL_tab.selected_index.close
#TL_tab.observe(lambda change: print(f"selected index: {change['new']}") , names='selected_index')
#TL_tab.close
#TL_tab.children[TL_tab.selected_index].layout.display = 'none'
#TL_tab.children[TL_tab.selected_index].close
#tab_nest.observe(rmv_selected,'value')
print(tab_nest)
#accordion.selected_index = None
#np.arange(0,widget_nb_files_TL.value)
#tab_len = np.arange(0,widget_nb_files_TL.value)
#tab_id = np.delete(tab_len,TL_tab.selected_index)
#accordion.selected_index = tab_id
#tab.children = [accordion, vbox_metadata]
# Handler for the "Remove all TL tabs" button: drop the nested tab and
# restore the plain accordion layout.
def rmv_children_all(change): # add children tabs to existing root tab
print('rmv')
tab_nest.close
tab.children = [accordion, vbox_metadata]
# One iteration per time-lapse step: walk the existing widget tree and
# clone each input widget with the step number appended to its label.
for steps in enumerate(np.arange(0,widget_nb_files_TL.value)): # tab nest = time lapse
#for steps in enumerate(np.arange(0,3)): # tab nest = time lapse
TL_names.append('step'+ str(steps[0]))
for child in tab_nest.children: # second accordion
for child_2nd in child.children: # 1st/second accordion
if child_2nd._view_name == 'VBoxView':
TL_min_req = []
# FileChooser instances expose .selected; VBoxes do not, so the
# try/except discriminates the two widget kinds.
try:
child_2nd.selected
TL_upload.append(FileChooser(use_dir_icons=True, title=child_2nd.title + str(steps[0])))
#print(child_2nd.selected)
print(child_2nd)
except:
for child_3rd in child_2nd.children: # loop within each accordions
if child_3rd._view_name == 'TextView':
TL_min_req.append(widgets.Text(child_3rd.description + str(steps[0])))
if child_3rd._view_name == 'RadioButtonsView':
TL_min_req.append(widgets.RadioButtons(options=child_3rd.options,
value=child_3rd.value,
description=child_3rd.description + str(steps[0]),
disabled=False))
vboxTL_min_req.append(widgets.VBox(TL_min_req))
# Per-step controls and the cloned accordion.
widget_add_tab = widgets.IntText(description='Add new tab')
deleteAll = widgets.Button(icon="trash", description='Remove all TL tabs')
deleteOne = widgets.Button(icon="trash", description='Remove selected TL tab')
deleteAll.on_click(rmv_children_all)
deleteOne.on_click(rmv_children_tab)
#accordionTL.append(widgets.HBox([widget_remove_tab,delete]))
accordionTL.append(widgets.Accordion(children=[vboxTL_min_req[steps[0]],TL_upload[steps[0]]],
titles=('Min meta', 'Upload')))
#accordionTL[steps[0]].set_title(index=0, title='Min meta')
#accordionTL[steps[0]].set_title(index=1, title='Upload')
#print('len acc' + str(len(accordionTL)))
#print(accordionTL[steps[0]])
# Assemble the per-step tabs and swap them into the root tab.
TL_tab.children = accordionTL
[TL_tab.set_title(num,name) for num, name in enumerate(TL_names)]
vbox_tab_TL = widgets.VBox([widgets.HBox([widget_add_tab,deleteOne,deleteAll]),TL_tab])
tab.children = [vbox_tab_TL, vbox_metadata]
# Mirror any widget with a description into the metadata preview.
def _observe_minreq_TL():
for child in TL_tab.children: # second accordion
child.observe(_observe_TL, 'value')
if hasattr(steps, 'description'):
metadata[steps.description] = steps.value
metadata_json_raw = json.dumps(metadata, indent=4)
export.value = "<pre>{}</pre>".format(
html.escape(metadata_json_raw))
def _observe_TL(change):
#if change['type'] == 'change' and change['name'] == 'value':
print('change:' + str(change['new']))
# Wire up the observers on every per-step tab; FileChoosers use
# register_callback instead of observe, hence the try/except.
for child in TL_tab.children: # second accordion
_observe_minreq_TL()
for child in TL_tab.children:
print('child observe:' + str(child))
for child2 in child.children:
try:
child2.register_callback(_observe_upload_TL)
except:
pass
# Debug helpers left in place by the author.
def report_children_change(change):
print('ie change observed'+str(change))
def _observe_Test(change):
#if change['type'] == 'change' and change['name'] == 'value':
print(change)
#tab.observe(report_children_change)
# Rebuild the time-lapse tabs whenever the step count changes, then show
# the whole UI.
widget_nb_files_TL.observe(add_children_test, 'value')
Box = widgets.VBox([tab,widget_nb_files_TL])
display(Box)
```
| github_jupyter |
```
import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
# Beginning in one dimension:
# mean = 0; Var = 1; N = 100
# scatter = np.random.normal(mean,np.sqrt(Var),N)
# scatter = np.sort(scatter)
t = [189.6071000099182, 191.2862000465393, 191.9226999282837, 192.32009983062744, 194.59099984169006, 194.69189977645874, 194.81200003623962, 195.4136998653412, 195.8914999961853, 196.71169996261597, 196.78150010108948, 197.03410005569458, 197.1198000907898, 197.76399993896484, 197.80709981918335, 197.87769985198975, 197.9296998977661, 198.16470003128052, 198.34800004959106, 198.38380002975464, 198.55809998512268, 198.59619998931885, 198.60689997673035, 198.7994999885559, 198.8127999305725, 198.87769985198975, 198.8961000442505, 199.03090000152588, 199.06170010566711, 199.11590003967285, 199.11650013923645, 199.23480010032654, 199.26859998703003, 199.28539991378784, 199.28569984436035, 199.2901999950409, 199.30789995193481, 199.38619995117188, 199.4277000427246, 199.54820013046265, 199.60840010643005, 199.68470001220703, 199.79959988594055, 199.80520009994507, 199.87220001220703, 199.92110013961792, 199.93280005455017, 199.96449995040894, 200.02769994735718, 200.15759992599487, 200.176500082016, 200.29069995880127, 200.33640003204346, 200.42440009117126, 200.74760007858276, 200.76629996299744, 201.10800004005432, 201.36319994926453, 201.56420016288757, 201.64739990234375, 203.52499985694885, 203.7118000984192, 206.8543999195099, 208.2497000694275, 210.16980004310608, 211.22909998893738, 212.25820016860962, 213.3011999130249, 216.19820022583008, 218.5029001235962, 219.21879982948303, 219.2882001399994, 223.16790008544922, 226.58719992637634, 227.41970014572144, 228.24290013313293, 231.29270005226135, 231.67810010910034, 232.58859992027283, 233.24539995193481, 233.4776999950409, 235.769700050354, 237.4837999343872, 237.61409997940063, 240.57780003547668, 241.18440008163452, 241.85349988937378, 243.439199924469, 243.5006000995636, 243.57949995994568, 245.11009979248047, 246.33259987831116, 246.59170007705688, 247.40899991989136, 250.1579999923706, 250.61940002441406, 250.6965000629425, 251.95709991455078, 252.30920004844666, 252.87159991264343, 252.99370002746582, 
253.6926999092102, 255.32419991493225, 258.3877999782562, 259.0522999763489, 259.2037000656128, 259.2242000102997, 259.83120012283325, 259.84759998321533, 259.9684000015259, 261.8134000301361, 263.59940004348755, 264.8375999927521, 267.7960000038147, 267.8507001399994, 268.83990001678467, 269.04620003700256, 269.05509996414185, 269.33240008354187, 269.5037000179291, 269.7397999763489, 270.90319991111755, 272.0377998352051, 272.0747001171112, 272.1542999744415, 272.35350012779236, 272.3550000190735, 272.668399810791, 272.90750002861023, 273.2815001010895, 274.6137001514435, 274.6651999950409, 275.06919980049133, 275.2734000682831, 275.6498999595642, 275.88120007514954, 276.1007001399994, 276.73189997673035, 277.02559995651245, 277.47670006752014, 277.8952000141144, 277.91939997673035, 278.1614000797272, 278.59540009498596, 279.251699924469, 279.6147999763489, 281.0438001155853, 281.07920002937317, 281.2242000102997, 282.16820001602173, 282.9784998893738, 282.9980001449585, 283.688099861145, 283.9584000110626, 284.77300000190735, 285.3097999095917, 285.9256000518799, 287.29639983177185, 287.6809000968933, 287.81550002098083, 288.0566999912262, 291.2125999927521, 291.46810007095337, 293.32620000839233, 293.80739998817444, 294.0341999530792, 295.0464999675751, 295.27039980888367, 297.28690004348755, 297.48359990119934, 297.68649983406067, 297.73920011520386, 298.1391999721527, 298.54929995536804, 299.3740999698639, 299.5074999332428, 299.67120003700256, 299.81270003318787, 299.9292998313904, 300.22179985046387, 300.3617000579834, 300.69840002059937, 300.88229990005493, 301.0942997932434, 301.24150013923645, 301.30570006370544, 301.33369994163513, 301.3472001552582, 301.38059997558594, 302.1319999694824, 302.18330001831055, 302.23539996147156, 302.3315999507904, 302.3568000793457, 302.36070013046265, 302.487900018692, 303.50329995155334, 304.3765001296997, 305.20849990844727, 305.25220012664795, 305.34370017051697, 305.4293999671936, 305.47379994392395, 
306.4015998840332, 306.4570999145508, 306.7078001499176, 307.00709986686707, 307.1610999107361, 307.4059998989105, 307.5889000892639, 308.44840002059937, 308.5464999675751, 316.46250009536743, 316.74259996414185, 317.188099861145, 318.49349999427795, 318.5443000793457, 318.6096999645233, 318.9563000202179, 319.61789989471436, 320.3954999446869, 320.58780002593994, 321.634299993515, 322.1167998313904, 322.1172001361847, 322.1802999973297, 322.9318001270294, 324.82249999046326, 324.86469984054565, 325.91390013694763, 325.9948000907898, 326.1133999824524, 326.446799993515, 326.906800031662, 327.6143000125885, 328.6284999847412, 329.0639998912811, 329.90120005607605, 331.6512999534607, 332.80840015411377, 333.9262001514435, 334.44810009002686, 335.5722999572754, 335.6867001056671, 336.7762999534607, 337.8452000617981, 338.3204998970032, 338.96050000190735, 339.78570008277893, 340.2051000595093, 340.3507001399994, 341.5891001224518, 341.63809990882874, 343.3168001174927, 344.00020003318787, 344.0559000968933, 345.519700050354, 345.6105999946594, 345.7255001068115, 346.04819989204407, 346.0757999420166, 346.4703998565674, 346.9500000476837, 347.48970007896423, 347.68649983406067, 348.1500999927521, 348.50789999961853, 348.8159999847412, 348.81700015068054, 348.8840000629425, 348.93190002441406, 349.0747001171112, 349.1642999649048, 349.81089997291565, 349.93710017204285, 350.6523001194, 350.7711000442505, 351.79439997673035, 352.09860014915466, 352.18729996681213, 352.57480001449585, 352.7698998451233, 352.9992001056671, 353.12720012664795, 353.5209000110626, 354.969899892807, 355.66739988327026, 356.28539991378784, 357.6537001132965, 357.928200006485, 358.0456998348236, 358.2394998073578, 358.5462999343872, 359.0153999328613, 359.1082000732422, 359.7950999736786, 359.82130002975464, 360.8533000946045, 361.08629989624023, 361.47239995002747, 361.7726001739502, 362.0262999534607, 362.0757999420166, 362.17980003356934, 362.2457001209259, 362.2590000629425, 
363.95120000839233, 364.4533998966217, 365.431599855423, 366.62080001831055, 367.0819001197815, 369.2527000904083, 370.7765998840332, 370.8450999259949, 371.5557999610901, 372.8712999820709, 374.1957998275757, 374.8885998725891, 379.8382000923157, 379.86250019073486, 381.678200006485, 381.7116000652313, 382.7015998363495, 383.42400002479553, 384.49149990081787, 385.81610012054443, 387.0953001976013, 389.61190009117126, 392.09999990463257, 393.84220004081726, 396.26430010795593, 396.2650001049042, 397.0871000289917, 397.08929991722107, 397.7426998615265, 398.1346001625061, 399.49410009384155, 399.5038001537323, 399.99399995803833, 400.6917998790741, 400.98969984054565, 403.94350004196167, 404.0395998954773, 404.59699988365173, 404.6835000514984, 405.4930000305176, 406.2351999282837, 406.46550011634827, 407.2270998954773, 408.00160002708435, 409.4976999759674, 412.1075999736786, 414.2595000267029, 414.36210012435913, 414.9379999637604, 416.5566999912262, 416.5749001502991, 416.5896999835968, 418.04209995269775, 418.6231999397278, 418.7039999961853, 419.20789980888367, 420.0859999656677, 420.2603998184204, 420.67400002479553, 421.7853000164032, 422.0877001285553, 425.32510018348694, 427.38650012016296, 427.88639998435974, 428.1428999900818, 428.926500082016, 429.20300006866455, 429.23849987983704, 430.73619985580444, 431.32559990882874, 432.56199979782104, 433.4599997997284, 434.61559987068176, 434.97499990463257, 435.97259998321533, 436.6355001926422, 437.95120000839233, 437.96429991722107, 439.66680002212524, 440.2565999031067, 441.78629994392395, 441.91820001602173, 442.3288998603821, 446.6708998680115, 448.87810015678406, 452.75, 453.41050004959106, 455.1205999851227, 456.0848000049591, 456.19320011138916, 456.9457998275757, 461.78250002861023, 464.80830001831055, 468.03509998321533, 468.634299993515, 468.94090008735657, 471.0374000072479, 473.3707001209259, 475.1545000076294, 475.7686998844147, 476.3066999912262, 476.5549998283386, 476.71939992904663, 
476.84540009498596, 476.88560009002686, 477.498300075531, 477.7546000480652, 478.4089000225067, 478.62720012664795, 478.76550006866455, 478.78539991378784, 479.44370007514954, 480.10689997673035, 480.1270000934601, 480.9742999076843, 481.1449999809265, 481.41799998283386, 481.74210000038147, 482.376699924469, 483.3386001586914, 483.3827998638153, 484.023099899292, 484.02680015563965, 484.2613000869751, 484.28769993782043, 484.4710998535156, 484.57329988479614, 484.58179998397827, 485.16129994392395, 485.72640013694763, 486.674800157547, 487.82829999923706, 490.12739992141724, 490.1569998264313, 490.4675998687744, 491.092600107193, 491.3041000366211, 494.5938000679016, 495.1322000026703, 496.2479000091553, 496.26749992370605, 498.7243001461029, 501.0044000148773, 502.729199886322, 513.9409999847412, 514.3407998085022, 516.0920000076294, 520.3984999656677, 521.0588998794556, 542.6861999034882, 589.6159000396729, 601.6647000312805, 602.648600101471, 603.5119998455048, 615.537700176239, 625.2442998886108, 636.2081999778748, 673.241800069809]
# Fit a normal distribution to the sample `t` in two ways (ML fit and a
# least-squares fit of the CDF) and plot each against the empirical CDF.
scatter = np.array(t)
mu = np.mean(scatter)          # sample mean
Var = np.std(scatter, ddof=1)  # NOTE: sample standard deviation, not variance, despite the name
N = len(scatter)
mu1,sigma1 = norm.fit(scatter) # classical fit (maximum likelihood)
scat_sum = np.cumsum(np.ones(scatter.shape))/N # cumulative samples (empirical CDF heights)
[mu2,sigma2],Cx = curve_fit(norm.cdf, scatter, scat_sum, p0=[0,1]) # curve fit
print(u"norm.fit(): µ1= {:+.4f}, σ1={:.4f}".format(mu1, sigma1))
print(u"curve_fit(): µ2= {:+.4f}, σ2={:.4f}".format(mu2, sigma2))
t = np.linspace(min(scatter),max(scatter), len(scatter))
# BUG FIX: the two prints below used Python 2 statement syntax ("print t"),
# which is a SyntaxError on Python 3 (the notebook targets Python 3 —
# f-strings are used elsewhere).
print(t)
print(norm.cdf(t, mu1, sigma1))
# Figure 1: ML fit vs. empirical CDF
fg = plt.figure(1); fg.clf()
ax = fg.add_subplot(1, 1, 1)
ax.plot(t, norm.cdf(t, mu1, sigma1), alpha=.5, label="norm.fit()")
ax.step(scatter, scat_sum, '-', where='post', alpha=.5, label="Samples")
ax.legend(loc="best")
ax.grid(True)
ax.set_xlabel("$x$")
ax.set_ylabel("Cumulative Probability Density")
ax.set_title("Fit to Normal Distribution")
fg.canvas.draw()
plt.show()
# Figure 2: curve_fit() CDF on its own
fg = plt.figure(1); fg.clf()
ax = fg.add_subplot(1, 1, 1)
t = np.linspace(min(scatter),max(scatter), len(scatter))
# ax.plot(t, norm.cdf(t, mu1, sigma1), alpha=.5, label="norm.fit()")
ax.plot(t, norm.cdf(t, mu2, sigma2), alpha=.5, label="curve_fit()")
ax.legend(loc="best")
ax.grid(True)
ax.set_xlabel("$x$")
ax.set_ylabel("Cumulative Probability Density")
ax.set_title("Fit to Normal Distribution")
fg.canvas.draw()
plt.show()
%matplotlib inline
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
def measures_of_center(durations):
m = {}
m['mu'] = np.mean(durations)
m['sem'] = stats.sem(durations)
m['median'] = np.median(durations)
m['mode'] = stats.mstats.mode(durations)
return m
# Simulate 10,000 fair-die rolls and summarize/plot the distribution.
rolls = []
for i in range(10000):
    roll = random.choice([1,2,3,4,5,6])
    rolls.append(roll)
# pd.DataFrame(rolls).plot(kind="density", # Plot the distribution
# figsize=(9,9),
# xlim=(-1,7))
# stats.uniform.cdf(x=6, # Cutoff value (quantile) to check
# loc=1, # Distribution start
# scale=6) # Distribution end
mrolls = measures_of_center(rolls)
# BUG FIX: "print mrolls" was Python 2 statement syntax (SyntaxError on Python 3).
print(mrolls)
# BUG FIX: the "normed" kwarg was removed from plt.hist in Matplotlib 3.1;
# density=True is the drop-in replacement with identical semantics.
plt.hist(rolls, bins='fd', density=True)
```
| github_jupyter |
# Lecture 06: Examples and overview
[Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2021)
[<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2021/master?urlpath=lab/tree/06/Examples_and_overview.ipynb)
1. [Recap](#Recap)
2. [The consumer problem](#The-consumer-problem)
3. [A worker-capitalist production economy](#A-worker-capitalist-production-economy)
4. [Inaugural project from last year (labor supply and taxation)](#Inuagural-project-from-last-year-(labor-supply-and-taxation))
5. [Summary](#Summary)
You now have all the basic tools to solve interesting economic models. The trick is to be able to combine what you know to solve problems in practice. We first briefly recap, with a focus on solving optimization problems and non-linear equations. Afterwards, we consider a number of examples.
1. The consumer problem
2. A worker-capitalist production economy
3. The inaugural project from 2020 (labor supply and taxation)
```
# magic to reload modules automatically
%load_ext autoreload
%autoreload 2
# standard imports
from types import SimpleNamespace # new? explained below
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
```
<a id="Recap"></a>
# 1. Recap
2. **Primitives:** types, operators, copy vs. view, conditionals, loops, functions, classes
3. **Optimize, print and plot:** mathematics (numpy), printing, figures (matplotlib), solving optimization problems and equations (scipy.optimize)
4. **Random numbers and simulation:** random numbers (numpy.random), save/load (pickle), interactive figures (ipywidgets)
5. **Workflow and debugging:** structuring, naming, commenting, debugging (assert, try-except), modules
**Sum up:** Lots and lots of information. The important thing is not to remember it all, but to know where to look for answers.
## 1.1 Optimize, optimize, optimize
**The two most important tools:**
1. Solving optimization problems with `scipy.optimize.minimize` and `scipy.optimize.minimize_scalar`
2. Solving equations with `scipy.optimize.root` and `scipy.optimize.root_scalar`
**Problem:** A bit of a black box...
* **Lecture 10:** Details on solving equations.
* **Lecture 11:** Details on numerical optimization.
* **Now:** Compare with a) a *loop search* and b) a *hand-written optimizer*.
### Loops vs. optimizer
**Define function:** Simple polynomial with maximum at $x = 2.0$
```
def f_func(x):
    """Concave quadratic whose maximum value 1 is attained at x = 2."""
    return 1 - 3 * (x - 2) ** 2
```
**Rough solution with loop:**
```
# Brute-force search for the maximum of f_func on an evenly spaced grid.
N = 100
x_vec = np.linspace(-10,10,N)
f_vec = np.empty(N)
x_best = np.nan   # argmax found so far (not-a-number until first update)
f_best = -np.inf  # best value found so far
for i, x in enumerate(x_vec):
    f_vec[i] = f_func(x)
    if f_vec[i] > f_best:
        x_best, f_best = x, f_vec[i]
print(f'best with loop is {f_best:.8f} at x = {x_best:.8f}')
```
**Question:** Not quite right, how to improve?
**Plot:**
```
# Visualize the grid search: dashed curve is f(x); the square marks the best grid point.
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x_vec,f_vec,ls='--',lw=2,color='black',label='$f(x)$')
ax.plot(x_best,f_best,ls='',marker='s',label='best')
ax.set_xlabel('x')
ax.set_ylabel('f')
ax.legend(loc='lower center',frameon=True);
```
**Solution with** `scipy.optimize.minimize_scalar` ([documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar)):
```
# Maximize f_func by minimizing its negation with Brent's method.
obj = lambda x: -f_func(x)
res = optimize.minimize_scalar(obj,bracket=(-10,10),method='brent')
x = res.x
f = -res.fun  # undo the sign flip to recover the maximum value
print(f'best is {f:.8f} at x = {x:.8f}')
```
**Solution with** `scipy.optimize.minimize` ([documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize)):
```
# Same maximization with the multivariate interface: x is a length-1 vector.
x_guess = [0]
obj = lambda x: -f_func(x[0])
res = optimize.minimize(obj, x_guess, method='Nelder-Mead')
x = res.x[0]
f = -res.fun
print(f'best is {f:.8f} at x = {x:.8f}')
```
**Solution with** `scipy.optimize.root` ([documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html)):
Find derivative and solve via FOC:
```
def fp_func(x):
    """First derivative of f_func: d/dx[-3*(x-2)**2 + 1]."""
    return -6 * (x - 2)
# Solve the first-order condition f'(x) = 0 with a multivariate root-finder.
x_guess = [0]
obj = lambda x: fp_func(x[0])
res = optimize.root(obj,x_guess,method='hybr')
x = res.x[0]
f = f_func(x)
print(f'best is {f:.8f} at x = {x:.8f}')
```
**Solution with** `scipy.optimize.root_scalar` ([documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root_scalar.html)):
```
# Solve the first-order condition with a scalar bracketing root-finder (bisection).
obj = lambda x: fp_func(x)
res = optimize.root_scalar(obj,bracket=(-10,10),method='bisect')
x = res.root
f = f_func(res.root)
print(f'best is {f:.8f} at x = {x:.8f}')
```
### Gradient descent optimizer
**Algorithm:** `minimize_gradient_descent()`
1. Choose tolerance $\epsilon>0$, step size $\alpha > 0$, and guess on $x_0$, set $n=0$.
2. Compute $f(x_n)$ and $f^\prime(x_n) \approx \frac{f(\boldsymbol{x}_{n}+\Delta)-f(\boldsymbol{x}_{n})}{\Delta}$.
3. If $|f^\prime(x_n)| < \epsilon$ then stop.
4. Compute new guess "down the hill":
$$
x_{n+1} = x_{n} - \alpha f^\prime(x_n)
$$
5. Set $n = n + 1$ and return to step 2.
**Code for algorithm:**
```
def gradient_descent(f,x0,alpha=1,Delta=1e-8,max_iter=500,eps=1e-8):
    """ minimize function with gradient descent

    Uses a forward finite-difference approximation of the derivative and a
    fixed step size; stops when |f'(x)| < eps or after max_iter iterations.

    Args:

        f (callable): function
        x0 (float): initial value
        alpha (float,optional): step size factor in search
        Delta (float,optional): step size in numerical derivative
        max_iter (int,optional): maximum number of iterations
        eps (float,optional): tolerance

    Returns:

        x (float): minimum
        fx (float): function value at minimum
        trials (list): list of dicts with keys 'x', 'fx', 'fp'
            (DOC FIX: previously documented as a list of tuples,
            but the code has always appended dicts)

    """

    # step 1: initialize
    x = x0
    n = 0
    trials = []

    # step 2-4:
    while n < max_iter:

        # step 2: compute function value and forward-difference derivative
        fx = f(x)
        fp = (f(x+Delta)-fx)/Delta
        trials.append({'x':x,'fx':fx,'fp':fp})

        # step 3: check convergence
        print(f'n = {n:3d}: x = {x:12.8f}, f = {fx:12.8f}, fp = {fp:12.8f}')
        if np.abs(fp) < eps:
            break

        # step 4: update x ("down the hill")
        x -= alpha*fp

        # step 5: update n
        n += 1

    return x,fx,trials
```
**Call the optimizer:**
```
# Run the hand-written optimizer on a non-quadratic objective.
x0 = 0
alpha = 0.5
f = lambda x: -np.sin(x)+0.05*x**2
x,fx,trials = gradient_descent(f,x0,alpha)
print(f'best with gradient_descent is {fx:.8f} at x = {x:.8f}')
```
**Illustration:**
```
# Plot the objective with the iterates, plus per-iteration x and derivative paths.
fig = plt.figure(figsize=(10,10))
# a. main figure
ax = fig.add_subplot(2,2,(1,2))
trial_x_vec = [trial['x'] for trial in trials]
trial_f_vec = [trial['fx'] for trial in trials]
trial_fp_vec = [trial['fp'] for trial in trials]
ax.plot(x_vec,f(x_vec),ls='--',lw=2,color='black',label='$f(x)$')
ax.plot(trial_x_vec,trial_f_vec,ls='',marker='s',ms=4,color='blue',label='iterations')
ax.set_xlabel('$x$')
ax.set_ylabel('$f$')
ax.legend(loc='upper center',frameon=True)
# sub figure 1: x per iteration
ax = fig.add_subplot(2,2,3)
ax.plot(np.arange(len(trials)),trial_x_vec)
ax.set_xlabel('iteration')
ax.set_ylabel('x')
# sub figure 2: derivative per iteration
ax = fig.add_subplot(2,2,4)
ax.plot(np.arange(len(trials)),trial_fp_vec)
ax.set_xlabel('iteration')
ax.set_ylabel('derivative of f');
```
**Question:** Can we guess on any initial value of $x_0$?
<a id="The-consumer-problem"></a>
# 2. The consumer problem
$$
\begin{aligned}
V(p_{1},p_{2},I) & = \max_{x_{1},x_{2}} \left(\alpha^{\frac{1}{\sigma}}x_{1}^{\frac{\sigma-1}{\sigma}}+(1-\alpha)^{\frac{1}{\sigma}}x_{2}^{\frac{\sigma-1}{\sigma}}\right)^{\frac{\sigma}{\sigma-1}}\\
\text{s.t.}\\
p_{1}x_{1}+p_{2}x_{2} & \leq I,\,\,\,p_{1},p_{2},I>0\\
x_{1},x_{2} & \geq 0
\end{aligned}
$$
**Goal:** Create a model-class to solve this problem.
**Utility function:**
```
def u_func(model,x1,x2):
    """CES utility of the bundle (x1, x2) given model.alpha and model.sigma."""
    a, s = model.alpha, model.sigma
    inner = a ** (1 / s) * x1 ** ((s - 1) / s) + (1 - a) ** (1 / s) * x2 ** ((s - 1) / s)
    return inner ** (s / (s - 1))
```
**Solution function:**
```
def solve(model):
    """Solve the consumer problem with SLSQP and store x1, x2, u on the model."""
    # a. objective function (to minimize)
    obj = lambda x: -model.u_func(x[0],x[1]) # minimize -> negative of utility
    # b. constraints and bounds
    budget_constraint = lambda x: model.I-model.p1*x[0]-model.p2*x[1] # violated if negative
    constraints = ({'type':'ineq','fun':budget_constraint})
    bounds = ((1e-8,model.I/model.p1-1e-8),(1e-8,model.I/model.p2-1e-8))
    # why all these 1e-8? To avoid ever having x1 = 0 or x2 = 0
    # c. call solver, starting from spending half of income on each good
    x0 = [(model.I/model.p1)/2,(model.I/model.p2)/2]
    sol = optimize.minimize(obj,x0,method='SLSQP',bounds=bounds,constraints=constraints)
    # d. save the optimal bundle and its utility on the model instance
    model.x1 = sol.x[0]
    model.x2 = sol.x[1]
    model.u = model.u_func(model.x1,model.x2)
```
**Create consumer class:**
```
class ConsumerClass:
    """CES consumer; u_func and solve are bound from the module-level functions."""

    def __init__(self):
        # baseline preferences (alpha, sigma), prices and income
        self.alpha = 0.5
        self.sigma = 0.1
        self.p1 = 1
        self.p2 = 2
        self.I = 10

    # attach the module-level functions as methods
    u_func = u_func
    solve = solve
```
**Solve consumer problem**:
```
# Instantiate and solve once with the baseline parameters.
jeppe = ConsumerClass() # calls __init__()
jeppe.solve()
print(f'(x1,x2) = ({jeppe.x1:.3f},{jeppe.x2:.3f}), u = {jeppe.u:.3f}')
```
Easy to loop over:
```
# Trace out optimal bundles as the preference weight alpha varies.
for alpha in np.linspace(0.1,0.9,9):
    jeppe.alpha = alpha
    jeppe.solve()
    print(f'alpha = {alpha:.3f} -> (x1,x2) = ({jeppe.x1:.3f},{jeppe.x2:.3f}), u = {jeppe.u:.3f}')
```
**Question:** Anything you want to test?
<a id="A-worker-capitalist-production-economy"></a>
# 3. A worker-capitalist production economy
Consider an economy consisting of $N_w$ **workers**, and $N_c$ **capitalists** and a single **firm** owned equally by the capitalists.
**Workers:** Consume, $c_w$, at a price $p$, and supply labor, $\ell_w$, at a wage of $w$. Maximize utility:
$$\max_{c_w\geq0,\ell_w\in[0,1]} \log (c_w+\kappa)- \omega \ell_w^{\eta} \text{ s.t } pc_w \leq w \ell_w,\,\,\,\omega,\kappa > 0, \eta \geq 1$$
Equivalently, substituting in the budget constraint with equality:
$$\max_{\ell_w\in[0,1]} \log \left( \frac{w \ell_w}{p}+\kappa \right)- \omega \ell_w^{\eta}$$
Denote ***optimal behavior*** $c_w^{\star}(p,w)$ and $\ell_w^{\star}(p,w)$.
**Capitalists:** Consume, $c_c$, at a price $p$, supply labor, $\ell_c$, at a wage $w$, and receives profits $\pi$. Maximize utility:
$$\max_{c_c\geq0,\ell_c\in[0,1]} \log (c_c+\kappa) - \omega \ell_c^{\eta} \text{ s.t } pc_c = w \ell_c + \pi, ,\,\,\,\omega,\kappa > 0, \eta \geq 1$$
Equivalently, substituting in the budget constraint with equality:
$$\max_{\ell_c\in[0,1]} \log \left( \frac{w \ell_c + \pi}{p}+\kappa \right)- \omega \ell_c^{\eta}$$
Denote ***optimal behavior*** $c_c^{\star}(p,w,\pi)$ and $\ell_c^{\star}(p,w,\pi)$.
**Firm:** Use the production function $f(\ell) = \ell^\alpha, \alpha \in (0,1)$. Maximize profits:
$$\max_{\ell\geq0} p f(\ell) - w\ell $$
Denote ***optimal behavior*** by $\ell^{\star}(p,w)$.
Implied ***production*** is $y^{\star}(p,w) = f(\ell^{\star}(p,w))$ and implied ***total profits*** are $\Pi^\star(p,w) = py^{\star}(p,w) - w\ell^{\star}(p,w)$
**Equilibrium:** A set of prices $(p,w)$ such that workers, capitalists and firms act optimally given prices and profit, and
1. **Goods market clears**: $N_w c_w^{\star}(p,w) + N_c c_c^{\star}(p,w,\pi) = y^\star(p,w)$
2. **Labor market clears**: $N_w \ell_w^{\star}(p,w) + N_c \ell_c^{\star}(p,w,\pi) = \ell^\star(p,w)$
3. **Profits received equal profits distributed**: $\pi = \frac{py^{\star}(p,w) - w\ell^{\star}(p,w)}{N_c}$
**Note I:** We can use $p=1$ as numeraire.
**Note II:** *Walras' Law* imply that if one of the markets clear, then the other one does too.
## 3.1 Parameters
Choose parameters:
```
# Model parameters: preferences (kappa, omega, eta), technology (alpha),
# and population counts of workers (Nw) and capitalists (Nc).
par = SimpleNamespace()
par.kappa = 0.1
par.omega = 10
par.eta = 1.50
par.alpha = 0.50
par.Nw = 99
par.Nc = 1
```
**SimpleNamespace():** Like a dictionary, but e.g. `par.kappa` instead of `par['kappa']`.
Can always be interfaced as a dictionary with `__dict__`:
```
# Dump all parameters via the namespace's underlying dict.
for k,v in par.__dict__.items():
    print(f'{k:6s} = {v:6.3f}')
```
## 3.2 Workers
```
def utility_w(c,l,par):
    """ utility of workers """
    return np.log(c + par.kappa) - par.omega * l ** par.eta

def workers(p,w,par):
    """ maximize utility for workers """
    # substitute the budget constraint c = w*l/p into utility and
    # minimize the negative over l in [0,1]
    negative_utility = lambda l: -utility_w((w * l) / p, l, par)
    res = optimize.minimize_scalar(negative_utility, bounds=(0,1), method='bounded')
    # recover the implied consumption from the optimal labor supply
    labor = res.x
    consumption = (w * labor) / p
    return consumption, labor
```
**Small test:**
```
# Sanity check: worker behavior at a few wages (p is the numeraire).
p = 1
for w in [0.5,1,1.5]:
    c,l = workers(p,w,par)
    print(f'w = {w:.2f} -> c = {c:.2f}, l = {l:.2f}')
```
## 3.3 Capitalists
```
def utility_c(c,l,par):
    """ utility of capitalists """
    return np.log(c + par.kappa) - par.omega * l ** par.eta

def capitalists(p,w,pi,par):
    """ maximize utility of capitalists """
    # substitute the budget constraint c = (w*l + pi)/p into utility and
    # minimize the negative over l in [0,1]
    negative_utility = lambda l: -utility_c((w * l + pi) / p, l, par)
    res = optimize.minimize_scalar(negative_utility, bounds=(0,1), method='bounded')
    # recover the implied consumption from the optimal labor supply
    labor = res.x
    consumption = (w * labor + pi) / p
    return consumption, labor
```
**Small test:**
```
# Sanity check: capitalist behavior at a few wages with a small profit share.
p = 1
pi = 0.1
for w in [0.5,1,1.5]:
    c,l = capitalists(p,w,pi,par)
    print(f'w = {w:.2f} -> c = {c:.2f}, l = {l:.2f}')
```
**Question:** Any idea for another test?
## 3.4 Firm
```
def firm(p,w,par):
    """ maximize firm profits """
    # production technology f(l) = l**alpha; minimize negative profit over l >= 0
    production = lambda l: l ** par.alpha
    negative_profit = lambda l: -(p * production(l) - w * l)
    res = optimize.minimize(negative_profit, [0.0], bounds=((0,None),), method='L-BFGS-B')
    # recover output and profits at the optimal labor demand
    labor_demand = res.x[0]
    output = production(labor_demand)
    profits = p * output - w * labor_demand
    return output, labor_demand, profits
```
**Small test:**
```
# Sanity check: firm behavior at a few wages.
p = 1
for w in [0.5,1,1.5]:
    y,l,Pi = firm(p,w,par)
    print(f'w = {w:.2f} -> y = {y:.2f}, l = {l:.2f}, Pi = {Pi:.2f}')
```
## 3.5 Equilibrium
```
def evaluate_equilibrium(w,par,p=None,do_print=False):
    """ evaluate equilibrium

    Args:

        w (float): wage
        par (SimpleNamespace): model parameters
        p (float,optional): output price; defaults to the numeraire p = 1
        do_print (bool,optional): print household behavior and market clearing

    Returns:

        (float): excess demand in the goods market

    """

    # a. normalize output price
    p = 1 if p is None else p

    # b. optimal behavior of firm
    y_star,l_star,Pi = firm(p,w,par)
    pi = Pi/par.Nc  # profits distributed equally among capitalists

    # c. optimal behavior of households
    c_w_star,l_w_star = workers(p,w,par)
    c_c_star,l_c_star = capitalists(p,w,pi,par)

    # d. market clearing (excess demand; zero in equilibrium)
    goods_mkt_clearing = par.Nw*c_w_star + par.Nc*c_c_star - y_star
    labor_mkt_clearing = par.Nw*l_w_star + par.Nc*l_c_star - l_star

    if do_print:
        u_w = utility_w(c_w_star,l_w_star,par)
        print(f'workers : c = {c_w_star:6.4f}, l = {l_w_star:6.4f}, u = {u_w:7.4f}')
        u_c = utility_c(c_c_star,l_c_star,par)
        print(f'capitalists : c = {c_c_star:6.4f}, l = {l_c_star:6.4f}, u = {u_c:7.4f}')
        print(f'goods market : {goods_mkt_clearing:.8f}')
        print(f'labor market : {labor_mkt_clearing:.8f}')

    # BUG FIX: previously the function returned None when do_print=True;
    # now the goods-market excess demand is always returned (backward
    # compatible — callers in print mode ignored the return value).
    return goods_mkt_clearing
```
**Step 1:** Perform rough grid search to check when the goods market clears.
```
# Rough grid search over wages: record goods-market excess demand at each point.
num_w = 10
grid_w = np.linspace(0.1,1.5,num_w)
grid_mkt_clearing = np.zeros(num_w)
for i,w in enumerate(grid_w):
    grid_mkt_clearing[i] = evaluate_equilibrium(w,par)
    print(f'w = {w:.2f} -> excess demand = {grid_mkt_clearing[i]:12.8f}')
```
**Step 2:** Find where *excess demand* changes sign - the equilibrium price must be within this range
```
# Bracket the equilibrium wage where excess demand changes sign.
left = np.max(grid_w[grid_mkt_clearing < 0])
right = np.min(grid_w[grid_mkt_clearing > 0])
print(f'equilibrium price must be in [{left:.2f},{right:.2f}]')
```
**Step 3:** Use equation-solver / root-finder
```
# Find the equilibrium wage by bisection on the bracketed interval.
res = optimize.root_scalar(evaluate_equilibrium,bracket=[left,right],method='bisect',args=(par,))
w_eq = res.root
print(f'the equilibrium wage is {w_eq:.4f}')
```
**Show details:**
```
# Print household behavior and market clearing at the equilibrium wage.
evaluate_equilibrium(w_eq,par,do_print=True)
```
**Check I:** Does both markets clear?
**Check II:** Can we multiply both prices with the same factor? I.e. can we change the numeraire?
```
# Scale both prices by the same factor: real allocations should be unchanged.
fac = 100
p_eq_ = fac*1.0
w_eq_ = fac*w_eq
evaluate_equilibrium(w_eq_,par,p=p_eq_,do_print=True)
```
## 3.6 Experiments
It is easy to extend this model in many directions:
1. Should workers and capitalists have different tastes or productivity?
2. Should workers differ wrt. tastes or productivity?
3. Should there be government redistribution?
4. Other ideas?
## 3.7 Using a class
```
from WorkerCapitalistEconomy import WorkerCapitalistEconomyClass
```
**Look at `WorkerCapitalistEconomy.py`:** Same code, but written as a class!
```
# Same economy, but solved through the class-based implementation.
model = WorkerCapitalistEconomyClass()
print(model.par.kappa) # access the class data with ".property"
model.find_equilibrium()
```
**Benefit I:** Fewer inputs and outputs, less risk of wrong ordering.
**Benefit II of class-based solution:** Easy access to all data.
E.g. capitalists share of total consumption.
```
# Aggregate consumption by group and report the capitalists' share.
C_w = model.par.Nw*model.c_w_star
C_c = model.par.Nc*model.c_c_star
print(f'capitalists share of total consumption is: {C_c/(C_c+C_w):.2f}')
```
**Benefit III of class-based solution:** Easy to experiment with different parameters.
```
# Experiment: re-solve with a much smaller kappa.
model.par.kappa = model.par.kappa/100 # lower kappa
model.find_equilibrium()
```
<a id="Inuagural-project-from-last-year-(labor-supply-and-taxation)"></a>
# 4. Inaugural project from last year (labor supply and taxation)
Consider a consumer solving the following maximization problem
$$\begin{eqnarray}
c^{\star},\ell^{\star} & = & \arg\max_{c,\ell}\log(c)-\nu\frac{\ell^{1+\frac{1}{\varepsilon}}}{1+\frac{1}{\varepsilon}}\\
& \text{s.t.} \\
x & = & m+w\ell-\left[\tau_{0}w\ell+\tau_{1}\max\{w\ell-\kappa,0\}\right] \\
c & \in & [0,x] \\
\ell & \in & [0,1]
\end{eqnarray}$$
where $c$ is consumption, $\ell$ is labor supply, $m$ is cash-on-hand,
$w$ is the wage rate, $\tau_{0}$ is the standard labor income tax,
$\tau_{1}$ is the top bracket labor income tax, $\kappa$ is the
cut-off for the top labor income bracket, $x$ is total resources,
$\nu$ scales the disutility of labor, and $\varepsilon$ is the Frisch
elasticity of labor supply.
Note that utility is monotonically increasing in consumption. This implies that
$$\begin{equation}
c^{\star}=x
\end{equation}$$
**Question 1:** Construct a function which solves the consumer given the parameters.
We choose the following parameter values
$$
m=1,\,\nu=10,\,\varepsilon=0.3,\,\tau_{0}=0.4,\,\tau_{1}=0.1,\,\kappa=0.4
$$
**Question 2:** Plot $\ell^{\star}$ and $c^{\star}$ as functions of $w$ in
the range $0.5$ to $1.5$.
Consider a population with $N=1,000$ individuals indexed by $i$.
Assume the distribution of wages is uniform such that
$$w_{i}\sim\mathcal{U}(0.5,1.5).$$
Denote the optimal choices of individual $i$ by $\ell_{i}^{\star}$ and $c_{i}^{\star}$.
**Question 3:** Calculate the total tax revenue given by $T=\sum_{i=1}^{N}\left[\tau_{0}w_{i}\ell_{i}^{\star}+\tau_{1}\max\{w_{i}\ell_{i}^{\star}-\kappa,0\}\right].$
**Question 4:** What would the tax revenue be if instead $\varepsilon=0.1$?
Consider a politician who wishes to maximize the tax revenue.
**Question 5:** Which $\tau_{0}$, $\tau_{1}$ and $\kappa$ would you suggest her to implement? Report the tax revenue you expect to obtain.
## 4.1 Solution of question 1+2
All the basic functions are written in `LaborSupplyModel.py`.
```
import LaborSupplyModel as LSM
```
Define all **parameters**:
```
# Parameters: cash-on-hand, labor disutility, Frisch elasticity, and the tax system.
m = 1
nu = 10
frisch = 0.3
tau0 = 0.4
tau1 = 0.1
kappa = 0.4
```
**Allocate** arrays for solutions:
```
# Wage grid for N individuals plus output arrays for labor and consumption.
N = 1_000
w_vec = np.linspace(0.5,1.5,N)
l_vec = np.zeros(N)
c_vec = np.zeros(N)
```
**Solve:**
```
# Solve each individual's labor-supply problem and back out consumption.
for i in range(N):
    l_vec[i] = LSM.find_optimal_labor_supply(nu,frisch,m,w_vec[i],tau0,tau1,kappa)
    c_vec[i] = LSM.implied_c(l_vec[i],m,w_vec[i],tau0,tau1,kappa)
```
**Plot results:**
```
fig = plt.figure(figsize=(12,4))

# left panel: optimal labor supply as a function of the wage
ax = fig.add_subplot(1,2,1)
ax.plot(w_vec,l_vec,'-')
ax.set_ylabel('labor supply, $\ell$')
ax.set_xlabel('wage, $w$')
ax.set_title('Labor supply')  # BUG FIX: title was misspelled 'Labor suppply'

# right panel: optimal consumption as a function of the wage
ax = fig.add_subplot(1,2,2)
ax.plot(w_vec,c_vec,'-')
ax.set_ylabel('consumption, $c$')
# BUG FIX: the x-axis is the wage, but the label was a copy-paste of the
# y-label ('consumption, $c$')
ax.set_xlabel('wage, $w$')
ax.set_title('Consumption');
```
## 4.2 Solution of question 3
Calculate **tax revenue** using that an equally spaced vector approximates a uniform distribution:
```
# Total tax revenue on the evenly spaced wage grid (approximates the uniform distribution).
T = np.sum(LSM.implied_tax(l_vec,w_vec,tau0,tau1,kappa))
print(f'total tax revenue is: {T:.4f}')
```
Using **random sampling** is also a possibility:
```
# a. set seed (reproducible draws)
np.random.seed(1917)

# b. run replications
reps = 50
T_vec = np.zeros(reps)
for rep in range(reps):

    # i. draw random wages
    w_vec_ = np.random.uniform(0.5,1.5,size=N)

    # ii. find labor supply
    l_vec_ = np.zeros(N)
    for i in range(N):
        l_vec_[i] = LSM.find_optimal_labor_supply(nu,frisch,m,w_vec_[i],tau0,tau1,kappa)

    # iii. find tax revenue
    T_vec[rep] = np.sum(LSM.implied_tax(l_vec_,w_vec_,tau0,tau1,kappa))
    if rep < 10 or rep%10 == 0:
        print(f'{rep:2d}: {T_vec[rep]:.4f}')

# c. mean revenue across replications, with its observed range
print(f'mean: {np.mean(T_vec):.4f} [{np.min(T_vec):.4f} {np.max(T_vec):.4f}]')
```
## 4.3 Question 4
**Re-solve** with $\epsilon = 0.1$:
```
# Re-solve every individual with a lower Frisch elasticity.
frisch_low = 0.1
l_vec_frisch_low = np.zeros(N)
for i in range(N):
    l_vec_frisch_low[i] = LSM.find_optimal_labor_supply(nu,frisch_low,m,w_vec[i],tau0,tau1,kappa)
```
Re-calculate **tax revenue**:
```
# Tax revenue under the lower elasticity.
T_frisch_low = np.sum(LSM.implied_tax(l_vec_frisch_low,w_vec,tau0,tau1,kappa))
print(f'total tax revenue is: {T_frisch_low:.4f}')
```
**Conclusion:** Higher tax revenue because of lower Frisch elasticity.
## 4.4 Question 5
Define function to calculate **tax revenue for guess of tax parameters**:
```
def tax_revenue(nu,frisch,m,w_vec,tau0,tau1,kappa):
    """ Compute total tax revenue for given tax parameters.

    Args:

        nu (float): disutility of labor supply
        frisch (float): frisch elasticity of labor supply
        m (float): cash-on-hand
        w_vec (np.array): wage
        tau0 (float): standard labor tax
        tau1 (float): top bracket labor income tax
        kappa (float): cut-off for the top labor income bracket

    Returns:

        (float): total tax revenue

    """

    # a. optimal labor supply at each wage on the grid
    l_vec = np.array([
        LSM.find_optimal_labor_supply(nu,frisch,m,w,tau0,tau1,kappa)
        for w in w_vec
    ])

    # b. total taxes implied by those labor choices
    return np.sum(LSM.implied_tax(l_vec,w_vec,tau0,tau1,kappa))
```
Define **objective function for optimizer**:
```
def obj(x,nu,frisch_low,m,w_vec):
    """ find negative of total tax revenue (objective for a minimizer)

    Args:

        x (np.array): tax parameters, either [tau0] or [tau0,tau1,kappa]
        nu (float): disutility of labor supply
        frisch_low (float): frisch elasticity of labor supply
        m (float): cash-on-hand
        w_vec (np.array): wage

    Returns:

        (float): minus total tax revenue

    """

    global it  # evaluation counter shared with the calling cell (reset there before each solve)

    # a. determine parameters: with a single element only tau0 is optimized
    tau0 = x[0]
    if x.size > 1:
        tau1 = x[1]
        kappa = x[2]
    else:
        # flat tax: no top bracket
        tau1 = 0.0
        kappa = 0.0

    # b. calculate tax revenue
    T = tax_revenue(nu,frisch_low,m,w_vec,tau0,tau1,kappa)

    # c. print progress for each objective evaluation
    print(f'{it:3d}: tau0 = {tau0:10.8f}, tau1 = {tau1:10.8f}, kappa = {kappa:10.8f} -> T = {T:12.8f},')
    it += 1

    # negated because the optimizer minimizes
    return -T
```
**Solve:**
```
# a. initial guess (current tax parameters) and bounds
x0 = np.array([tau0,tau1,kappa])
bounds = ((0,0.99),(0,0.99),(0,1.5))

# b. call solver
it = 0  # reset the global evaluation counter used by obj()
# NOTE(review): obj's third argument is named frisch_low but `frisch` is passed here -- confirm which elasticity is intended
result = optimize.minimize(obj,x0,
    method='SLSQP',bounds=bounds,
    args=(nu,frisch,m,w_vec)
)
```
**Have we found the global optimum?**
**Same result with another initial guess?**
```
# a. alternative initial guess, same bounds
x0 = np.array([0.1,0.1,0.1])
bounds = ((0,0.99),(0,0.99),(0,1.5))

# b. call solver again to check the result is not guess-dependent
it = 0  # reset the global evaluation counter used by obj()
result = optimize.minimize(obj,x0,
    method='SLSQP',bounds=bounds,
    args=(nu,frisch,m,w_vec)
)
```
**Can we improve if we force $\tau_1 = \kappa = 0$?**
```
# a. start from the previously found tau0 and optimize tau0 only
x0 = np.array([result.x[0]])
bounds = ((0,0.99),)

# b. call solver (obj forces tau1 = kappa = 0 when x has a single element)
it = 0  # reset the global evaluation counter used by obj()
result = optimize.minimize(obj,x0,
    method='SLSQP',bounds=bounds,
    args=(nu,frisch,m,w_vec)
)
```
**Can we improve if fix $\kappa$ to some value?**
```
def obj_kappa(x,nu,frisch_low,m,w_vec,kappa):
    """ find negative of total tax revenue for a fixed kappa

    Args:

        x (np.array): tax parameters, [tau0,tau1]
        nu (float): disutility of labor supply
        frisch_low (float): frisch elasticity of labor supply
        m (float): cash-on-hand
        w_vec (np.array): wage
        kappa (float): cut-off for the top labor income bracket (held fixed)

    Returns:

        (float): minus total tax revenue

    """

    global it  # evaluation counter shared with the calling cell

    # a. determine parameters (kappa is given, not optimized)
    tau0 = x[0]
    tau1 = x[1]

    # b. calculate tax revenue
    T = tax_revenue(nu,frisch_low,m,w_vec,tau0,tau1,kappa)

    # c. print progress for each objective evaluation
    print(f' {it:3d}: tau0 = {tau0:10.8f}, tau1 = {tau1:10.8f} -> T = {T:12.8f},')
    it += 1

    # negated because the optimizer minimizes
    return -T
# a. initial guess and bounds for (tau0,tau1)
x0 = np.array([0.1,0.1])
bounds = ((0,0.99),(0,0.99))

# b. call solver once for each fixed value of kappa
# NOTE(review): obj_kappa's third argument is named frisch_low but `frisch` is passed here -- confirm which elasticity is intended
for kappa in [0.05,0.1,0.15]:
    print(f'kappa = {kappa:.3f}')
    it = 0  # reset the global evaluation counter used by obj_kappa()
    result = optimize.minimize(obj_kappa,x0,
        method='SLSQP',bounds=bounds,
        args=(nu,frisch,m,w_vec,kappa)
    )
    print('')
**Suggestions for other tests?**
<a id="Summary"></a>
# 5. Summary
1. **Main takeaway:** You are actually already equipped to solve a lot of interesting economic models.
2. **Next time:** Pandas, the central Python package for working with data.
| github_jupyter |
# Computing the Bayesian Hilbert Transform-DRT
In this tutorial example, we will illustrate how the BHT-DRT model works for impedance data that is unbounded. The real experimental data was from the following article:
Wu et al. Dual-phase MoS2 as a high-performance sodium-ion battery anode. Journal of Materials Chemistry A, 8, 2114-2122 (2020). https://doi.org/10.1039/C9TA11913B
```
# import the libraries
import numpy as np
from math import pi, log10
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# core library
import Bayes_HT
import importlib
importlib.reload(Bayes_HT)
# plot standards
plt.rc('font', family='serif', size=15)
plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
```
## 1) Read the impedance data, $Z_{\rm exp}(\omega)$, and show both impedance and admittance, $Y_{\rm exp}(\omega)$
### 1.1) Read the impedance data and define the frequency range
```
Z_data = pd.read_csv('MoS2 2H.csv')
freq_vec, Z_exp = Z_data['freq'].values, Z_data['Z_Re'].values + 1j*Z_data['Z_Im'].values
freq_min = min(freq_vec)
freq_max = max(freq_vec)
tau_vec = 1./freq_vec
# tau_vec = np.logspace(0.6, -6.2, num=81, endpoint=True)
omega_vec = 2.*pi*freq_vec
```
### 1.2) show the impedance in Nyquist plot
```
fig, ax = plt.subplots()
plt.plot(np.real(Z_exp), -np.imag(Z_exp), 'o', markersize=8, color='red', label='exp')
plt.plot(np.real(Z_exp[6:30:12]), -np.imag(Z_exp[6:30:12]), 's', markersize=8, color="black")
plt.plot(np.real(Z_exp[42]), -np.imag(Z_exp[42]), 's', markersize=8, color="black")
plt.annotate(r'$0.1$', xy=(np.real(Z_exp[6]), -np.imag(Z_exp[6])),
xytext=(np.real(Z_exp[6])+20, -np.imag(Z_exp[6])),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.annotate(r'$10.3$', xy=(np.real(Z_exp[18]), -np.imag(Z_exp[18])),
xytext=(np.real(Z_exp[18])-10, 30-np.imag(Z_exp[18])),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.annotate(r'$10^{5}$', xy=(np.real(Z_exp[42]), -np.imag(Z_exp[42])),
xytext=(np.real(Z_exp[42])-10, 20-np.imag(Z_exp[42])),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.axis('scaled')
plt.xlim(10, 160)
plt.ylim(0, 210)
plt.xticks(range(10, 170, 50))
plt.xlabel(r'$Z_{\rm re}/\Omega$', fontsize = 20)
plt.ylabel(r'$-Z_{\rm im}/\Omega$', fontsize = 20)
plt.show()
```
### 1.2) show the admittance
```
Y_exp = 1000/Z_exp
fig, ax = plt.subplots()
plt.plot(np.real(Y_exp), np.imag(Y_exp), 'o', markersize=8, color='red', label='exp')
plt.plot(np.real(Y_exp[6:43:12]), np.imag(Y_exp[6:43:12]), 's', markersize=8, color="black")
plt.annotate(r'$0.1$', xy=(np.real(Y_exp[6]), np.imag(Y_exp[6])),
xytext=(np.real(Y_exp[6])-3, 2+np.imag(Y_exp[6])+2),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.annotate(r'$10.3$', xy=(np.real(Y_exp[18]), np.imag(Y_exp[18])),
xytext=(np.real(Y_exp[18])-8, np.imag(Y_exp[18])),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.annotate(r'$10^{3}$', xy=(np.real(Y_exp[30]), np.imag(Y_exp[30])),
xytext=(np.real(Y_exp[30])+4, np.imag(Y_exp[30])),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.annotate(r'$10^{5}$', xy=(np.real(Y_exp[42]), np.imag(Y_exp[42])),
xytext=(np.real(Y_exp[42])+3, np.imag(Y_exp[42])+2),
arrowprops=dict(arrowstyle='-',connectionstyle='arc'))
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.axis('scaled')
plt.xlim(0, 50)
plt.ylim(-0.5, 15)
plt.yticks(range(0, 16, 5))
plt.xlabel(r'$Y_{\rm re}/{\rm mS}$', fontsize = 20)
plt.ylabel(r'$Y_{\rm im}/{\rm mS}$', fontsize = 20)
plt.show()
```
## 2) Calculate the DRT admittance $Y_{\rm DRT}(\omega)$ and the Hilbert transformed admittance $Y_{\rm H}(\omega)$
### 2.1) optimize the hyperparameters
```
# initial values for the hyperparameters (further optimized inside Bayes_HT.HT_est)
sigma_n = 0.1       # noise level
sigma_beta = 1
sigma_lambda = 1
theta_0 = np.array([sigma_n, sigma_beta, sigma_lambda])

# run the Bayesian Hilbert-transform estimation on the admittance data
data_real, data_imag, scores = Bayes_HT.HT_est(theta_0, Y_exp, freq_vec, tau_vec, data_flag='admittance')
```
### 2.2) Calculate the real part of the $Y_{\rm DRT}(\omega)$ and the imaginary part of the $Y_{\rm H}(\omega)$
#### 2.2.1) Bayesian regression to obtain the real part of impedance for both mean and covariance
```
# posterior mean and variances (diagonal of the covariance) of the real part of Y
mu_Y_re = data_real.get('mu_Z')
cov_Y_re = np.diag(data_real.get('Sigma_Z'))

# the mean and variance of 1/R_inf (first element of the gamma vector)
mu_inv_R_inf = data_real.get('mu_gamma')[0]
cov_inv_R_inf = np.diag(data_real.get('Sigma_gamma'))[0]
```
#### 2.2.2) Calculate the real part of DRT impedance for both mean and covariance
```
mu_Y_DRT_re = data_real.get('mu_Z_DRT')
cov_Y_DRT_re = np.diag(data_real.get('Sigma_Z_DRT'))
```
#### 2.2.3) Calculate the imaginary part of HT impedance for both mean and covariance
```
mu_Y_H_im = data_real.get('mu_Z_H')
cov_Y_H_im = np.diag(data_real.get('Sigma_Z_H'))
```
#### 2.2.4) Estimate the $\sigma_n$
```
sigma_n_re = data_real.get('theta')[0]
```
### 2.3) Calculate the imaginary part of the $Y_{\rm DRT}(\omega)$ and the real part of the $Y_{\rm H}(\omega)$
```
# 2.3.1 Bayesian regression
mu_Y_im = data_imag.get('mu_Z')
cov_Y_im = np.diag(data_imag.get('Sigma_Z'))
mu_C_0 = data_imag.get('mu_gamma')[0]
cov_C_0 = np.diag(data_imag.get('Sigma_gamma'))[0]
# 2.3.2 DRT part
mu_Y_DRT_im = data_imag.get('mu_Z_DRT')
cov_Y_DRT_im = np.diag(data_imag.get('Sigma_Z_DRT'))
# 2.3.3 HT prediction
mu_Y_H_re = data_imag.get('mu_Z_H')
cov_Y_H_re = np.diag(data_imag.get('Sigma_Z_H'))
# 2.3.4 estimated sigma_n
sigma_n_im = data_imag.get('theta')[0]
```
## 3) Plot the BHT_DRT
### 3.1) plot the real part of admittance for both Bayesian regression and the synthetic experiment
```
band = np.sqrt(cov_Y_re)
plt.fill_between(freq_vec, mu_Y_re-3*band, mu_Y_re+3*band, facecolor='lightgrey')
plt.semilogx(freq_vec, mu_Y_re, linewidth=4, color='black', label='mean')
plt.semilogx(freq_vec, Y_exp.real, 'o', markersize=8, color='red', label='exp')
plt.xlim(8E-3, 1E6)
# plt.ylim(0, 50)
plt.xscale('log')
# plt.yticks(range(5, 60, 10))
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$Y_{\rm re}/{\rm mS}$', fontsize=20)
plt.legend(frameon=False, fontsize = 15)
```
### 3.2) plot the imaginary part of admittance for both Bayesian regression and the synthetic experiment
```
band = np.sqrt(cov_Y_im)
plt.fill_between(freq_vec, mu_Y_im-3*band, mu_Y_im+3*band, facecolor='lightgrey')
plt.semilogx(freq_vec, mu_Y_im, linewidth=4, color='black', label='mean')
plt.semilogx(freq_vec, Y_exp.imag, 'o', markersize=8, color='red', label='exp')
plt.xlim(8E-3, 1E6)
# plt.ylim(0, 12)
plt.xscale('log')
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$Y_{\rm im}/{\rm mS}$', fontsize=20)
plt.legend(frameon=False, fontsize = 15)
plt.show()
```
### 3.3) plot the real part of admittance for both Hilbert transform and the synthetic experiment
```
mu_Y_H_re_agm = mu_inv_R_inf + mu_Y_H_re
band_agm = np.sqrt(cov_inv_R_inf + cov_Y_H_re + sigma_n_im**2)
plt.fill_between(freq_vec, mu_Y_H_re_agm-3*band_agm, mu_Y_H_re_agm+3*band_agm, facecolor='lightgrey')
plt.semilogx(freq_vec, mu_Y_H_re_agm, linewidth=4, color='black', label='mean')
plt.semilogx(freq_vec, Y_exp.real, 'o', markersize=8, color='red', label='exp')
plt.xlim(8E-3, 1E6)
# plt.ylim(0, 50)
plt.xscale('log')
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$\left(1/R_\infty + Y_{\rm H, re}\right)/{\rm mS}$', fontsize=20)
plt.legend(frameon=False, fontsize = 15)
plt.show()
```
### 3.4) plot the imaginary part of admittance for both Hilbert transform and the synthetic experiment
```
mu_Y_H_im_agm = omega_vec*mu_C_0 + mu_Y_H_im
band_agm = np.sqrt((omega_vec**2)*cov_C_0 + cov_Y_H_im + sigma_n_re**2)
plt.fill_between(freq_vec, mu_Y_H_im_agm-3*band_agm, mu_Y_H_im_agm+3*band_agm, facecolor='lightgrey')
plt.semilogx(freq_vec, mu_Y_H_im_agm, linewidth=4, color='black', label='mean')
plt.semilogx(freq_vec, Y_exp.imag, 'o', markersize=8, color='red', label='exp')
plt.xlim(8E-3, 1E6)
# plt.ylim(-33, 30)
plt.xscale('log')
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$\left(\omega C_0 + Y_{\rm H,im}\right)/{\rm mS}$', fontsize=20)
plt.legend(frameon=False, fontsize = 15)
```
### 3.5) plot the difference between real part of admittance for Hilbert transform and the synthetic experiment
```
difference_re = mu_inv_R_inf + mu_Y_H_re - Y_exp.real
band = np.sqrt(cov_inv_R_inf + cov_Y_H_re + sigma_n_im**2)
fig = plt.figure(figsize=(12,4.2), constrained_layout=False)
gs1 = fig.add_gridspec(nrows=1, ncols=4, left=0.05, right=0.48, wspace=0.15)
ax1 = fig.add_subplot(gs1[0, :-1])
ax2 = fig.add_subplot(gs1[0, -1])
ax1.fill_between(freq_vec, -3*band, 3*band, facecolor='lightgrey')
ax1.plot(freq_vec, difference_re, 'o', markersize=8, color='red')
ax1.set_xlim(freq_min, freq_max)
ax1.set_xscale('log')
ax1.set_xticks(np.logspace(-1, 5, 4, endpoint=True))
ax1.set_ylim(-0.8, 0.8)
# ax1.set_yticks(np.arange(-0.8, 0.8, 0.2))
ax1.set_xlabel(r'$f/{\rm Hz}$', fontsize=20)
ax1.set_ylabel(r'$\left(1/R_\infty + Y_{\rm H, re} - Y_{\rm exp, re}\right)/{\rm mS}$', fontsize=20)
# density distribution plot
sns.kdeplot(difference_re, ax=ax2, shade=True, color='grey', vertical=True)
sns.rugplot(difference_re, ax=ax2, color='black', vertical=True)
# ax2.set_xlim(0, 0.5)
# ax2.set_xticks(np.arange(0.1, 0.6, 0.2))
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels([])
ax2.set_ylim(-0.8, 0.8)
ax2.set_xlabel(r'pdf',fontsize=20)
plt.show()
```
### 3.6) plot the difference between imaginary part of admittance for Hilbert transform and the synthetic experiment
```
difference_im = omega_vec*mu_C_0 + mu_Y_H_im - Y_exp.imag
band = np.sqrt((omega_vec**2)*cov_C_0 + cov_Y_H_im + sigma_n_re**2)
fig = plt.figure(figsize=(12,4.2), constrained_layout=False)
gs1 = fig.add_gridspec(nrows=1, ncols=4, left=0.05, right=0.48, wspace=0.15)
ax1 = fig.add_subplot(gs1[0, :-1])
ax2 = fig.add_subplot(gs1[0, -1])
ax1.fill_between(freq_vec, -3*band, 3*band, facecolor='lightgrey')
ax1.plot(freq_vec, difference_im, 'o', markersize=8, color='red')
ax1.set_xlim(freq_min, freq_max)
ax1.set_xscale('log')
ax1.set_xticks(np.logspace(-1, 5, 4, endpoint=True))
ax1.set_ylim(-0.8, 0.8)
ax1.set_xlabel(r'$f/{\rm Hz}$', fontsize=20)
ax1.set_ylabel(r'$\left(\omega C_0 + Y_{\rm H,im} - Y_{\rm exp,im}\right)/{\rm mS}$', fontsize=20)
# density distribution plot
sns.kdeplot(difference_im, ax=ax2, shade=True, color='grey', vertical=True)
sns.rugplot(difference_im, ax=ax2, color='black', vertical=True)
# ax2.set_xlim(0, 0.4)
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels([])
ax2.set_ylim(-0.8, 0.8)
ax2.set_xlabel(r'pdf',fontsize=20)
```
| github_jupyter |
# Document retrieval from wikipedia data
# Fire up GraphLab Create
```
import graphlab
```
# Load some text data - from wikipedia, pages on people
```
people = graphlab.SFrame('people_wiki.gl/')
```
Data contains: link to wikipedia article, name of person, text of article.
```
people.head()
len(people)
```
# Explore the dataset and checkout the text it contains
## Exploring the entry for president Obama
```
obama = people[people['name'] == 'Barack Obama']
obama
obama['text']
```
## Exploring the entry for actor George Clooney
```
clooney = people[people['name'] == 'George Clooney']
clooney['text']
```
# Get the word counts for Obama article
```
obama['word_count'] = graphlab.text_analytics.count_words(obama['text'])
print obama['word_count']
```
## Sort the word counts for the Obama article
### Turning the dictionary of word counts into a table
```
obama_word_count_table = obama[['word_count']].stack('word_count', new_column_name = ['word','count'])
```
### Sorting the word counts to show most common words at the top
```
obama_word_count_table.head()
obama_word_count_table.sort('count',ascending=False)
```
Most common words include uninformative words like "the", "in", "and",...
# Compute TF-IDF for the corpus
To give more weight to informative words, we weigh them by their TF-IDF scores.
```
people['word_count'] = graphlab.text_analytics.count_words(people['text'])
people.head()
tfidf = graphlab.text_analytics.tf_idf(people['word_count'])
tfidf
people['tfidf'] = tfidf['docs']
```
## Examine the TF-IDF for the Obama article
```
obama = people[people['name'] == 'Barack Obama']
obama[['tfidf']].stack('tfidf',new_column_name=['word','tfidf']).sort('tfidf',ascending=False)
```
Words with highest TF-IDF are much more informative.
# Manually compute distances between a few people
Let's manually compare the distances between the articles for a few famous people.
```
clinton = people[people['name'] == 'Bill Clinton']
beckham = people[people['name'] == 'David Beckham']
```
## Is Obama closer to Clinton than to Beckham?
We will use cosine distance, which is given by
(1-cosine_similarity)
and find that the article about president Obama is closer to the one about former president Clinton than that of footballer David Beckham.
```
graphlab.distances.cosine(obama['tfidf'][0],clinton['tfidf'][0])
graphlab.distances.cosine(obama['tfidf'][0],beckham['tfidf'][0])
```
# Build a nearest neighbor model for document retrieval
We now create a nearest-neighbors model and apply it to document retrieval.
```
knn_model = graphlab.nearest_neighbors.create(people,features=['tfidf'],label='name')
```
# Applying the nearest-neighbors model for retrieval
## Who is closest to Obama?
```
knn_model.query(obama)
```
As we can see, president Obama's article is closest to the one about his vice-president Biden, and those of other politicians.
## Other examples of document retrieval
```
swift = people[people['name'] == 'Taylor Swift']
knn_model.query(swift)
jolie = people[people['name'] == 'Angelina Jolie']
knn_model.query(jolie)
arnold = people[people['name'] == 'Arnold Schwarzenegger']
knn_model.query(arnold)
```
| github_jupyter |
# Genotype data preprocessing
This section documents output from the genotype section (colored in light yellow) of command generator MWE and explained the purpose for each of the command. The file used in this page can be found at [here](https://drive.google.com/drive/folders/16ZUsciZHqCeeEWwZQR46Hvh5OtS8lFtA?usp=sharing).
**For a multi-theme study using the same set of genotypes (i.e. different tissues from the same participants), the VCF QC and final genotype QC steps are run once. The related/unrelated genotype QC and PCA are run once per tissue, as only the participants who also have RNA-seq in the corresponding tissue will be used for PCA.**
```
%preview ../images/eqtl_command.png
```
## Overview
### Analysis steps
1. Genotype data quality control (QC). See here for the [QC default settings](https://cumc.github.io/xqtl-pipeline/code/data_preprocessing/genotype/GWAS_QC.html).
2. Principal component analysis (PCA) based QC, and PC computation for each sub-population available in the genotype data.
3. Genomic relationship matrix (GRM) computation (optional, when in Step 1 genotype QC detects related individuals or the input is known to have related individuals).
### Input data requirement
1. Genotype data. See here for [format details](https://cumc.github.io/xqtl-pipeline/code/data_preprocessing/genotype/genotype_formatting.html).
2. [Optional] a sample information file to specific population information, if external data such as HapMap or 1000 Genomes are to be integrated to the PCA analysis to visualize and assess population structure in the genotype data. See here for [format details](https://cumc.github.io/xqtl-pipeline/code/data_preprocessing/genotype/genotype_formatting.html).
## QC for VCF files
This step extracts the rsIDs for the known variants, so that we can distinguish them from the novel variants in our study. The procedure/rationale is [explained in this post](https://hbctraining.github.io/In-depth-NGS-Data-Analysis-Course/sessionVI/lessons/03_annotation-snpeff.html).
```
sos run pipeline/VCF_QC.ipynb dbsnp_annotate \
--genoFile reference_data/00-All.vcf.gz \
--cwd output/reference_data \
--container containers/bioinfo.sif \
-J 50 -c csg.yml -q csg --add_chr
```
Perform QC on VCF files. The QC-ed data will also be exported to PLINK format for next steps analysis. The QC procedure includes:
1. Handling the formatting of multi-allelic sites,
2. Genotype and variant level filtering based on genotype calling qualities.
    2.1. Genotype depth filters: For WES data, UK Biobank recommends **DP>10 for SNPs and DP>10 for indels.** However we think for WGS we can be less stringent, or simply rely on GQ. Users can set it to 1, e.g. `--DP 1 --DP-indel 1`.
2.2. Genotype quality GQ>20.
2.3. At least one sample per site passed the allele balance threshold >= 0.15 for SNPs and >=0.20 for indels (heterozygous variants). Allele balance is calculated for heterozygotes as the number of bases supporting the least-represented allele over the total number of base observations.
3. Known/novel variants annotation
4. Summary statistics before and after QC, in particular the ts/tv ratio, to assess the effectiveness of QC.
3 and 4 above are for explorative analysis on the overall quality assessment of genotype data in the VCF files. We annotate known and novel variants because ts/tv are expected to be different between known and novel variants, and is important QC metric to assess the effectiveness of our QC.
After the procedure was done, the output will be converted to a plink bed/bim/fam binary format for downstream analysis.
```
sos run VCF_QC.ipynb qc \
--genoFile data/mwe/mwe_genotype.vcf.gz \
--dbsnp-variants data/reference_data/00-All.add_chr.variants.gz \
--reference-genome data/reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \
--cwd output/genotype_4 --container ./containers/bioinfo.sif
```
## Genotype QC for PCA
To accurately estimate the PCs for the genotype, we split participants based on their kinship coefficients, estimated by KING. We have implemented a graph-based method to avoid removing unrelated participants that are in a family. However, this option is disabled by default due to the long running time it requires. Users can enable this option by changing `--no-maximize_unrelated` to `--maximize_unrelated`.
```
sos run pipeline/GWAS_QC.ipynb king \
--cwd data_preprocessing/MWE/genotype_data \
--genoFile MWE.bed \
--name MWE \
--keep-samples data_preprocessing/MWE/sampleSheetAfterQC.filtered_geno.txt \
--container containers/bioinfo.sif \
--walltime 48h --no-maximize_unrelated
```
Variant level and sample level QC on unrelated individuals using missingness > 10% ,MAF>5% , and LD-prunning in preparation for PCA analysis:
```
sos run pipeline/GWAS_QC.ipynb qc \
--cwd data_preprocessing/MWE/genotype_data \
--genoFile data_preprocessing/MWE/genotype_data/MWE.MWE.unrelated.bed \
--exclude-variants /mnt/mfs/statgen/snuc_pseudo_bulk/Ast/genotype/dupe_snp_to_exclude \
--maf-filter 0.05 \
--container containers/bioinfo.sif \
```
Extract previously selected variants from related individuals in preparation for PCA, only applying missingness filter at sample level,
```
sos run pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd data_preprocessing/MWE/genotype_data \
--genoFile data_preprocessing/MWE/genotype_data/MWE.MWE.related.bed \
--maf-filter 0 \
--geno-filter 0 \
--mind-filter 0.1 \
--hwe-filter 0 \
--keep-variants data_preprocessing/MWE/genotype_data/MWE.MWE.unrelated.filtered.prune.in \
--container containers/bioinfo.sif \
--mem 40G
```
## Finalized genotype QC, optionally as a result of PCA
### Common varriant:
In our demo, the genotype data was filtered by the criteria of missingness > 10% and MAF > 5% as shown below. Users can determine the level of filtering by editing the generated command manually.
```
sos run pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd output/data_preprocessing \
--genoFile output/data_preprocessing/mwe_genotype.add_chr.leftnorm.filtered.bed \
--maf-filter 0.05 \
--geno-filter 0.1 \
--mind-filter 0.1 \
--hwe-filter 1e-06 \
--mem 40G \
--container containers/bioinfo.sif
```
### Remove outlier
PCA analysis (see covariate formatting page) can possibly provide us a list of outlier samples to be removed from the genotype data. We can optionally remove them and perform genotype data QC.
Removal of samples is a complicated decision that requires human discretion. Therefore outlier removal is not performed automatically. The `--remove-samples` parameter takes a two-column table, as illustrated by `output/data_preprocessing/MWE/pca/MWE.MWE.related.filtered.extracted.pca.projected.outliers` in `MWEoutput.tar.gz`
```
sos run pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd output/data_preprocessing \
--genoFile output/data_preprocessing/mwe_genotype.add_chr.leftnorm.filtered.bed \
--maf-filter 0.05 \
--geno-filter 0.1 \
--mind-filter 0.1 \
--hwe-filter 1e-06 \
--mem 40G \
--container containers/bioinfo.sif \
--remove-samples output/data_preprocessing/MWE/pca/MWE.MWE.related.filtered.extracted.pca.projected.outliers
```
Also remove outliers and keep the same variant as unrelated individuals, in related individuals:
To fully illustrate how PCA and PCA based QC is done on sample with **multiple populations (a more complicated but potentially very useful setting)**, please try out the example documented in the [PCA analysis module](https://cumc.github.io/xqtl-pipeline/code/data_preprocessing/genotype/PCA.html) with a different example data-set.
### For common and infrequent variants
Additionally, we provide a version with variants having sample minor allele count MAC >= 5, for the sake of convenience in focusing our single-variant analysis. It should be noted that the MAC and MAF filters don't overwrite each other within our module. It is up to plink to decide how to handle the scenario of both `--maf-filter` and `--mac-filter` > 0.
```
mac="5"
sos run pipeline/GWAS_QC.ipynb qc \
--cwd data_preprocessing/MWE/genotype_data \
--genoFile data_preprocessing/MWE/genotype_data/MWE.MWE.unrelated.bed \
--exclude-variants /mnt/mfs/statgen/snuc_pseudo_bulk/Ast/genotype/dupe_snp_to_exclude \
--maf-filter 0.05 \
--mac-filter $mac \
--name no_outlier_mac$mac \
--container containers/bioinfo.sif
sos run pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd data_preprocessing/MWE/genotype_data \
--genoFile data_preprocessing/MWE/genotype_data/MWE.MWE.related.bed \
--maf-filter 0 \
--geno-filter 0 \
--mind-filter 0.1 \
--hwe-filter 0 \
--mac-filter $mac \
--name no_outlier_mac$mac \
--keep-variants data_preprocessing/MWE/genotype_data/MWE.MWE.unrelated.filtered.prune.in \
--container containers/bioinfo.sif \
--mem 40G
sos run pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd output/data_preprocessing \
--genoFile output/data_preprocessing/mwe_genotype.add_chr.leftnorm.filtered.bed \
--maf-filter 0.05 \
--geno-filter 0.1 \
--mind-filter 0.1 \
--hwe-filter 1e-06 \
--mem 40G \
--mac-filter $mac \
--name no_outlier_mac$mac \
--container containers/bioinfo.sif \
--remove-samples output/data_preprocessing/MWE/pca/MWE.MWE.related.filtered.extracted.pca.projected.outliers
```
## Genotype Reformatting
### Partition by Chromosome
The association analysis of TensorQTL requires the genotype data to be separated by chromosome to allow for parallel analysis.
```
sos run pipeline/genotype_formatting.ipynb plink_by_chrom \
--genoFile output/data_preprocessing/mwe_genotype.add_chr.leftnorm.filtered.filtered.bed \
--cwd output/data_preprocessing \
--chrom `cut -f 1 output/data_preprocessing/mwe_genotype.add_chr.leftnorm.filtered.filtered.bim | uniq | sed "s/chr//g"` \
--container containers/bioinfo.sif
```
### Converted to VCF
If association analysis by APEX is needed, the following command converts the per-chromosome plink binaries generated above into indexed vcf.gz files.
```
sos run pipeline/genotype_formatting.ipynb plink_to_vcf \
--genoFile plink_files_list.txt \
--cwd output/data_preprocessing \
--container containers/bioinfo.sif
```
**FIXME: still need to add in section for VCF formatted file processing after PCA**
| github_jupyter |
# Lesson 7 Class Exercises: Matplotlib
With these class exercises we learn a few new things. When new knowledge is introduced you'll see the icon shown on the right:
<span style="float:right; margin-left:10px; clear:both;"></span>
## Get Started
Import the Numpy, Pandas, Matplotlib packages and the Jupyter notebook Matplotlib magic
## Exercise 1. Load and clean the data for plotting
Import the Real Minimum Wages dataset from https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas_panel/realwage.csv
Clean the data by performing the following:
1. Add a new column containing just the year
2. Drop rows with missing values
3. Keep only rows in the series "In 2015 constant prices at 2015 USD PPPs"
4. Keep only rows where the pay period is 'Annual'
5. Drop unwanted columns: 'Unnamed: 0', 'Time' and 'Series'
6. Rename the 'value' column as 'Salary'
7. Reset the indexes
## Exercise 2. Add a quartile group column
Find the quartiles for the minimal annual salary. Add a new column to the dataframe named `Group` that contains the values QG1, QG2, QG3 and QG4 representing the quartile group (QG) to which the row belongs. Rows with a value between 0 and the first quartile get the value QG1, rows between the 1st and 2nd quartile get the value QG2, etc.
## Exercise 3. Create a boxplot
Create a graph using a single axis that shows the boxplots of the four groups. This will allow us to see if we properly separated rows by quartiles. It will also allow us to see the spread of the data in each quartile. Be sure to label the x-axis tick marks with the proper quartile group name.
## Exercise 4. Create a Scatterplot
Create a single scatterplot to explore if the salaries in quartile group 1 and quartile group 4 are correlated. Hint: to compare two categories we must have an observational unit that is common between them. Be sure to add the x and y axis labels.
Recreate the plot above, but set a different color per year and size the points to be larger for later years and smaller for earlier years.
## Exercise 5. Create a grid of scatterplots
Now, let's see the pairwise scatterplot of each quartile group with every other group. Create a 4x4 grid of subplots. The rows and columns of the subplot represent one of the 4 groups and each plot represents the scatterplot of those groups. You can skip the plots in the diagonal as these will always the same quartile group.
<span style="float:right; margin-left:10px; clear:both;"></span>
Use the following code to ensure that the plot is large enough to see detail:
```python
plt.rcParams["figure.figsize"] = (12, 12)
```
The code above sets the size of the image in "inches" (i.e. 12 x 12 inches). Also, because the x-axis and y-axis labels will be repeated, we only need to set them on the first column and last rows. You can set the y-axis labels on the first column by using the `set` function and providing the `ylabel` argument. For example.
```python
axes[0, 0].set(ylabel="QG1")
```
You can do the same for the x-axis on the bottom row using the same style:
```python
axes[3, 0].set(xlabel="QG1")
```
Do you see any correlation between any of the groups? If so, why do you suspect this is?
## Exercise 6. Create a grid of line plots
Now, let's create a line graph of changes over time for each quartile group. Let's use a 2x2 subplot grid with each grid showing a different group.
| github_jupyter |
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from math import ceil
import sklearn.datasets
def prepare_swissroll_data(BATCH_SIZE=1000, noise=0.25, scale=2.0):
    ''' Sample a 2D swiss-roll point cloud for toy generative modelling.

    This is derived from https://github.com/lukovnikov/improved_wgan_training/blob/master/gan_toy.py
    Copyright (c) 2017 Ishaan Gulrajani
    Released under the MIT license
    https://github.com/lukovnikov/improved_wgan_training/blob/master/LICENSE

    Args:
        BATCH_SIZE (int): number of points to sample
        noise (float): gaussian noise passed to sklearn's swiss-roll generator
        scale (float): divisor applied to the coordinates; the original code
            used 7.5 ("stdev plus a little"), this default keeps the previous
            hard-coded value of 2

    Returns:
        np.ndarray of shape (BATCH_SIZE, 2), float32
    '''
    data = sklearn.datasets.make_swiss_roll(
        n_samples=BATCH_SIZE,
        noise=noise
    )[0]
    # keep only the first and third coordinates: the roll lives in the x-z plane
    data = data.astype('float32')[:, [0, 2]]
    # rescale the cloud toward the range the toy models expect
    data /= scale
    return data
def plot_2d_data(X, title=None, ax=None, ax2=None, vae=None):
    """Scatter-plot a 2D dataset and, if a VAE is given, overlay its samples.

    Args:
        X: array-like of shape (n, 2), the data points.
        title: optional title for the data-space axes.
        ax: optional axes for the data-space plot; created if None.
        ax2: optional axes for the latent-space plot (used only when vae is given).
        vae: optional model exposing `dec` (decoder callable), `enc` (encoder
            returning a (mu, l) pair) and `enc.dim_Z` (latent dimension).
    """
    if ax is None:
        if vae is not None:
            # two stacked panels: data space (top) and latent space (bottom)
            fig = plt.figure(figsize=(5,10))
            ax = fig.add_subplot(2,1,1)
            ax2 = fig.add_subplot(2,1,2)
        else:
            fig = plt.figure(figsize=(5,5))
            ax = fig.add_subplot(1,1,1)
    if title is not None:
        ax.set_title(title, fontsize=20)
    #ax.set_xlim(-1.6, 2.0); ax.set_ylim(-1.7, 2.1)
    ax.scatter(X[:,0], X[:,1], alpha=0.5)
    if vae is not None:
        # draw latent samples z ~ N(0, I) and decode them into data space (red)
        sample_size=1000
        Z = tf.random.normal(shape=(sample_size, vae.enc.dim_Z))
        x_fake = vae.dec(Z)
        #ax.set_title("X", fontsize=20)
        ax.scatter(x_fake[:,0], x_fake[:,1], marker='.', alpha=0.5, color='red', label='dec(z)')
        lgd = ax.legend(fontsize=20, loc='upper left')
        for text in lgd.get_texts():
            text.set_color("red")
        # latent-space panel: prior samples (red) vs encoded data means (blue)
        ax2.set_xlim(-3, 3); ax2.set_ylim(-3, 3)
        ax2.set_title("Z", fontsize=20)
        ax2.scatter(Z[:,0], Z[:,1], marker='.', alpha=0.5, color='red')
        mu_enc, l_enc = vae.enc(X)
        ax2.scatter(mu_enc[:,0], mu_enc[:,1], alpha=0.5,color='C0', label='enc(x)(mu)')
        lgd = ax2.legend(fontsize=20, loc='upper left')
        for text in lgd.get_texts():
            text.set_color("C0")
def show(X, titles=None, horizontal=False):
    """Display a batch of images X (shape (batch, H, W) or (batch, H, W, C))
    in a matplotlib grid.

    horizontal=True lays all images out in one row; otherwise a roughly
    square grid is used. `titles` optionally labels each image.
    """
    n_images = X.shape[0]
    grid = ceil(n_images ** 0.5)
    figsize = (n_images, grid) if horizontal else (grid, grid)
    fig = plt.figure(figsize=figsize)
    for idx in range(n_images):
        if horizontal:
            ax = fig.add_subplot(1, n_images, idx + 1)
        else:
            ax = fig.add_subplot(grid, grid, idx + 1)
        ax.axis("off")
        try:
            ax.imshow(X[idx])
        except TypeError:
            # single-channel images: drop the channel axis, render grayscale
            ax.imshow(X[idx, :, :, 0], cmap='gray')
        if titles is not None:
            ax.set_title(titles[idx])
    plt.show()
```
## 8-1. 変分自己符号化器
### 潜在変数 と ニューラルネット
ニューラルネットワークで生成モデル $p_\theta(x)$ を作るにはどうすればよいでしょうか。ニューラルネットワークはここまで紹介してきたとおり「入力」と「出力」があるような関数なのでした。そうすると $x$ しか引数にないようなものを表現できないような気がします。
一つの方法は、何らかの「$x$の特徴を反映した空間」があるとして、その座標を$z$とします。そして $z$ 上に手で確率密度 $p(z)$ を設定し、
$$
p_\theta(x|z)
$$
をモデル化することにする方法があります。$z$ を**潜在変数(latent variable)** といいます。こうしておけば $p_\theta(x, z) = p_\theta(x|z) p(z)$ の $z$ について周辺化を取ると
$$
p_\theta(x) = \int dz \ p_\theta(x|z) p(z)
$$
は $x$ についてのみの確率分布になります。
### 対数尤度とELBO
ところで、負の対数尤度を用いた最尤推定は
$$
- \log p_\theta(x)
=-
\log \Big(
\int dz \ p_\theta(x|z) p(z)
\Big) \tag{1}
$$
が計算できねばならず、これはニューラルネットワークで$p_\theta(x|z)$を実装していると積分値についての閉じた形式を知らないため、困難に思えます。ベイズの定理を思い出すと
$$
{\color{blue} {p_\theta(z|x)}}:=
\frac{p_\theta(x|z) p(z)}{\color{red}{p_\theta(x)}}
$$
によってモデル $p_\theta(x|z)$ を設定した時に、$x$が与えられた時どのような潜在空間の元 $z$ が対応すべきかの **事後確率** を定義できるでしょう。この右辺の分母に ${\color{red}{p_\theta(x)}}$ があるので、$(1)$ の左辺に代入してみましょう。
$$
-\log {\color{red}{p_\theta(x)}} = - \log \frac{p_\theta(x|z) p(z)}{\color{blue}{p_\theta(z|x)}}
$$
一見、特にこれ以上何もできなさそうですが、ここで一工夫します。logの分子は潜在モデル $p_\theta(x|z)$ で書けていますが、分母の事後確率 ${\color{blue}{p_\theta(z|x)}}$ が難しいわけです。そこで事後確率を「まねる」モデル ${\color{green}{q_\varphi(z|x)}}$ を導入してみます。パラメータは後で決めます。これを $1 = \frac{{\color{green}{q_\varphi(z|x)}}}{{\color{green}{q_\varphi(z|x)}}}$ として log の内部に掛け算します:
$$
\left. \begin{array}{ll} -\log {\color{red}{p_\theta(x)}}
&=- \log \Big( \frac{p_\theta(x|z) p(z)}{\color{blue}{p_\theta(z|x)}}
\times
\frac{{\color{green}{q_\varphi(z|x)}}}{{\color{green}{q_\varphi(z|x)}}}
\Big)
\\
&= - \Big(
\log \frac{{\color{green}{q_\varphi(z|x)}}}{\color{blue}{p_\theta(z|x)}}+
\underbrace{
\log \frac{p_\theta(x|z) p(z)}{{\color{green}{q_\varphi(z|x)}}}
}_{-
\log \frac{{\color{green}{q_\varphi(z|x)}}}{p(z)}+
\log p_\theta(x|z)
}
\Big)
\\
&= - \log \frac{{\color{green}{q_\varphi(z|x)}}}{\color{blue}{p_\theta(z|x)}}+ \log \frac{{\color{green}{q_\varphi(z|x)}}}{p(z)} -
\log p_\theta(x|z)
\end{array} \right.
\tag{2}
$$
この量は $z$ に依存するように見えますが、もとの定義 $(1)$ に戻ると、$x$ に依存するのみで $z$ には依存しないことがわかります。$z$に依存しないので任意の $z$ の確率分布で期待値計算(積分)しても値が不変なはずです。そこで $z$ の確率分布として ${\color{green}{q_\varphi(z|x)}}$ について期待値を取ることにします:
$$
(2)
\text{=}-
\underbrace{
\Big\langle \log \frac{{\color{green}{q_\varphi(z|x)}}}{\color{blue}{p_\theta(z|x)}}\Big\rangle_{{\color{green}{q_\varphi(z|x)}}}}_{
D_{KL}\big(
{\color{green}{q_\varphi(z|x)}}
\big\|
{\color{blue}{p_\theta(z|x)}}
\big)
}
+
\underbrace{\Big\langle\log \frac{{\color{green}{q_\varphi(z|x)}}}{p(z)}\Big\rangle_{\color{green}{q_\varphi(z|x)}}}_{
D_{KL}\big(
{\color{green}{q_\varphi(z|x)}}
\big\|
p(z)
\big)
}-
\langle \log p_\theta(x|z) \rangle_{\color{green}{q_\varphi(z|x)}}
$$
となるので、スタート地点の負の対数尤度に戻ると
$$
\left. \begin{array}{ll} - \log {\color{red} {p_\theta (x)}}
&=
\underbrace{-
D_{KL}\Big(
{\color{green}{q_\varphi(z|x)}}
\Big| \Big|
{\color{blue}{p_\theta(z|x)}}
\Big)}_{\leq 0}
+
D_{KL}\Big(
{\color{green}{q_\varphi(z|x)}}
\Big| \Big|
p(z)
\Big) -
\langle \log p_\theta(x|z) \rangle_{\color{green}{q_\varphi(z|x)}}
\\
&\leq %%%%
D_{KL}\Big(
{\color{green}{q_\varphi(z|x)}}
\Big| \Big|
p(z)
\Big)-
\langle \log p_\theta(x|z) \rangle_{\color{green}{q_\varphi(z|x)}} =: L
\end{array} \right.
$$
と書き直せることがわかります。最尤推定ではこの左辺の値をなるべく小さくしたかったわけですが、代わりに最後の $L$ を小さくすると不等式関係から左辺の値も小さくなります。しかも(左辺)=(右辺)となる場合は${\color{green}{q_\varphi(z|x)}}=
{\color{blue}{p_\theta(z|x)}}$に対応します。${\color{green}{q_\varphi(z|x)}}$ は事後確率 ${\color{blue}{p_\theta(z|x)}}$ を真似ることが目的だったので、これも都合が良さそうです。
> このように、事後確率を何らかの別のモデルで近似しにかかるテクニックを「変分推定」といいます。これは多体シュレディンガー方程式の基底状態を求めるときなどに使う、波動関数の変分法と同じ意味合いの用語です。
これらを総合すると、$L$ を小さくすることは最尤推定をしつつ、もしかしたら事後分布推定もできるかもしれないわけです。このような $L$ をevidence lower bound (ELBO)と言います。
> ほんとうは $-L$ がELBOと呼ばれるものです。ELBOと言うときは負の対数尤度最小化ではなく、対数尤度最大化でよく説明されるので、その場合不等式の向きが逆になって $-L$ が下限になります。
そこで元の最尤推定を $L$ の最小化問題に置き換えて解くことが考えられます
$$
\text{min}_{\theta, \varphi} \Big\{
D_{KL} \Big(
q_\varphi (z | x) \Big| \Big|
p(z)
\Big)-
\Big\langle
\log p_\theta(x| z)
\Big\rangle_{q_\varphi (z | x)}
\Big\}
$$
### 変分自己符号化器
この問題設定では、潜在空間上に固定された確率分布 $p(z)$ と
$$
\text{encoder: }
q_\varphi(z|x),\quad \text{decoder: } p_\theta(x|z)
$$
がありますが、[arXiv:1312.6114](https://arxiv.org/abs/1312.6114) にて、ここまでの話を深層ニューラルネットで解く手法が考案されました。それが変分自己符号化器(Variational AutoEncoder, VAE)とよばれるものです。
#### Encoder
上に挙げた確率分布や確率モデルを以下のように設定します:
* $p(z) = \frac{1}{\sqrt{2 \pi}} e^{- \frac{1}{2} z^2 }$
* $q_\varphi(z| x)=
\frac{1}{\sqrt{2 \pi \sigma^2_\varphi(x)}} e^{- \frac{1}{2 \sigma^2_\varphi(x)} (z - \mu_\varphi(x))^2 }$
潜在空間の次元を1次元にしない場合は、ガウス分布としては次元毎に直積を取る形にします。するとKLダイバージェンスの項は1節で計算したガウス分布間のKLダイバージェンスより
$$
D_{KL} \Big(
q_\varphi (z | x) \Big| \Big|
p(z)
\Big) =
\frac{1}{2} \Big(- \log [\sigma_\varphi(x)]^2 - 1 + [\sigma_\varphi(x)]^2 + [\mu_\varphi(x)]^2
\Big)
$$
となります。実装上は $\sigma$ そのものよりも $l =\log \sigma^2$ を使ったほうが良いようです。つまりEncoderを
```
class Encoder(tf.keras.Model):
    """Skeleton for the Gaussian encoder q(z|x): fill in layers and call()."""
    def __init__(self, dim_Z):
        pass
    def call(self, x):
        mu, l = None # TODO: compute the mean mu and log-variance l of q(z|x)
        return mu, l
のように $q_\varphi (z | x)$ は $x \to [\mu_\varphi(x), \underbrace{\log[\sigma_\varphi(x)]^2}_{=:l_\varphi(x)}]$ をニューラルネットで実装します。この場合KL lossは
$$
\frac{1}{2} \Big(- l_\varphi(x) - 1 + e^{l_\varphi(x)} + [\mu_\varphi(x)]^2
\Big)
$$
なので、そのように実装しておきます:
```
def KL_loss(mu, l):
    """Batch-averaged KL divergence D_KL(N(mu, exp(l)) || N(0, 1)).

    `l` is log(sigma^2); the closed form per element is
    (-l - 1 + e^l + mu^2) / 2.
    """
    kl_per_element = -l - 1 + tf.exp(l) + mu ** 2
    return tf.reduce_mean(kl_per_element) / 2
#### Decoder
こちらは 潜在ベクトル $z$ を受け取って $p_\theta(x|z)$ が出力できれば良いだけなので、そのままニューラルネットでOKでしょう。問題は $L$ の二項目
$$
-\langle p_\theta(x|z)\rangle_{q_\varphi(z|x)}
$$
をどう計算するかです。これはサンプリングで近似するのが一つの方法ですが、$q_\varphi(z|x)$ から $z$ をサンプルするだけだと、そこで $\varphi$ の依存性が消えてしまい、encoderへと誤差が伝わりません。そこで使われるのが **reparametrization trick** と呼ばれるものです。平均 $\mu$, 分散 $\sigma^2$ のガウス分布からのサンプリングは、$\epsilon$ を平均 $0$ 分散 $1$ のガウス分布からのサンプリングとして
$$
z = \mu + \epsilon * \sigma
$$
で実現できます。これにencoder出力の $\mu_\varphi(x), l_\varphi(x)$ を適用して
$$
z = \mu_\varphi(x) + \epsilon * \exp(l_\varphi(x)/2)
$$
とします。こうしておくと $\varphi$ の依存性が残って、訓練可能になります。このような関数を作っておきます:
```
def sample_from_N(mu, l):
    """Reparametrization trick: draw z ~ N(mu, exp(l)) differentiably.

    Sampling is rewritten as mu + epsilon * sigma with epsilon ~ N(0, 1),
    so gradients flow back to mu and l.
    """
    epsilon = tf.random.normal(shape=l.shape)
    sigma = tf.keras.backend.exp(l / 2)
    return mu + epsilon * sigma
この関数の出力 $z$ を入力としたニューラルネットとして decoder は構成します。つまり
```
class Decoder(tf.keras.Model):
    """Skeleton for the decoder p(x|z): fill in layers and call()."""
    def __init__(self, dim_Z):
        pass
    def call(self, z):
        x = None  # TODO: map the latent vector z back to data space
        return x
のようなものを実装すればOKで、lossは
$$
-\log p_\theta(x|z_\text{sampled})
$$
から設定毎に導かれる誤差関数を選べばよいです。この項を**reconstruction loss** といいます。
#### 全体の構造
VAEモデルは2つのネットワークから成ります
```
class VAE(tf.keras.Model):
    """Container pairing an encoder q(z|x) with a decoder p(x|z)."""
    def __init__(self, enc, dec):
        super(VAE, self).__init__()
        self.enc = enc  # encoder: x -> (mu, log sigma^2)
        self.dec = dec  # decoder: z -> x
def update(X, vae, opt, reconstruction_loss):  # one training step
    """Run one gradient step on the negative ELBO for the batch X.

    Parameters: X (input batch), vae (model with .enc/.dec), opt (Keras
    optimizer), reconstruction_loss (callable (X, X_decoded) -> scalar).
    Returns the scalar loss for this batch.
    """
    with tf.GradientTape() as tape:
        # encoding step: parameters (mu, log sigma^2) of q(z|x)
        mu, l = vae.enc(X)
        # decoding step: sample z with the reparametrization trick, decode
        Z = sample_from_N(mu, l)
        X_decoded = vae.dec(Z)
        # loss = KL term + reconstruction term (i.e. the negative ELBO)
        loss = KL_loss(mu, l) + reconstruction_loss(X, X_decoded)
    # update: backprop through both encoder and decoder
    grads = tape.gradient(loss, vae.trainable_variables)
    opt.apply_gradients(zip(grads, vae.trainable_variables))
    return loss
```
### 2次元でVAE
以下のように $X$ を実2次元空間として、その上にスイスロール状に分布したデータを教師なしデータとして、VAEを訓練してみましょう。次節のGANでもスイスロール状データで実験しますが、大きさが微妙に違います(上手くいくパラメータを探した結果こうなりました)。
```
X_2d = prepare_swissroll_data() # 出力はnumpy.array
plot_2d_data(X_2d, title='data'); plt.show()
```
#### Encoder
3回`Dense`でつないだ単純なものを作ってみました。activationがtanhですが他のやつでも上手くいくかもです。
```
class Encoder_2d(tf.keras.Model):
    """Gaussian encoder q(z|x) for 2-D data.

    Three Dense layers with tanh activations; the final layer emits the
    concatenation [mu, l] of size 2*dim_Z, where l = log sigma^2.
    """
    def __init__(self, dim_Z=2, dim_feature=32):
        super(Encoder_2d, self).__init__()
        init = tf.initializers.he_normal()
        self.l1 = tf.keras.layers.Dense(dim_feature, kernel_initializer=init)
        self.l2 = tf.keras.layers.Dense(dim_feature, kernel_initializer=init)
        self.l3 = tf.keras.layers.Dense(2 * dim_Z, kernel_initializer=init)
        self.dim_Z = dim_Z

    def call(self, x):
        hidden = tf.keras.activations.tanh(self.l1(x))
        hidden = tf.keras.activations.tanh(self.l2(hidden))
        out = self.l3(hidden)
        # first dim_Z entries are the mean, the rest the log-variance
        return out[:, :self.dim_Z], out[:, self.dim_Z:]
#### Decoder
こちらも大体似たモデルです。activationはleaky relu(負の部分も少し傾きがあるrelu)です。
```
class Decoder_2d(tf.keras.Model):
    """Decoder p(x|z) for 2-D data: three Dense layers with leaky-ReLU
    activations mapping a latent vector z to a point in R^2."""
    def __init__(self, dim_feature=32):
        super(Decoder_2d, self).__init__()
        init = tf.initializers.he_normal()
        self.l1 = tf.keras.layers.Dense(dim_feature, kernel_initializer=init)
        self.l2 = tf.keras.layers.Dense(dim_feature, kernel_initializer=init)
        self.l3 = tf.keras.layers.Dense(2, kernel_initializer=init)

    def call(self, z):
        hidden = tf.nn.leaky_relu(self.l1(z))
        hidden = tf.nn.leaky_relu(self.l2(hidden))
        return self.l3(hidden)

    def generate(self, shape):
        """Decode a fresh batch of prior samples z ~ N(0, 1)."""
        latent = tf.random.normal(shape=shape)
        return self(latent)
#### Training
再構成誤差は2次元の距離にしました。訓練中のテストは実際に2次元にプロットすることにします。
```
def reconstruction_loss_2d(x, x_decoded):
    """Mean squared error between data points and their reconstructions."""
    per_sample_mse = tf.keras.losses.MSE(x, x_decoded)
    return tf.reduce_mean(per_sample_mse)
def train_2d(vae, X_2d, batch_size, epochs):
    """Train `vae` on the 2-D dataset `X_2d` and plot progress 5 times.

    Every epochs//5 epochs, one column of data-space (top) and
    latent-space (bottom) panels is added to the figure.
    Returns the trained model.
    """
    tf_update = tf.function(update)
    opt = tf.keras.optimizers.Adam(1e-2)
    D = tf.data.Dataset.from_tensor_slices(X_2d)
    fig = plt.figure(figsize=(5*6,10))
    plotlabel = 1
    for epoch in range(epochs):
        batch = D.shuffle(70000).batch(batch_size, drop_remainder=True)
        for X in batch:
            # BUGFIX: step on the current minibatch X; the original passed
            # the full dataset X_2d, making the batching loop pointless
            # (the MNIST train() below correctly passes X)
            loss = tf_update(X, vae, opt, reconstruction_loss_2d)
        if epoch%(epochs//5)==0:
            # periodic visual test
            #print("L =", loss)
            ax = fig.add_subplot(2,6,plotlabel)
            ax2 = fig.add_subplot(2,6,plotlabel+6)
            plot_2d_data(X_2d, title="{}-epoch".format(epoch), vae=vae, ax=ax, ax2=ax2)
            plotlabel += 1
    plt.show()
    return vae
以下の訓練は CPU でもかなり早く完了できます。テストでは
* 上段:データ空間、青色=データ点、赤色=デコーダーが潜在空間の赤点からデコードした点
* 下段:潜在空間、青色=エンコーダーがデータ空間のデータ点をエンコードした点($\mu$のみ)、赤色=正規分布$p_z$からのサンプル
として表示しています。
```
%%time
enc_2d = Encoder_2d()
dec_2d = Decoder_2d()
vae_2d = VAE(enc_2d, dec_2d)
train_2d(vae_2d, X_2d, batch_size=32, epochs=100)
```
このように、うまくいけば
* データ点 $\to p_z$ への埋め込み
* $p_z \to$ データ点への埋め込み
が互いにうまく達成されます。
### MNISTでVAE
次に MNIST でラベル情報を捨てた教師なし学習をVAEでやってみましょう。ネットワークの構成などのパラメータ設定はtensorflowのチュートリアルを参考にしています。まず MNIST をピクセル値 $\in [0,1]$ にして読み込みます。
```
mnist = tf.keras.datasets.mnist
(X, Y), (X_test, Y_test) = mnist.load_data()
X, X_test = (X/255).astype(np.float32), (X_test/255).astype(np.float32)
X_train = tf.reshape(tf.concat([X, X_test], axis = 0), shape=(70000, 28, 28, 1))
```
#### Encoder
潜在空間の次元を `dim_Z` とかくことにして、畳込み演算2回と最後に線形変換です:
```
class Encoder_mnist(tf.keras.Model):
    ''' This model definition is derived from the encoder model defined in
    https://www.tensorflow.org/tutorials/generative/cvae
    which is licensed under Apache 2.0 License.

    Convolutional Gaussian encoder q(z|x) for MNIST images: two stride-2
    convolutions, then a Dense layer emitting [mu, log sigma^2].
    '''
    def __init__(self, dim_Z):
        super(Encoder_mnist, self).__init__()
        # two stride-2 valid-padding convolutions downsample the image
        self.c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=(2, 2), activation='relu')
        self.c2 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        # emits mu (dim_Z) and l = log sigma^2 (dim_Z), concatenated
        self.dense = tf.keras.layers.Dense(2*dim_Z)
        self.dim_Z = dim_Z
    def call(self, x):
        ''' x.shape = (batchsize, Lx, Ly, C)

        Returns (mu, l): mean and log-variance of q(z|x), each (batch, dim_Z).
        '''
        h = self.c1(x)
        h = self.c2(h)
        h = self.flatten(h)
        h = self.dense(h)
        # split the 2*dim_Z output into mean and log-variance halves
        mu= h[:, :self.dim_Z]
        l = h[:, self.dim_Z:]
        return mu, l
```
#### Decoder
転置畳み込み(畳み込みの逆演算)を用いてどんどん画素を大きくするような構造をしています:
```
class Decoder_mnist(tf.keras.Model):
    ''' This model definition is derived from the decoder model defined in
    https://www.tensorflow.org/tutorials/generative/cvae
    which is licensed under Apache 2.0 License.

    Transposed-convolution decoder p(x|z): maps a latent vector z to a
    28x28x1 image of per-pixel values in [0, 1] (sigmoid output).
    '''
    def __init__(self):
        super(Decoder_mnist, self).__init__()
        # project z to a 7x7x32 feature map, then upsample 7 -> 14 -> 28
        self.dense = tf.keras.layers.Dense(units=7*7*32, activation='relu')
        self.reshape= tf.keras.layers.Reshape(target_shape=(7, 7, 32))
        self.dc1 = tf.keras.layers.Conv2DTranspose(64, kernel_size=3, strides=2, padding='same', activation='relu')
        self.dc2 = tf.keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding='same', activation='relu')
        self.dc3 = tf.keras.layers.Conv2DTranspose(1, kernel_size=3, strides=1, padding='same')
    def call(self, z):
        h = self.dense(z)
        h = self.reshape(h)
        h = self.dc1(h)
        h = self.dc2(h)
        h = self.dc3(h)
        # sigmoid keeps pixel values in [0, 1] to match the training data
        return tf.keras.activations.sigmoid(h)
    def generate(self, shape):
        ''' batch_size, dim_Z = shape

        Sample z ~ N(0, 1) of the given shape and decode it to images.
        '''
        Z = tf.random.normal(shape=shape)
        return self(Z)
最後にsigmoid関数をかけたのは、`X_train` のピクセル値 $\in[0,1]$ としたので、これに合わせるためです。ところでこの場合、各ピクセル毎に確率値が割り振られているので、$-\log p_\theta(x|z_\text{sampled})$ は通常のクロスエントロピーで良いでしょう:
```
def reconstruction_loss_mnist(x, x_decoded):
    """Bernoulli reconstruction loss: per-image summed binary cross-entropy,
    averaged over the batch."""
    bce = tf.keras.losses.binary_crossentropy(x, x_decoded)
    per_image = tf.reduce_sum(bce, axis=[1, 2])
    return tf.reduce_mean(per_image)
```
訓練用の関数は以下のようになります:
```
def train(vae, X_train, batch_size, epochs):
    """Train `vae` on X_train for `epochs` epochs.

    After every epoch, print the loss of the last batch and display 9
    images decoded from fresh samples z ~ N(0, 1).
    Returns the trained model.
    """
    tf_update = tf.function(update)
    opt = tf.keras.optimizers.Adam(1e-4)
    D = tf.data.Dataset.from_tensor_slices(X_train)
    for epoch in range(epochs):
        batch = D.shuffle(70000).batch(batch_size, drop_remainder=True)
        for X in batch:
            loss = tf_update(X, vae, opt, reconstruction_loss_mnist)
        # test every epoch: the original guard `epoch%1==0 or epoch==(epochs-1)`
        # was always true, so the condition is removed (behavior unchanged)
        print("L =", loss)
        dim_Z = vae.enc.dim_Z
        X_generated = vae.dec.generate(shape=(9, dim_Z))
        show(X_generated)
    return vae
```
実際に訓練してみます。各epoch毎に誤差関数 $L$ の値と $z$ を平均 $0$ 分散 $1$ のガウス分布からサンプルしてdecodeしたものを表示しています。GPUで1分未満で終わると思いますがCPUだと時間がかかります。
```
enc = Encoder_mnist(dim_Z=2)
dec = Decoder_mnist()
vae = VAE(enc, dec)
train(vae, X_train, batch_size=32, epochs=5)
```
#### 画像の「ホモトピー」
訓練後、潜在ベクトル二点の間をinterpolate
$$
x(t) = dec\big( z(t) \big), \quad z(t):z_1 \to z_2
$$
すると、生成画像がなめらかにつながる様子が確認できます:
```
Z1 = tf.random.normal(shape=(1, vae.enc.dim_Z))
Z2 = tf.random.normal(shape=(1, vae.enc.dim_Z))
for t in np.linspace(0,1,10):
if t==0:
X_list = vae.dec(Z1)
else:
X_generated = vae.dec((1-t)*Z1+t*Z2)
X_list = tf.concat([X_list, X_generated], axis=0)
show(X_list, titles=["t={:.02f}".format(t) for t in np.linspace(0,1,10)], horizontal=True)
```
### 関連する話題
VAEを用いて文章生成も可能です。例えば[arXiv:1511.06349](https://arxiv.org/abs/1511.06349) などがあります。この論文ではKL項の前に逆温度を設け、encoderが潰れないように徐々に冷やす正則化がなされています。このようなKL項に温度を設定するVAEは $\beta$-VAE と呼ばれ、盛んに研究されているようです([論文リンク](https://openreview.net/forum?id=Sy2fzU9gl))。
また、ここでは $p(z)$ や $q_\varphi(z|x)$ をガウス分布に取りましたが、最近ではこれらを離散確率分布に取る「ベクトル量子化変分自己符号化器(Vector Quantized Variational Auto-Encoder, VQ-VAE)」というのが良い性能を出すことがわかってきています:[arXiv:1711.00937](https://arxiv.org/abs/1711.00937), [arXiv:1906.00446](https://arxiv.org/abs/1906.00446)。
### Comments on the licensed source code used in this subsection
#### License for `prepare_swissroll_data()`
In this notebook, the definitions of
* function: `prepare_swissroll_data`
includes codes derived from
https://github.com/lukovnikov/improved_wgan_training/blob/master/gan_toy.py
which is licensed as follows:
> Copyright (c) 2017 Ishaan Gulrajani <br>
> Released under the MIT license <br>
> https://github.com/lukovnikov/improved_wgan_training/blob/master/LICENSE
#### License for definitions of `Encoder_mnist`, `Decoder_mnist`
In addition, the definitions of
* classes: `Encoder_mnist`, `Decoder_mnist`
include codes derived from
https://www.tensorflow.org/tutorials/generative/cvae
which is licensed under [Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0). For details, see their [Site Policies](https://developers.google.com/terms/site-policies).
| github_jupyter |
# Tutorial de Python
Tutorial de Python 3.6
## Tipos de dados
Em python você não precisa declarar as variaveis e nem especificar o tipo dela. Uma mesma variável também pode receber dados de tipos diferentes.
```
# Mesma variável recebendo tipos diferentes
var = 5
print(var)
var = "oi"
print(var)
var = 3.14
print(var)
# transformando de inteiro para float
print("Int -> float", float(5))
# transformando de float para inteiro
print("Float -> int", int(3.1415))
# transformando de inteiro para string
print("Int -> str", str(234))
```
## Operações básicas
Python suporta as mesmas operações aritméticas de C com algumas a mais.
```
x = 2
y = 3
# Adição
print("Add:", y + x)
# Subtração
print("Sub:", y - x)
# Multiplicação
print("Mult:", y * x)
# Divisão real
print("Div:", y / x)
# Divisão inteira
print("Div int:", y // x)
# Exponenciação
print("Exp:", y ** x)
# Não exite x++
x += 1
print("x++ ", x)
```
### Operações lógicas
Vendo como a sintaxe para lógica é diferente. Aqui há nativamente `True` e `False`
```
# AND
print(True and False)
# OR
print(True or False)
# Variaveis
print(var > y)
# NOT
print(not True)
```
### Strings
Algumas funções que já vem prontas para o tratamento de strings
```
s = 'oi, tudo bem!'
# Split
print("Split em espaço:", s.split())
# Replace
print("Replace na string:", s.replace('!', '?'))
```
## Listas
Como fazer arrays em Python e a principal diferença entre C
```
# Os elementos de uma lista não precisam ser do mesmo tipo
l = [1, 3.14, 5, 7, 8, 'eita', []]
print(l)
```
### Indexando listas
Não tem só uma forma de indexar
```
l = [1, 3, 5, 7, 8, 'eita']
# First element
print(l[0])
# First four elements (indices 0..3 -- note l[0:4] yields four items, not three)
print(l[0:4])
# Last element
print(l[-1])
# Every second element from start to end
print(l[::2])
# Reversed list
print(l[::-1])
```
### Aumentando as listas
```
x = [1, 2, 3]
y = [5, 6, 7]
# Adicionando um elemento
x.append(4)
print(x)
# Juntando duas listas
print(x + y)
# Multiplicando a lista
print(x * 3)
# Jeito padrão de fazer um vetor só de zeros em python
zeros = [0] * 10
print(zeros)
```
## Fors
Sua cabeça pode explodir nessa seção
```
# For de 0 a 4 printando cada elemento
for i in range(5):
print(i)
# Lista que vai de 0 a 9
l = []
for i in range(10):
l.append(i)
print(l)
```
Há uma grande diferença da forma como Python e C fazem uso do for
```
# For sobre lista como em C
for i in range(len(l)):
print(l[i])
# For sobre lista python
for x in l:
print(x)
```
### Pythonic way - List comprehension
```
# Lista que vai de 0 a 9. Faz a mesma coisa que o código que gerou a lista l só que em uma linha
[i for i in range(10)]
# Lista de 0 a 9 só com pares
[i for i in range(10) if i % 2 == 0]
# For em duas listas ao mesmo tempo
x = ['a', 'b', 'c']
y = [1, 2, 3]
for letra, numero in zip(x, y):
print(letra, numero)
```
## Dicionários
Dicionários são tabela hash que armazenam uma chave e um valor
```
a = {'oi': 5, 'tchau': 10}
a
```
É possível iterar sobre o dicionário da mesma forma como sobre as listas, só precisa escolher sobre o que
```
# Keys
print(a.keys())
# Values
print(a.values())
# Items
print(a.items())
```
## Função
Como sempre, é possível usar como se fosse em C mas o Python apresenta algumas funcionalidades a mais
```
# Função normal
def fun(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
fun(4, 5)
# Função que retorna 2 elementos
def fun2(a, b):
    """Return both arguments doubled, as a 2-tuple."""
    doubled = (a * 2, b * 2)
    return doubled
fun2(1, 2)
# Função com argumento padrão
def func3(a, b = 10):
    """Return a * b; b defaults to 10."""
    result = a * b
    return result
# Chamando normalmente
print(func3(5, 2))
# Sem passar o parametro
print(func3(5))
```
| github_jupyter |
# Make sentence evaluation sample dataset
We want to sanity check the accuracy of the [ArgumenText](https://api.argumentsearch.com/en/doc) API. One way to do this is spot checks on the results, and using those spot checks to estimate precision and recall.
**Precision**
Also known as "positive predictive value."
Out of all of the retrieved instances, how many of them are correctly retrieved/classified?
For argument mining, this is evaluated by looking at all of the sentences classified as an argument, and verifying how many of them actually contain an argument.
**Recall / Sensitivity**
Also known as the true positive rate.
What is the proportion of classified positives given all of the labeled positives. The extent to which actual positives are not overlooked.
High sensitivity means that a true positive is rarely overlooked. This often comes at a loss of specificity.
**Specificity**
Also known as the true negative rate.
What is the proportion of classified negatives given all of the labeled negatives.
High specificity means that a true negative is rarely overlooked.
```
# reload local package definitions for each cell
%load_ext autoreload
%autoreload 2
import os
import time
from dotenv import find_dotenv, load_dotenv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc, precision_recall_curve
from arg_mine import DATA_DIR, FIGURES_DIR
from arg_mine.data import loaders, labelers
from arg_mine.api import classify, auth, session, errors
from arg_mine.visualization.plot_utils import make_confusion_matrix, make_roc_curve, make_precision_recall_curve
from arg_mine import utils
from arg_mine.metrics import summary_stats
SEED = 12345
sns.set_style("whitegrid")
# load the 2020 context snippet, for access later
csv_datapath = os.path.join(DATA_DIR, "raw", "2020-climate-change-narrative")
csv_filepath = os.path.join(csv_datapath, "WebNewsEnglishSnippets.2020.csv")
url_df = loaders.get_gdelt_df(csv_filepath)
start_time = time.time()
data_processed_project = "gdelt-climate-change-docs"
base_path = os.path.join(DATA_DIR, "processed", data_processed_project)
docs_df = loaders.load_processed_csv("gdelt_2020_docs_docs0-999.csv", data_processed_project)
print("data load took {:0.1f} s".format(time.time() - start_time))
sentences_full_df = loaders.load_processed_csv("gdelt_2020_sentences_docs0-999.csv", data_processed_project, drop_nan_cols='sentence_original')
arg_label_counts = sentences_full_df.argument_label.value_counts()
print(arg_label_counts)
print("\nTotal # of sentences: {}".format( sentences_full_df.shape[0]))
class_pct = arg_label_counts / sentences_full_df.shape[0]
print(class_pct)
class_pct.plot(kind="bar")
_ = plt.title("percentage of sentence classes")
```
# Evaluate the ArgText classification vs human labels
# 600 sentence manual review
We look at 600 sentences with a balanced split between arg/non-arg sentences, 300 of each.
This balanced dataset gives us information of how well the algorithm performs under the assumption of an equal distribution of classes. This, of course, is not true, but it gives us a feel for how the model performs under ideal circumstances.
```
review_output_filename = "argtext_sentences_manual_review_1k_reviewed600.csv"
review_output_filename = os.path.join(DATA_DIR, "interim", review_output_filename)
reviewed_sent_df = pd.read_csv(review_output_filename)
reviewed_sent_df.dropna(inplace=True)
y_model = reviewed_sent_df.is_arg.astype(int)
y_model_conf = reviewed_sent_df.argument_confidence
y_label = reviewed_sent_df.binarized.astype(int)
stats_balanced = summary_stats(y_label, y_model, y_model_conf, name="balanced classes")
stats_balanced.at['balanced classes', "threshold"] = 0.5
stats_balanced
reviewed_sent_df.argument_label.value_counts()
n_bins = 20
fig, ax = plt.subplots()
_ = plt.hist(
reviewed_sent_df.loc[reviewed_sent_df['argument_label'] == classify.ArgumentLabel.NO_ARGUMENT, 'argument_confidence'],
bins=n_bins, label='not arg')
_ = plt.hist(
reviewed_sent_df.loc[reviewed_sent_df['argument_label'] == classify.ArgumentLabel.ARGUMENT, 'argument_confidence'],
bins=n_bins, label='arg')
plt.xlabel("probability is arg")
plt.title("Probability sentence is argument, by class")
_ = plt.legend()
```
### Confusion matrix
```
cf_matrix = confusion_matrix(y_label, y_model)
fig = make_confusion_matrix(cf_matrix, categories=["not arg", "arg"], title="confusion matrix")
plt.savefig(os.path.join(FIGURES_DIR, "argtext_validation_600sentence_cm.png"), bbox_inches='tight')
```
Because this dataset is balanced (equal positive and negative samples), we would ideally see the same number on the diagonal, and nothing in the off-diagonal.
What we see is that the false positives (upper right) and false negatives (lower left) are fairly balanced, given a balanced presence of positive and negative sentences in the evaluation set. This may not be the case in the unbalanced "natural" set.
### ROC curve
Typically, we want to have as low of a FPR and as high of a TPR as possible, effectively pushing the curve up and to the left. The unity line denotes random chance for this binomial problem.
```
ax = make_roc_curve(y_label, y_model_conf, selected_thresh=0.5)
```
Note that the False Positive Rate (FPR) and True Positive Rate (TPR) can both be changed by adjusting the threshold used for the binary classification.
If we are willing to have a higher true positive rate at the cost of more false positives, we can decrease the threshold. This will also result in more positive classifications overall.
Below we decrease the threshold to 0.05, which increases the TPR to 90%, and also increases the FPR to 28%. This means that out of all of the sentences that are actually not arguments, 28% of them will be classified as arguments.
```
ax = make_roc_curve(y_label, y_model_conf, selected_thresh=0.05)
```
We can also increase the threshold, which will decrease the FPR rate, but will also decrease the TPR rate.
```
ax = make_roc_curve(y_label, y_model_conf, selected_thresh=0.8)
```
### Precision Recall
We want both high precision and high recall. 80%/80% can be a reasonable target, without requiring additional tuning on the model training.
High precision means more sentences classified as arguments are actually arguments.
High recall means out of all of the sentences that are actually arguments, more of them are classified as arguments
```
ax = make_precision_recall_curve(y_label, y_model_conf, selected_thresh=0.5)
```
This is a good result from the balanced dataset, and a threshold of 0.5 gives us about what we would like to see in precision and recall.
Unfortunately, the sentence classes aren't balanced, so we need to evaluate the performance of the model on the unbalanced dataset. This will likely reduce the precision, as there are fewer sentences that are actually arguments to classify them as arguments.
# Naturalized sample from 600 sentences
The above analysis was to look at overall recall/precision for a balanced dataset. But the classes aren't balanced. What do these numbers look like if we downsample the `argument` class to match the natural distribution, around 21%.
To do this, we use the ground truth labels to identify which sentences are not arguments vs arguments, and downsample the number of argument sentences to match the expected population ratio (21% positive)
```
class_pct
# sample based on presumed ground truth labels, NOT the prediction
negative_samples = reviewed_sent_df.loc[reviewed_sent_df.binarized == 0]
n_negative = negative_samples.shape[0]
# TODO: simplify this math? There should be an easier formula than (nneg/pct_neg - nneg). nneg*(1/pct_neg - 1)?
n_positive = np.round(n_negative/class_pct['no argument'] - n_negative).astype(int)
total_samples = np.round(n_negative / class_pct['no argument']).astype(int)
positive_samples = reviewed_sent_df.loc[reviewed_sent_df.binarized == 1].sample(n=n_positive)
reviewed_natural_df = pd.concat([negative_samples, positive_samples]).sample(frac=1).reset_index(drop=True)
y_model = reviewed_natural_df.is_arg.astype(int)
y_model_conf = reviewed_natural_df.argument_confidence
y_label = reviewed_natural_df.binarized.astype(int)
stats_natural = summary_stats(y_label, y_model, y_model_conf, name="natural classes")
stats_natural.at['natural classes', "threshold"] = 0.5
stats_natural
```
### Confusion matrix
```
cf_matrix = confusion_matrix(y_label, y_model)
fig = make_confusion_matrix(cf_matrix, categories=["not arg", "arg"], title="confusion matrix")
plt.savefig(os.path.join(FIGURES_DIR, "argtext_validation_382sentence_cm_natural.png"), bbox_inches='tight')
```
We see that the model has a high percentage of false positives in the context of a natural prevalence of arg/not-arg sentences.
As discussed above, this high percentage of false positives could be reduced by increasing the binarizing threshold. This will also in turn affect the precision and recall.
### ROC curve
Typically, we want to have as low of a FPR and as high of a TPR as possible, effectively pushing the curve up and to the left. The unity line denotes random chance for this binomial problem.
```
ax = make_roc_curve(y_label, y_model_conf, selected_thresh=0.5)
```
This curve looks similar to the unbalanced curve, but with fewer positive samples it is coarser and loses resolution.
### Precision Recall
We want both high precision and high recall. 80%/80% can be a reasonable target, without additional tuning on the model training.
High precision means more sentences classified as arguments are actually arguments.
High recall means out of all of the sentences that are actually arguments, more of them are classified as arguments
```
ax = make_precision_recall_curve(y_label, y_model_conf, selected_thresh=0.5)
```
# Model comparison
Why are the imbalanced and balanced outcomes so different? Let's first compare the aggregated scores.
```
model_stats = pd.concat([stats_balanced, stats_natural], axis=0)
model_stats
# higher is better for all of these metrics
model_stats.drop("threshold", axis=1).T.plot.bar(figsize=(8,6))
```
We see a pretty dramatic drop in precision for the unbalanced dataset, corresponding to fewer arguments present and thus a lower accuracy on just positive predictions.
This could be adjusted by looking at different thresholds from the classification confidence. Let's iterate through a few examples to see the effects.
```
# add different thresholds for the natural distribution
y_label = reviewed_natural_df.binarized.astype(int)
y_model_conf = reviewed_natural_df.argument_confidence
thresholds = [0.1, 0.25, 0.5, 0.75, 0.9]
thresh_stats = []
y_thresh_out = {}
for thresh in thresholds:
y_thresh_out[thresh] = (y_model_conf > thresh).astype(int)
name = "natural, t={}".format(thresh)
tmp = summary_stats(y_label, y_thresh_out[thresh], y_model_conf, name=name)
tmp.at[name, "threshold"] = thresh
thresh_stats.append(tmp)
thresh_stats_df = pd.concat(thresh_stats, axis=0)
thresh_stats_df
# higher is better for all of these metrics
thresh_stats_df.drop("threshold", axis=1).T.plot.bar(figsize=(8,6))
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
_ = plt.title("Natural dataset summary statistics, varying thresholds")
plt.savefig(os.path.join(FIGURES_DIR, "argtext_validation_382sentence_natural_summary_stats.png"), bbox_inches='tight')
```
The biggest change that we see is in recall, and as expected we see a concomitant increase in precision with the decrease in recall. The sharp drop in recall with increasing threshold means that we are catching fewer of the sentences that are arguments.
Given these plots, we can see that a threshold value of 0.5 may be a reasonable level for optimizing precision and recall. If we are okay with having more sentences that do NOT contain arguments, we can decrease the threshold to something lower than 0.5.
## Save the mismatched sentences for review
Which sentences is the model missing as an argument?
```
mismatched_df = reviewed_natural_df[reviewed_natural_df.binarized != reviewed_natural_df.is_arg]
mismatched_df = mismatched_df.astype({"has_argument": int, "binarized": int}).rename(columns={"binarized": "label"})
mismatched_df.head()
mismatched_df.shape
mismatched_df.has_argument.value_counts()
```
An interesting note is that out of the manually reviewed sentences, most of them are in the intentional "fuzzy" category. When I labeled these sentences, I used a `2` to refer to sentences that may contain an argument, but it was not clear if the argument was related to the topic or not.
When the `has_argument` column is reduced to "positive" or "negative" (0 or 1), I defined all 2s to be "not argument". Maybe this was not the best decision, so we can reexamine this result.
```
# write out the sentences
mismatched_filename = "argText_mismatched_natural.csv"
mismatched_filename = os.path.join(DATA_DIR, "interim", mismatched_filename)
mismatched_df.set_index('row_number').to_csv(mismatched_filename)
```
| github_jupyter |
```
# Compare unsupervised (PCA) and supervised (LDA) 2-D projections of the
# iris dataset.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

iris = datasets.load_iris()
X = iris.data  # raw feature matrix
Y = iris.target  # class labels (used by LDA; PCA ignores them)
target_names = iris.target_names

# PCA: unsupervised projection onto the 2 directions of maximum variance.
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
X_r
# LDA: supervised projection maximizing between-class separation.
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, Y).transform(X)

plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
# One scatter series per class so each gets its own color and legend entry.
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
    plt.scatter(X_r[Y == i, 0], X_r[Y == i, 1], color=color, alpha=0.8, lw=lw, label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
    plt.scatter(X_r2[Y == i, 0], X_r2[Y == i, 1], color=color, alpha=0.8, lw=lw, label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
# Pull a tracked-particle feature CSV from S3 and run a 2-component PCA on
# the selected trajectory features.
import diff_classifier.aws as aws
import pandas as pd
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3

rfol = '01_18_Experiment/P3'        # remote folder on S3
features = 'features_P3_S1_R_0012'  # file stem for this experiment/slice
rfile = '{}/{}.csv'.format(rfol, features)
lfile = '{}.csv'.format(features)
aws.download_s3(rfile, lfile, bucket_name='ccurtis.data')
feat = pd.read_csv(lfile, sep=",", index_col='Unnamed: 0')
# f1 = feat[['AR', 'D_fit', 'MSD_ratio', 'alpha', 'asymmetry1', 'boundedness', 'elongation',
#            'fractal_dim', 'kurtosis', 'straightness', 'trappedness']].dropna(axis=0)
# Replace +/-inf with NaN first so dropna also removes those rows.
# NOTE(review): f1 and f2 select identical columns and rows here -- f2 looks
# redundant; it is only used as the transform target below.
f1 = feat[['AR', 'D_fit', 'MSD_ratio', 'alpha', 'asymmetry1', 'boundedness', 'elongation', 'fractal_dim',
           'kurtosis', 'straightness', 'trappedness']].replace([np.inf, -np.inf], np.nan).dropna(axis=0)
f2 = feat[['AR', 'D_fit', 'MSD_ratio', 'alpha', 'asymmetry1', 'boundedness', 'elongation', 'fractal_dim',
           'kurtosis', 'straightness', 'trappedness']].replace([np.inf, -np.inf], np.nan).dropna(axis=0)
pca = PCA(n_components=2)
f1_r = pca.fit(f1).transform(f1)  # fit and project the same data
plt.figure(figsize=(6, 6))
plt.scatter(f1_r[:, 0], f1_r[:, 1], alpha=0.05, lw=lw)
plt.show()
# Same projection (fit on f1, applied to the identical f2), zoomed in.
f1_r = pca.fit(f1).transform(f2)
plt.figure(figsize=(6, 6))
plt.scatter(f1_r[:, 0], f1_r[:, 1], alpha=0.05, lw=lw)
plt.xlim([-10, 100])
plt.ylim([-5, 20])
plt.show()
f1_r
# pca = PCA(n_components=3)
# f1_r = pca.fit(f1).transform(f1)
# fig = plt.figure()
# ax = p3.Axes3D(fig)
# ax.view_init(80, -80)
# plt.scatter(f1_r[:, 0], f1_r[:, 1], f1_r[:, 2], alpha=0.05, lw=lw)
# plt.show()
```
| github_jupyter |
#Video Overlay
Add images, text, and audio to videos.
#License
Copyright 2020 Google LLC,
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#Disclaimer
This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.
This code generated (see starthinker/scripts for possible source):
- **Command**: "python starthinker_ui/manage.py colab"
- **Command**: "python starthinker/tools/colab.py [JSON RECIPE]"
#1. Install Dependencies
First install the libraries needed to execute recipes, this only needs to be done once, then click play.
```
!pip install git+https://github.com/google/starthinker
```
#2. Set Configuration
This code is required to initialize the project. Fill in required fields and press play.
1. If the recipe uses a Google Cloud Project:
- Set the configuration **project** value to the project identifier from [these instructions](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md).
1. If the recipe has **auth** set to **user**:
- If you have user credentials:
- Set the configuration **user** value to your user credentials JSON.
- If you DO NOT have user credentials:
- Set the configuration **client** value to [downloaded client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md).
1. If the recipe has **auth** set to **service**:
- Set the configuration **service** value to [downloaded service credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md).
```
from starthinker.util.configuration import Configuration

# Project/authentication context shared by every recipe call below.
# Fill exactly the credentials that apply (user OR client OR service).
CONFIG = Configuration(
    project="",                 # Google Cloud project identifier.
    client={},                  # Installed client credentials JSON, if used.
    service={},                 # Service account credentials JSON, if used.
    user="/content/user.json",  # Path where user credentials are stored.
    verbose=True
)
```
#3. Enter Video Overlay Recipe Parameters
1. Provide either a sheet or a BigQuery table.
1. Each video edit will be read from the sheet or table.
Modify the values below for your use case, can be done multiple times, then click play.
```
# Recipe parameters: provide EITHER a sheet (sheet/tab) OR a BigQuery
# location (project/dataset/table). Values are substituted into TASKS.
FIELDS = {
    'auth_read': 'user',  # Credentials used for reading data.
    'sheet': '',  # Name or URL of sheet.
    'tab': '',  # Name of sheet tab.
    'project': '',  # Google Cloud Project Identifier.
    'dataset': '',  # Name of dataset.
    'table': '',  # Name of table.
}
print("Parameters Set To: %s" % FIELDS)
```
#4. Execute Video Overlay
This does NOT need to be modified unless you are changing the recipe, click play.
```
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields

# Recipe definition: first copy the template sheet (no-op if it already
# exists), then read video edit instructions from the sheet and/or BigQuery.
# The nested {'field': {...}} dicts are placeholders that json_set_fields
# resolves from FIELDS before execution.
# NOTE(review): "tamplate" typo below lives in a runtime string value and is
# deliberately left unchanged here.
TASKS = [
  {
    'sheets': {
      '__comment__': 'Copy the tamplate sheet to the users sheet. If it already exists, nothing happens.',
      'auth': 'user',
      'template': {
        'sheet': 'https://docs.google.com/spreadsheets/d/1BXRHWz-1P3gNS92WZy-3sPZslU8aalXa8heOgygWEFs/edit#gid=0',
        'tab': 'Video'
      },
      'sheet': {'field': {'name': 'sheet', 'kind': 'string', 'order': 1, 'default': '', 'description': 'Name or URL of sheet.'}},
      'tab': {'field': {'name': 'tab', 'kind': 'string', 'order': 2, 'default': '', 'description': 'Name of sheet tab.'}}
    }
  },
  {
    'video': {
      '__comment__': 'Read video effects and values from sheet and/or bigquery.',
      'auth': 'user',
      'sheets': {
        'sheet': {'field': {'name': 'sheet', 'kind': 'string', 'order': 1, 'default': '', 'description': 'Name or URL of sheet.'}},
        'tab': {'field': {'name': 'tab', 'kind': 'string', 'order': 2, 'default': '', 'description': 'Name of sheet tab.'}}
      },
      'bigquery': {
        'project': {'field': {'name': 'project', 'kind': 'string', 'order': 3, 'default': '', 'description': 'Google Cloud Project Identifier.'}},
        'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'order': 4, 'default': '', 'description': 'Name of dataset.'}},
        'table': {'field': {'name': 'table', 'kind': 'string', 'order': 5, 'default': '', 'description': 'Name of table.'}}
      }
    }
  }
]

# Resolve the FIELDS placeholders, then run the recipe.
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
```
| github_jupyter |
# __DATA 5600: Introduction to Regression and Machine Learning for Analytics__
## __Notes on the Bayesian Beta-Bernoulli Conjugate Model__ <br>
Author: Tyler J. Brough <br>
Last Update: September 13, 2021 <br>
<br>
```
import numpy as np
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt

# Global plot styling for the rest of the notebook.
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = [10, 5]
```
<br>
## Introduction
<br>
I would like to take a step back and consider the process of Bayesian inference in a simpler model. We will first consider the simple case of flipping a coin and estimating the proportion of heads. Let $Y \in \{0,1\}$ be our random variable for the outcome of a coin flip where $y = 1$ stands for heads and $y = 0$ stands for tails.
<br>
We will first consider the case where we have a fair coin. That is, where $\theta = 0.5$, and $\theta$ is the proportion of heads in the total number of flips. By definition, each flip is independent of the last. We usually model coin flipping with the _Binomial distribution_. So we will first take some time to understand the basic details of that particular distribution.
<br>
Note that we are not so interested in the coin flipping problem per se, but rather are interested in it because it offers a simple setting in which to come to grips with the process of Bayesian statistical inference. Although, the binomial distribution does play an important role in finance, and coin flipping can be replaced in a more general setting for any binary outcome. Therefore, the process of statistical inference that we derive for coin flipping can apply much more broadly to more interesting questions. Here are some examples of binary random variables that we might be interested in:
<br>
- The existence of God
- In a two-party election, what is the probability that a person will vote for each candidate?
- If you are an airline analyst, what is the probability that a passenger will show up for his flight? Should you overbook? By how much?
- For a given job training program, what is the probability that we will see at least a $10\%$ improvement?
- For a given survey question, what is the probability that a respondent will agree or disagree?
- In online advertising what is the probability of a click-through?
<br>
<br>
## The Bernoulli and Binomial Distributions
<br>
Please see the following Wikipedia articles on the _Bernoulli_ and _Binomial_ distributions:
- The Bernoulli distribution: <https://en.wikipedia.org/wiki/Bernoulli_distribution>
- The Binomial distribution: <https://en.wikipedia.org/wiki/Binomial_distribution>
<br>
<br>
### The Bernoulli Distribution
<br>
Consider flipping a coin a single time and recording the outcome. Let the random variable $Y$ represent the outcome of the single flip. Also let $\theta$ be the probability of the coin landing on heads (if it is a fair coin, then $\theta = 0.5$). Then we can say the following:
- $p(y = 1 | \theta) = \theta$
- $p(y = 0 | \theta) = 1 - \theta$
We can combine these equations as follows:
$$
p(y | \theta) = \theta^{y} (1 - \theta)^{(1 - y)}
$$
This is the _Bernoulli distribution_ for $Y \in \{0,1\}$ and $0 \le \theta \le 1$.
We can show that:
- $E(Y) = \theta$
- $Var(Y) = \theta (1 - \theta)$
Note that when $y = 1$ the expression simplifies to $\theta$, and when $y = 0$ the expression becomes $(1 - \theta)$
<br>
<br>
### The Binomial Distribution
<br>
Now consider the case when we flip a coin some number of times, denote this by $N$. We might then ask how many times a heads came up in the total number of flips. In this case, $\theta$ is still the probability of heads for a single flip, but we can also interpret it as the proportion of heads in $N$ flips. Each flip is called a Bernoulli trial. So the Binomial distribution is a generalization of the Bernoulli distribution. In fact, when $N = 1$, the Binomial distribution collapses to the Bernoulli distribution.
<br>
Let $X \in \{1,\ldots,N\}$ represent the number of heads in $N$ trials. In other words, let $X = \sum\limits_{i=1}^{N} Y_{i}$, where each $Y_{i}$ is the outcome of a single trial. Then $X$ has a _Binomial_ distribution (or $X$ is distributed according to the Binomial distribution):
<br>
$$
p(X | N) = \binom{N}{X} \theta^{X} (1 - \theta)^{(N - X)}
$$
<br>
where
<br>
$$
\binom{N}{X} = \frac{N!}{(N-X)!X!}
$$
<br>
is the number of ways to choose $X$ items from $N$.
<br>
Say we have the data set $D = \{y_{1}, \ldots, y_{N}\}$ where each $y_{i}$ is the observed outcome of a single flip (i.e. a single Bernoulli trial). Notice that the likelihood function becomes:
<br>
$$
p(D | \theta) = \prod\limits_{i=1}^{N} p(y_{i} | \theta) = \prod\limits_{i=1}^{N} \theta^{y_{i}} (1 - \theta)^{(1 - y_{i})} = \theta^{N_{1}} (1 - \theta)^{N_{0}}
$$
<br>
where $N_{1} = \sum_{i} y_{i}$ is the number of heads and $N_{0} = \sum_{i} (1 - y_{i})$ is the number of tails. Obviously, $N = N_{1} + N_{0}$.
<br>
This is the Binomial likelihood function for our data.
<br>
<br>
<br>
## Parameter Estimation
<br>
Say we have a coin with probability of heads $\theta$. How do we estimate $\theta$ from a sequence of coin tosses $D = \{Y_{1}, \ldots, Y_{N}\}$, where $Y_{i} \in \{0,1\}$?
<br>
<br>
### Maximum Likelihood
<br>
One approach is to find a maximum likelihood estimate (__NB:__ a frequentist approach):
<br>
$$
\hat{\theta}_{ML} = \arg\max_{\theta} p(D | \theta)
$$
<br>
Given $D = \{y_{1}, \ldots, y_{N}\}$, the likelihood is
<br>
$$
p(D | \theta) = \theta^{N_{1}} (1 - \theta)^{N_{0}}
$$
<br>
as shown above. To find the maximum likelihood it is sometimes easier to work with the log-likelihood function.
<br>
The log-likelihood function is
<br>
$$
L(\theta) = \log{p(D | \theta)} = N_{1} \log{\theta} + N_{0} \log{1 - \theta}
$$
<br>
Solving for $\frac{dL}{d\theta} = 0$ yields
<br>
$$
\hat{\theta}_{ML} = \frac{N_{1}}{N_{1} + N_{0}} = \frac{N_{1}}{N}
$$
<br>
It can be shown (although we won't here) that this estimator has all the good properties that we desire from a frequentist estimator such as unbiasedness, consistency, and efficiency. It also has the nice feature of simplicity.
<br>
Suppose we flip the coin $N = 100$ times and we observe $N_{1} = 48$ heads. Then, the maximum likelihood estimator gives us:
<br>
$$
\hat{\theta}_{ML} = \frac{N_{1}}{N} = \frac{48}{100} = 0.48
$$
<br>
We might simulate this in `Python` as follows:
<br>
<br>
```
# Simulate N Bernoulli trials and compute the maximum likelihood estimate
# of theta (the sample proportion of heads).
theta = 0.5  # true probability of heads
N = 100      # number of coin flips
flips = np.random.binomial(n=1, p=theta, size=N)
flips
N1 = np.sum(flips)     # observed number of heads
theta_hat_ml = N1 / N  # MLE: sample proportion of heads
print(f"\nThe Maximum Likelihood Estimator is: \t{theta_hat_ml : 0.6f}\n")
# sqrt((theta - theta_hat)^2) is simply |theta - theta_hat|: the absolute
# error of the point estimate.  The original label called this the "Mean
# Squared Error", which it is not -- nothing is squared-and-averaged here.
print(f"The Absolute Error is: \t\t{np.sqrt((theta - theta_hat_ml)**2.0) : 0.6f}\n\n")
```
<br>
But what happens if we flip the coin $N = 3$ times and we observe $N_{1} = 0$ heads? Then we predict that heads are impossible!
<br>
$$
\hat{\theta}_{ML} = \frac{N_{1}}{N} = \frac{0}{3} = 0
$$
<br>
This is known as a ___sparse data problem___: if we fail to see something in our data sample, then we predict that it will never happen in the future. We will see how the Bayesian approach avoids this problem.
<br>
<br>
### Bayesian Estimation
<br>
The Bayesian approach is to treat $\theta$ as an uncertain variable and to use the rules of probability to characterize that uncertainty. In addition, we can use Bayes' Rule to update our belief about $\theta$ having confronted the evidence $D = \{y_{1}, \ldots, y_{N}\}$:
<br>
$$
p(\theta | D) = \frac{p(D | \theta) p(\theta)}{\int_{\theta^{\prime}} p(\theta^{\prime}, D)}
$$
<br>
Notice that the answer to the Bayesian inference problem is an entire probability distribution that now characterizes our uncertainty regarding $\theta$ having confronted the evidence. If we wish to have a point estimate, we can report the mean or mode of the posterior distribution. We can also look at the standard deviation of the posterior distribution to give a sense of our uncertainty.
<br>
Below we will detail the process of Bayesian inference for coin flipping in some detail. We will also compare it with the maximum likelihood estimator.
<br>
<br>
<br>
## __Bayesian Inference for Coin Flipping__
<br>
<br>
### The Likelihood Function
<br>
As before, we have the Binomial likelihood function:
<br>
$$
p(D | \theta) = \theta^{N_{1}} (1 - \theta)^{N_{0}}
$$
<br>
Now that we have a good likelihood function, we turn our attention to a prior distribution that might characterize our beliefs about $\theta$ before we observe the data. We might like to use a _Natural Conjugate Prior_ distribution. For a Binomial likelihood function, one such prior distribution is the _Beta_ distribution.
<br>
Let's look at the Beta distribution next.
<br>
<br>
<br>
### The Beta Distribution
<br>
First of all, take a look at the Wikipedia article on the Beta distribution: <https://en.wikipedia.org/wiki/Beta_distribution>
<br>
The Beta distribution is expressed as follows:
<br>
$$
p(\theta) = Be(\theta | a, b) = \frac{1}{B(a,b)} \theta^{(a - 1)} (1 - \theta)^{(b - 1)}
$$
<br>
for $\theta$ in the interval $[0, 1]$ and where $B(a,b)$ is known as the _Beta Function_. See the Wikipedia article here: <https://en.wikipedia.org/wiki/Beta_function>. This is a special function with the following property:
<br>
$$
B(a,b) = \int\limits_{0}^{1} \theta^{(a - 1)} (1 - \theta)^{(b - 1)} d\theta.
$$
<br>
In `Python` the Beta function is `scipy.special.beta(a, b, out=None)`, and the Beta distribution is `scipy.stats.beta = <scipy.stats._continuous_distns.beta_gen object>`.
* ___Note:___ see here for documentation on the beta function: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.beta.html
* ___Note:___ see here for documentation on the beta distribution: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html
<br>
___Note:___ the random variable in the Beta distribution is $\theta$, and that $a$ and $b$ are its parameters.
<br>
It turns out that we can express the Beta function in terms of another special function, the _Gamma Function_ (see here: <https://en.wikipedia.org/wiki/Gamma_function>).
<br>
The Gamma function can also be expressed as an integral:
<br>
$$
\Gamma(a) = \int\limits_{0}^{\infty} t^{(a - 1)} exp(-t) dt
$$
<br>
It can be shown that
- $\Gamma(a) = (a - 1)!$
- $B(a,b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a + b)}$
<br>
It can also be shown that:
- $E(\theta) = \frac{a}{a + b}$
<br>
The Beta distribution can generate many different shapes. Let's look at some below using some `Python` code.
<br>
Let's start off with the case when $a = b = 1$
<br>
<br>
```
## Helper function to plot Beta priors
def plot_beta_prior(a=1, b=1):
    """Plot the pdf of a Beta(a, b) distribution over [0, 1].

    Parameters
    ----------
    a, b : float
        Shape (hyper)parameters of the Beta distribution.
    """
    x = np.linspace(0, 1, 1000)
    y = stats.beta.pdf(x, a, b)
    plt.plot(x, y, lw = 2.0, color='darkblue', alpha=0.8)
    plt.fill_between(x, y, facecolor='orange', alpha=0.5)
    plt.title(f"Beta({a},{b}) Prior Distribution")
    plt.show()
## Plot a Uniform(0, 1) which is a special case of the Beta
## Uniform(0, 1) = Beta(a=1, b=1)
plot_beta_prior(a=1, b=1)  # was (3, 3), which contradicted the comment above
```
<br>
This turns out to be a special case of the Beta distribution known as the ___uniform distribution___ (see here: <https://en.wikipedia.org/wiki/Uniform_distribution_(continuous)>). It's called the Uniform distribution because it treats each value in the interval $[0,1]$ uniformly. That is, each value in $[0,1]$ is equally likely. Sometimes this form of the Beta prior is used to characterize complete ignorance (e.g. "I don't know, so I'll treat every value as equally likely.").
<br>
<br>
Next let's look at the case when $a = b = 2$.
<br>
```
plot_beta_prior(a=2, b=2)
```
<br>
Here the distribution is centered around the value $\theta = 0.5$, but it is quite diffuse - expressing quite a bit of uncertainty.
<br>
<br>
Let's look at a case when we are just a bit more certain: $a = b = 4$.
<br>
```
plot_beta_prior(a=4, b=4)
```
<br>
And even a little bit more certain: $a = b = 8$
<br>
```
plot_beta_prior(a=8, b=8)
```
<br>
We can continue this process: $a = b = 16$
<br>
```
# Continue doubling the virtual sample size: text above says a = b = 16
# (the cell previously used 100, contradicting the 2 -> 4 -> 8 progression).
plot_beta_prior(a=16, b=16)
```
<br>
___Note:___ play with these plots and generate your own!
We can also have a situation that is not symmetric about the center: $a = 2$ and $b = 4$
<br>
```
plot_beta_prior(a=2, b=4)
```
<br>
And the other way too!
<br>
```
plot_beta_prior(a=4, b=2)
```
<br>
We can even get some funkier shapes like this: $a = b = 0.5$
<br>
```
plot_beta_prior(a=0.5, b=0.5)
```
<br>
We can see that the Beta distribution is quite flexible and can express quite a number of different prior beliefs about $\theta$ depending on the chosen hyperparameters $a$ and $b$.
One way of eliciting a prior belief about $\theta$ is to think about having witnessed some _virtual_ flips of the coin. For example, we might expect that it is a fair coin, so that the distribution ought to be centered around $0.5$. We might parameterize this with the variable $m$, thinking of $m$ as the mean proportion of heads in our virtual experiment:
$$
m = \frac{a}{(a + b)}
$$
Further, we might think of the variable $n = a + b$ as our virtual number of flips, and $a$ as the virtual number of heads and $b$ as the virtual number of tails. As $n$ gets larger we are expressing greater and greater confidence. However, if $n$ is small, say $n = 4$, then we might not be so confident (i.e. we are imagining only having witnessed 4 flips - it wouldn't take too much data to convince us that we are wrong).
Solving these two equations gives us:
$$
\begin{aligned}
a &= m \times n \\
b &= (1 - m) \times n
\end{aligned}
$$
<br>
<br>
### The Posterior Distribution
<br>
Now we have the likelihood function and a natural conjugate prior. Let's use Bayes' Rule to derive the posterior distribution.
<br>
$$
\begin{aligned}
p(\theta | D) & \propto p(D | \theta) \times p(\theta) \\
& \propto [\theta^{N_{1}} (1 - \theta)^{N_{0}}][\theta^{(a - 1)}(1 - \theta)^{(b - 1)}] \\
& = \theta^{N_{1} + a - 1} (1 - \theta)^{N_{0} + b - 1}
\end{aligned}
$$
<br>
___Note:___ this is a $Beta(\theta | N_{1} + a, N_{0} + b)$ distribution, which demonstrates that it is conjugate (i.e. of the same form as the prior).
<br>
<br>
## Applying the Model to Coin Flipping
<br>
We can now use this model to revise our beliefs as the data arrive (i.e. after each coin flip). This is called sequential analysis, and note that the Bayesian paradigm lends itself quite naturally to this kind of _on-line_ analysis due to the process of Bayesian updating. Whereas, in the frequentist case we have to precommit ourselves to conducting a study to flip the coin $N$ times, and then count the number of heads.
<br>
Let's look at an example. Suppose you start with the belief that $a = b = 2$. Then you observe the first flip $y = 1$ (a heads) to get $Beta(\theta | 3, 2)$, so the mean shifts from $E(\theta) = 2/4$ to $E(\theta | D) = 3/5$. We see that the hyperparameters $a$ and $b$ do in fact act like "pseudo counts" and correspond to "virtual" heads or tails. We might call $n = a + b$ the effective sample size (it plays a role analogous to $N = N_{1} + N_{0}$).
<br>
Let's simulate the process and see how our beliefs are updated with each flip.
<br>
```
a = 2 # virtual heads
b = 2 # virtual tails
plot_beta_prior(a=a,b=b)
## Set the "true" probability
theta = 0.5 ## i.e. a fair coin
## Simulate a single flip of the coin
flip = np.random.binomial(n=1,p=theta,size=1)
## Set up counting variables
N1 = 0
N0 = 0
## Update the prior to get the posterior using Bayes' Rule:
## posterior is Beta(N1 + a, N0 + b)
N1 += flip[0]
N0 += 1 - flip[0]
a_post = N1 + a
b_post = N0 + b
plot_beta_prior(a=a_post,b=b_post)
## Use the recent posterior as the new prior and repeat the process
a = a_post
b = b_post
## BUG FIX: the first flip is now folded into (a, b), so the counters must
## be reset -- otherwise that flip would be counted twice in the next
## update (the posterior would gain 2 pseudo-counts per single flip).
N1 = 0
N0 = 0
flip = np.random.binomial(n=1,p=theta,size=1)
## Update the prior to get the posterior using Bayes' Rule
N1 += flip[0]
N0 += 1 - flip[0]
a_post = N1 + a
b_post = N0 + b
plot_beta_prior(a=a_post,b=b_post)
```
<br>
And so on, and so on.
<br>
Here is a question to think about: _if we are flipping a biased coin (say $\theta = 0.45$), which analyst will discover it first? The Bayesian, or the Frequentist?_
We will discuss this example again later and show the comparison.
Here is another question to think about: _if two Bayesians start with different prior beliefs (say the first with $B(\theta | 1, 1)$ and the second with $B(\theta | 8, 8)$) how long will it take them to converge in their beliefs?_ _Will they for sure converge?_
Again, we can talk about this some more later.
One final note. The Bayesian does not have to proceed sequentially. He can process a batch of data all at once. Let's see this case, again starting with the prior $B(\theta | 2, 2)$.
<br>
```
## Set the prior
a, b = 2, 2
## Set true theta
theta = 0.5
## The number of coin flips
M = 10000
## Simulate flipping the coin M times
flips = np.random.binomial(n=1, p=theta, size=M)
## The update step
N1 = np.sum(flips)
N0 = M - N1
a_post = N1 + a
b_post = N0 + b
plot_beta_prior(a=a_post, b=b_post)
posterior_mean = a_post / (a_post + b_post)
print(f"\nThe posterior mean is: {posterior_mean : 0.4f}\n")
np.isclose(posterior_mean, theta)
print(f"\nThe MSE: {np.sqrt((theta - posterior_mean)**2.0) : 0.6f}\n")
```
<br>
__Note:__ with $N = 10000$ flips we become pretty certain after seeing the data.
Also, if we are dealing with a biased coin we will also learn that. Say the true probability of heads is $\theta = 0.8$, our prior is the tails-favoring $Beta(\theta | 2, 10)$, and we flip the coin $N = 1000$ times.
<br>
```
## Same batch update with a biased coin (theta = 0.8) and an asymmetric
## prior Beta(2, 10) that initially favors tails -- the data overwhelm it.
## Set the prior
a, b = 2, 10
## Set true theta
theta = 0.8
## The number of coin flips
M = 1000
## Simulate flipping the coin M times
flips = np.random.binomial(n=1, p=theta, size=M)
## The update step -- posterior is Beta(N1 + a, N0 + b) by conjugacy.
N1 = np.sum(flips)
N0 = M - N1
a_post = N1 + a
b_post = N0 + b
plot_beta_prior(a=a_post, b=b_post)
posterior_mean = a_post / (a_post + b_post)
print(f"\nThe posterior mean is: {posterior_mean : 0.4f}\n")
np.isclose(posterior_mean, theta)
## NOTE(review): this prints the absolute error, not a mean squared error.
print(f"\nThe MSE: {np.sqrt((theta - posterior_mean)**2.0) : 0.6f}\n")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/predicthq/phq-data-science-docs/blob/master/academic-events/part_1_data_engineering.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##### Academic Events Data Science Guides
# Part 1: Data Engineering
PredictHQ Academic Events data is event data related to Colleges and Universities.
This *How to Series* allows you to quickly extract the data (Part 1), explore the data (Part 2) and experiment with different aggregations (Part 3).
The Academic Events Category Documentation provides more information about the category https://docs.predicthq.com/categoryinfo/attended-events/#academic-events.
<b>This How to Guide, Part 1, is how to extract data from PredictHQ's Academic Events and covers:</b>
- [Setup](#setup)
- [Access Token](#access_token)
- [Support Functions](#support_functions)
- [SDK Parameters](#sdk_parameters)
- [SDK Call](#sdk_call)
- [Output Dataframe](#output_dataframe)
- [Appendix - Finding Place ID](#appendix)
<a id='setup'></a>
## Setup
If using Google Colab uncomment the following code block.
```
# %%capture
# !git clone https://github.com/predicthq/phq-data-science-docs.git
# %cd phq-data-science-docs/academic-events
# !pip install predicthq>=1.6.3 timezonefinder
```
If running locally, configure the required dependencies in your Python environment by using the [requirements.txt](https://github.com/predicthq/phq-data-science-docs/blob/master/academic-events/requirements.txt) file which is shared alongside the notebook.
These requirements can be installed by runing the command `pip install -r requirements.txt`
```
import pandas as pd
from datetime import datetime
from datetime import timedelta
from timezonefinder import TimezoneFinder
import pytz
from predicthq import Client
import requests
```
<a id='access_token'></a>
## Access Token
To query the API, you will need an access token.
The following link will guide you through creating an account and access token.
- https://docs.predicthq.com/guides/quickstart/
```
# Replace Access Token with own access token.
ACCESS_TOKEN = '<REPLACE WITH YOUR ACCESS TOKEN>'
phq = Client(access_token=ACCESS_TOKEN)
```
<a id='support_functions'></a>
## Support Functions
We recommend creating four additional features from the raw data:
#### 1) Sub-category (sub_category)
Each event is assigned into one of 5 sub categories that apply to academic events.
- holiday
- academic-session
- exam
- graduation
- social
Unlike PredictHQ event categories of school holidays or public holidays, academic holiday events have an associated attendance. This represents the full time undergraduate population that will be on holiday. The Academic Events holiday is associated with a more specific location. Its location represents the campus that students are on holiday from. Holidays in the academic events category are therefore likely to represent decremental demand in these locations.
#### 2) Session Type (session_type)
This summarises whether the event is attended physically, virtually or both.
The three options for this field are:
- in-person session
- online session
- hybrid session
The attendance numbers for these sessions are already adjusted to take account of hybrid or online sessions. The attendance figures still represent the physical attendance figures.
#### 3) Estimated (estimated)
Events are added when the academic calendar is released. For recurrent events, these can be estimated in advance of the official calendar release. Estimated events have an 'estimate' label applied. Estimated events can also apply to historic events where the event was added to our system but official historic calendars are not available. This field mainly applies to academic sessions and holidays.
#### 4) On Campus (on_campus)
Not all events occur on campus.
Calculation logic.
- All holidays are classed as off campus
As online and hybrid session attendance figures have already been adjusted to only include the number of students physically attending, these events are assigned as on campus.
```
def extract_matching_label(event_labels, labels_to_match):
    '''Return the first label from ``labels_to_match`` found in
    ``event_labels``, or ``None`` when there is no match.

    The order of an event's labels varies between events, so each
    candidate option is checked by membership rather than by position.
    '''
    matches = (candidate for candidate in labels_to_match
               if candidate in event_labels)
    return next(matches, None)
# Candidate label sets used with extract_matching_label to derive the
# engineered feature columns from each event's raw labels list.

# Sub-category of the academic event.
SUB_CATEGORY = frozenset([
    'academic-session',
    'exam',
    'graduation',
    'holiday',
    'social',
])
# How the event is attended; events carrying neither label are treated as
# in-person downstream (see the fillna('in-person') step).
SESSION_TYPE = frozenset([
    'online-session',
    'hybrid-session',
])
# Present when event dates are estimated rather than taken from an
# officially released academic calendar.
ESTIMATED = frozenset([
    'estimated',
])
def extract_entity_name(row):
    '''Return the name of the event's first entity (its venue), or
    ``None`` when the event has no associated entities.'''
    entities = row['entities']
    return entities[0]['name'] if entities else None
```
<a id='sdk_parameters'></a>
## SDK Parameters
We will create a dictionary of notable parameters and walk through each of the settings to use in the SDK call. A full list of available parameters and details of the API be found in our API documentation https://docs.predicthq.com/resources/events/#search-events
```
parameters_dict = dict()
```
#### Location
There are two options available to specify a location of interest:
- place_id
- radius @ latitude and longitude
`location__place_id='place_id'` or `within='radiusmi@lat,long'`
If you do not know the place_id for the location of interest you can apply a search using the API call in the [Appendix](#appendix).
For the SDK call, you can specify your own location of interest. Here are four default locations to query as an example:
- Austin, Texas: 4671654 or (30.2785, -97.7395)
- Los Angeles, California: 5368361 or (34.0778, -118.3602)
- Chicago, Illinois: 4887398 or (41.8048, -87.5871)
- Tallahassee, Florida: 4174715 or (30.4420, -84.2845)
```
parameters_dict.update(within='100mi@34.0778,-118.3602')
# m - Meters.
# km - Kilometers.
# ft - Feet.
# mi - Miles.
```
#### Time Limits
Define the period of time for which events will be returned. Either start or active can be used. The start will search based on events that start within the time period given. The active will return all events that are active within the time period, even if these events started before the start of the time period.
You could also use either of these parameters depending on your time period of interest:
```gte - Greater than or equal.``` <br>
```gt - Greater than.```<br>
```lte - Less than or equal.```<br>
```lt - Less than.```<br>
```start__tz``` or ```active__tz``` allows you to set the timezone to align with the location of interest. If no timezone is provided, UTC is used as default. This can lead to missing events at the edge of your time period, where they may not fall within the date range based on UTC, but fall within the dates based on the local timezone.
```parameters_dict.update(start__tz='America/Chicago')```
Sources to aid in finding the timezone (<a href="https://en.wikipedia.org/wiki/List_of_tz_database_time_zones">tz database</a>).
```
# Set your chosen start and end date.
START_DATE = '2019-01-01'
END_DATE = '2021-12-14'
# 'active' matches events overlapping the window, even ones that started
# before START_DATE; 'start' (commented) would match only by start date.
parameters_dict.update(active__gte=START_DATE, active__lte=END_DATE)
# parameters_dict.update(start__gte=START_DATE, start__lte=END_DATE) # Alternative use of start
# timezonefinder will help to easily find a timezone from lat long.
timezone = TimezoneFinder().timezone_at(lat=34.0778, lng=-118.3602)
print(timezone)
# Interpret the date bounds in local (Los Angeles) time rather than UTC.
parameters_dict.update(active__tz='America/Los_Angeles')
```
#### Category
These notebooks only relate to the 'academic' category. For other categories please see the relevant documentation.
```
parameters_dict.update(category=['academic'])
```
#### Limits `limit=500`
- When pulling historical data for a large time period many results are returned. To speed up the execution set ```limit``` to the highest available setting (500). By doing this each call to the API returns 500 results and this will generally speed up the time to retrieve large datasets.
```
parameters_dict.update(limit=500)
# For example:
parameters_dict
```
<a id='sdk_call'></a>
## SDK Call
Loop through the call to the API for each location of interest.
The data for each location is saved to csv as an example output. This can be adjusted to work with your own data pipeline.
```
# To run for your own location of interest.
# Either replace list of place ids.
# Or replace list of lat and long.
# Example locations (Austin, Los Angeles, Chicago, Tallahassee), expressed
# both as place ids and as lat/long pairs -- use one or the other.
LIST_OF_PLACEID = [4671654 , 5368361, 4887398, 4174715]
LIST_OF_LAT_LONG = [['30.2785', '-97.7395'],
                    ['34.0778', '-118.3602'],
                    ['41.8048', '-87.5871'],
                    ['30.4420', '-84.2845']]
# One timezone per location, in the same order as the lists above.
TIMEZONES = [ 'America/Chicago',
              'America/Los_Angeles',
              'America/Chicago',
              'America/New_York'
            ]
START_DATE = '2019-01-01'
END_DATE = '2021-01-01'
# unit can be changed (currently set to miles)
RADIUS = 10
```
In the following example: Uncomment or comment appropriately if using place_ids.
```
# Define API parameters shared by every location query.
parameters_dict = dict()
parameters_dict.update(active__gte=START_DATE, active__lte=END_DATE)
parameters_dict.update(category=['academic'])
parameters_dict.update(limit=500)
# Loop through each location of interest.
# Example code is provided to either loop through LIST_OF_PLACEID or LIST_OF_LAT_LONG.
#for timezone, place_id in zip(TIMEZONES, LIST_OF_PLACEID): # uncomment/comment as required.
for timezone, lat_long in zip(TIMEZONES, LIST_OF_LAT_LONG): # uncomment/comment as required.
    #parameters_dict.update(place__scope=place_id) # uncomment/comment as required.
    # Radius search string has the form '<radius>mi@<lat>,<long>'.
    parameters_dict.update(within='{}mi@{},{}'.format(RADIUS,
                                                      lat_long[0],
                                                      lat_long[1])) # uncomment/comment as required.
    # If time zones are unknown comment out this line and revert to UTC.
    parameters_dict.update(active__tz=timezone)
    # phq is the PredictHQ SDK client created earlier in the notebook;
    # iter_all() pages through every result for this query.
    search_results = phq.events.search(parameters_dict).iter_all()
    search_results = [result.to_dict() for result in search_results]
    df = pd.DataFrame(search_results)
    # extract_entity_name / extract_matching_label are helper functions
    # defined earlier in the notebook.
    df['entity_name'] = df.apply(extract_entity_name, axis=1)
    df[['longitude', 'latitude']] = pd.DataFrame(df.location.tolist())
    # Create a list of unique entities.
    df_entities = df.drop_duplicates('entity_name')
    df_entities = df_entities[['entity_name',
                               'latitude',
                               'longitude']]
    # Derive label-based columns from the event labels.
    df['sub_category'] = df.labels.apply(extract_matching_label,
                                         args=(SUB_CATEGORY, ))
    df['session_type'] = df.labels.apply(extract_matching_label,
                                         args=(SESSION_TYPE, ))
    df['estimated'] = df.labels.apply(extract_matching_label,
                                      args=(ESTIMATED, ))
    # fill non-specified session_type with in-person
    df['session_type'].fillna('in-person', inplace=True)
    # fill non-specified estimated with 'not_estimated'
    df['estimated'].fillna('not_estimated', inplace=True)
    # Where events are missing attendance fill with 0.
    # PredictHQ aims to have attendance for all events.
    # This assumption can be changed depending on your use case. (mean by subcategory or location)
    df['phq_attendance'].fillna(0, inplace=True)
    # If holiday then off campus
    df.loc[df['sub_category'] == 'holiday', 'on_campus'] = False
    df['on_campus'].fillna(True, inplace=True)
    # Naming functionality: the file name records how the query was scoped
    # (radius search vs. place_id search) plus the date range.
    if 'within' in parameters_dict:
        file_name = ('radius_{}_{}_{}_{}_{}'
                     .format(RADIUS,
                             lat_long[0],
                             lat_long[1],
                             START_DATE,
                             END_DATE)
                     )
    else:
        file_name = 'place_ids_{}_{}_{}'.format(place_id,
                                                START_DATE,
                                                END_DATE)
    df.to_csv('data/{}.csv'.format(file_name),
              index=False)
```
The returned data is at the event level. In Part 2, of this *How to Series* we will explore this data to understand the key trends. In Part 3, we'll prepare features to be used in a forecasting model.
<a id='output_dataframe'></a>
## Output Dataframe
```
df.head(2)
```
<a id='appendix'></a>
## Appendix: Finding ```place_id```
Here is a guide on how to link between locations and ```place_id```. Here the ```location``` could be a city, a state, a country or a continent.
- Query ```place_id``` based on ```location```
- Query ```place_hierarchies``` based on ```latitude, longitude```
- Query ```location``` based on ```place_id```
The full list of parameters you could use in your query is documented at our [Places API page](https://docs.predicthq.com/resources/places/).<br>PredictHQ uses the geonames places convention https://www.geonames.org/
#### 1) Query ```place_id``` based on ```location```
By using PredictHQ Places API, you can find the ```place_id``` for a specific ```location```. By calling the API and setting ```q``` to ```location```, the API will return the most relevant ```place_id```. Taking the top ```place_id``` will provide the most relevant ```place_id``` the ```location``` is in.
```
# Example locations.
locations = ["Los Angeles", "California", "United States", "North America"]
# Collect one row per location, then build the lookup table in one go:
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
frames = []
for location in locations:
    # Free-text search against the Places API; the first result is the
    # most relevant place for the query string.
    response = requests.get(
        url="https://api.predicthq.com/v1/places/",
        headers={
            "Authorization": "Bearer {}".format(ACCESS_TOKEN),
            "Accept": "application/json",
        },
        params={"q": location},
    )
    # Fail fast on HTTP errors rather than parsing an error body as results.
    response.raise_for_status()
    data = response.json()
    df = pd.json_normalize(data["results"])
    # iloc[[0]] keeps the top match as a one-row DataFrame.
    frames.append(df.iloc[[0]])
place_id_lookup = pd.concat(frames, ignore_index=True)
place_id_lookup[["id", "name", "type"]]
```
#### 2) Query ```place_hierarchies``` based on ```latitude, longitude```
By using PredictHQ Places Hierarchies API, you can find the ```place_hierarchies``` for a specific ```latitude, longitude```. By calling the API and setting ```location.origin``` to ```latitude, longitude```, the API will return the most relevant ```place_hierarchies```.
```
# Example locations.
latitude_longitudes = [[34.07, -118.25]]
# Collect the hierarchies per point, then concatenate once:
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
frames = []
for latitude_longitude in latitude_longitudes:
    latitude, longitude = latitude_longitude
    response = requests.get(
        url="https://api.predicthq.com/v1/places/hierarchies",
        headers={
            "Authorization": "Bearer {}".format(ACCESS_TOKEN),
            "Accept": "application/json",
        },
        params={"location.origin": f"{latitude},{longitude}"},
    )
    # Fail fast on HTTP errors rather than parsing an error body as results.
    response.raise_for_status()
    data = response.json()
    df = pd.DataFrame(data)
    # Tag each hierarchy row with the query point it came from.
    df["latitude"] = latitude
    df["longitude"] = longitude
    frames.append(df)
place_hierarchies_lookup = pd.concat(frames, ignore_index=True)
place_hierarchies_lookup
```
For each ```latitude, longitude```, the response might include more than one hierarchy. The reason for this is to match the closest place's hierarchy but we also include the closest major city's hierarchy within a radius of 50km. This only applies if the level is below region and, if it exists, the major city's hierarchy will always be the second row of the DataFrame.
#### 3) Query ```location``` based on ```place_id```
By using PredictHQ Places API, you can find the ```location``` for a specific ```place_id```. By calling the API and setting ```id``` to ```place_id```, the API will return the most relevant ```location```. Taking the top ```location``` will provide the most relevant ```location``` the ```place_id``` is in.
```
# Example locations.
place_ids = ["6295630", "6255148", "2510769", "2513413"]
# Collect one row per place_id, then build the lookup table in one go:
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
frames = []
for place_id in place_ids:
    response = requests.get(
        url="https://api.predicthq.com/v1/places/",
        headers={
            "Authorization": "Bearer {}".format(ACCESS_TOKEN),
            "Accept": "application/json",
        },
        # The id could be a comma-separated list of place_ids. In this example, the
        # events are queried based on each place_id.
        params={"id": place_id},
    )
    # Fail fast on HTTP errors rather than parsing an error body as results.
    response.raise_for_status()
    data = response.json()
    df = pd.json_normalize(data["results"])
    # iloc[[0]] keeps the top match as a one-row DataFrame.
    frames.append(df.iloc[[0]])
location_lookup = pd.concat(frames, ignore_index=True)
location_lookup[["id", "name", "type"]]
```
| github_jupyter |
# Using sci-analysis
From the python interpreter or in the first cell of a Jupyter notebook, type:
```
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import scipy.stats as st
from sci_analysis import analyze
```
This will tell python to import the sci-analysis function ``analyze()``.
> Note: Alternatively, the function ``analyse()`` can be imported instead, as it is an alias for ``analyze()``. For the case of this documentation, ``analyze()`` will be used for consistency.
If you are using sci-analysis in a Jupyter notebook, you need to use the following code instead to enable inline plots:
```
%matplotlib inline
import numpy as np
import scipy.stats as st
from sci_analysis import analyze
```
Now, sci-analysis should be ready to use. Try the following code:
```
np.random.seed(987654321)
data = st.norm.rvs(size=1000)
analyze(xdata=data)
```
A histogram, box plot, summary stats, and test for normality of the data should appear above.
> Note: numpy and scipy.stats were only imported for the purpose of the above example. sci-analysis uses numpy and scipy internally, so it isn't necessary to import them unless you want to explicitly use them.
A histogram and statistics for categorical data can be performed with the following command:
```
pets = ['dog', 'cat', 'rat', 'cat', 'rabbit', 'dog', 'hamster', 'cat', 'rabbit', 'dog', 'dog']
analyze(pets)
```
Let's examine the ``analyze()`` function in more detail. Here's the signature for the ``analyze()`` function:
```
from inspect import signature
print(analyze.__name__, signature(analyze))
print(analyze.__doc__)
```
``analyze()`` will detect the desired type of data analysis to perform based on whether the ``ydata`` argument is supplied, and whether the ``xdata`` argument is a two-dimensional array-like object.
The ``xdata`` and ``ydata`` arguments can accept most python array-like objects, with the exception of strings. For example, ``xdata`` will accept a python list, tuple, numpy array, or a pandas Series object. Internally, iterable objects are converted to a Vector object, which is a pandas Series of type ``float64``.
> Note: A one-dimensional list, tuple, numpy array, or pandas Series object will all be referred to as a vector throughout the documentation.
If only the ``xdata`` argument is passed and it is a one-dimensional vector of numeric values, the analysis performed will be a histogram of the vector with basic statistics and Shapiro-Wilk normality test. This is useful for visualizing the distribution of the vector. If only the ``xdata`` argument is passed and it is a one-dimensional vector of categorical (string) values, the analysis performed will be a histogram of categories with rank, frequencies and percentages displayed.
If ``xdata`` and ``ydata`` are supplied and are both equal length one-dimensional vectors of numeric data, an x/y scatter plot with line fit will be graphed and the correlation between the two vectors will be calculated. If there are non-numeric or missing values in either vector, they will be ignored. Only values that are numeric in each vector, at the same index will be included in the correlation. For example, the two following two vectors will yield:
```
example1 = [0.2, 0.25, 0.27, np.nan, 0.32, 0.38, 0.39, np.nan, 0.42, 0.43, 0.47, 0.51, 0.52, 0.56, 0.6]
example2 = [0.23, 0.27, 0.29, np.nan, 0.33, 0.35, 0.39, 0.42, np.nan, 0.46, 0.48, 0.49, np.nan, 0.5, 0.58]
analyze(example1, example2)
```
If ``xdata`` is a sequence or dictionary of vectors, a location test and summary statistics for each vector will be performed. If each vector is normally distributed and they all have equal variance, a one-way ANOVA is performed. If the data is not normally distributed or the vectors do not have equal variance, a non-parametric Kruskal-Wallis test will be performed instead of a one-way ANOVA.
> Note: Vectors should be independent from one another --- that is to say, there shouldn't be values in one vector that are derived from or somehow related to a value in another vector. These dependencies can lead to weird and often unpredictable results.
A proper use case for a location test would be if you had a table with measurement data for multiple groups, such as test scores per class, average height per country or measurements per trial run, where the classes, countries, and trials are the groups. In this case, each group should be represented by its own vector, and the vectors are then all wrapped in a dictionary or sequence.
If ``xdata`` is supplied as a dictionary, the keys are the names of the groups and the values are the array-like objects that represent the vectors. Alternatively, ``xdata`` can be a python sequence of the vectors and the ``groups`` argument a list of strings of the group names. The order of the group names should match the order of the vectors passed to ``xdata``.
> Note: Passing the data for each group into ``xdata`` as a sequence or dictionary is often referred to as "unstacked" data. With unstacked data, the values for each group are in their own vector. Alternatively, if values are in one vector and group names in another vector of equal length, this format is referred to as "stacked" data. The ``analyze()`` function can handle either stacked or unstacked data depending on which is most convenient.
For example:
```
np.random.seed(987654321)
group_a = st.norm.rvs(size=50)
group_b = st.norm.rvs(size=25)
group_c = st.norm.rvs(size=30)
group_d = st.norm.rvs(size=40)
analyze({"Group A": group_a, "Group B": group_b, "Group C": group_c, "Group D": group_d})
```
In the example above, sci-analysis is telling us the four groups are normally distributed (by use of the Bartlett Test, Oneway ANOVA and the near straight line fit on the quantile plot), the groups have equal variance and the groups have matching means. The only significant difference between the four groups is the sample size we specified. Let's try another example, but this time change the variance of group B:
```
np.random.seed(987654321)
group_a = st.norm.rvs(0.0, 1, size=50)
group_b = st.norm.rvs(0.0, 3, size=25)
group_c = st.norm.rvs(0.1, 1, size=30)
group_d = st.norm.rvs(0.0, 1, size=40)
analyze({"Group A": group_a, "Group B": group_b, "Group C": group_c, "Group D": group_d})
```
In the example above, group B has a standard deviation of 2.75 compared to the other groups that are approximately 1. The quantile plot on the right also shows group B has a much steeper slope compared to the other groups, implying a larger variance. Also, the Kruskal-Wallis test was used instead of the Oneway ANOVA because the pre-requisite of equal variance was not met.
In another example, let's compare groups that have different distributions and different means:
```
np.random.seed(987654321)
group_a = st.norm.rvs(0.0, 1, size=50)
group_b = st.norm.rvs(0.0, 3, size=25)
group_c = st.weibull_max.rvs(1.2, size=30)
group_d = st.norm.rvs(0.0, 1, size=40)
analyze({"Group A": group_a, "Group B": group_b, "Group C": group_c, "Group D": group_d})
```
The above example models group C as a Weibull distribution, while the other groups are normally distributed. You can see the difference in the distributions by the one-sided tail on the group C boxplot, and the curved shape of group C on the quantile plot. Group C also has significantly the lowest mean as indicated by the Tukey-Kramer circles and the Kruskal-Wallis test.
| github_jupyter |
# Using `astropy.coordinates` to Match Catalogs and Plan Observations
## Authors
Erik Tollerud, Kelle Cruz
## Learning Goals
* TBD
## Keywords
coordinates, catalog matching, observational astronomy, astroquery
## Summary
In this tutorial, we will explore how the `astropy.coordinates` package and related astropy functionality can be used to help in planning observations or other exercises focused on large coordinate catalogs.
```
# Python standard-library
from urllib.parse import urlencode
from urllib.request import urlretrieve
# Third-party dependencies
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table
import numpy as np
from IPython.display import Image
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
%matplotlib inline
```
## Section 0: Describing on-sky locations with `coordinates`
Let's start by considering a field around the picturesque Hickson Compact Group 7. To do anything with this, we need to get an object that represents the coordinates of the center of this group.
```
hcg7_center = SkyCoord(9.81625*u.deg, 0.88806*u.deg, frame='icrs')
hcg7_center
```
<div class="alert alert-info">
**Note:** If you already worked through [Coords 1: Getting Started with Coordinates](http://learn.astropy.org/rst-tutorials/Coordinates-Intro.html), feel free to skip to [Section 1](#Section-1:).
</div>
In Astropy, the most common object you'll work with for coordinates is `SkyCoord`. A `SkyCoord` can be created most easily directly from angles as shown below. It's also wise to explicitly specify the frame your coordinates are in, although this is not strictly necessary because the default is ICRS.
(If you're not sure what ICRS is, it's basically safe to think of it as an approximation to an equatorial system at the J2000 equinox).
SkyCoord will also accept string-formatted coordinates either as separate strings for ra/dec or a single string. You'll have to give units, though, if they aren't part of the string itself.
```
SkyCoord('0h39m15.9s', '0d53m17.016s', frame='icrs')
SkyCoord('0:39:15.9 0:53:17.016', unit=(u.hour, u.deg), frame='icrs')
```
If the object you're interested in is in [SESAME](http://cdsweb.u-strasbg.fr/cgi-bin/Sesame), you can also look it up directly from its name using the `SkyCoord.from_name()` class method<sup>1</sup>. Note that this requires an internet connection. It's safe to skip if you don't have one, because we defined it above explicitly.
If you don't know what a class method is, think of it like an alternative constructor for a `SkyCoord` object -- calling `SkyCoord.from_name()` with a name gives you a new `SkyCoord` object. For more detailed background on what class methods are and when they're useful, see [this page](https://julien.danjou.info/blog/2013/guide-python-static-class-abstract-methods).
```
hcg7_center = SkyCoord.from_name('HCG 7')
hcg7_center
```
This object we just created has various useful ways of accessing the information contained within it. In particular, the ``ra`` and ``dec`` attributes are specialized [Quantity](http://docs.astropy.org/en/stable/units/index.html) objects (actually, a subclass called [Angle](http://docs.astropy.org/en/stable/api/astropy.coordinates.Angle.html), which in turn is subclassed by [Latitude](http://docs.astropy.org/en/stable/api/astropy.coordinates.Latitude.html) and [Longitude](http://docs.astropy.org/en/stable/api/astropy.coordinates.Longitude.html)). These objects store angles and provide pretty representations of those angles, as well as some useful attributes to quickly convert to common angle units:
```
type(hcg7_center.ra), type(hcg7_center.dec)
hcg7_center.dec
hcg7_center.ra
hcg7_center.ra.hour
```
Now that we have a `SkyCoord` object, we can try to use it to access data from the [Sloan Digitial Sky Survey](http://www.sdss.org/) (SDSS). Let's start by trying to get a picture using the SDSS image cutout service to make sure HCG7 is in the SDSS footprint and has good image quality.
This requires an internet connection, but if it fails, don't worry: the file is included in the repository so you can just let it use the local file ``'HCG7_SDSS_cutout.jpg'``, defined at the top of the cell.
```
# Fetch a JPEG cutout of the HCG 7 field from the SDSS DR12 image
# cutout service, centred on hcg7_center.
impix = 1024            # cutout size in pixels (square image)
imsize = 12*u.arcmin    # cutout size on the sky
cutoutbaseurl = 'http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx'
cutout_params = {
    'ra': hcg7_center.ra.deg,
    'dec': hcg7_center.dec.deg,
    'width': impix,
    'height': impix,
    # scale is arcseconds per pixel
    'scale': imsize.to(u.arcsec).value/impix,
}
query_string = urlencode(cutout_params)
url = cutoutbaseurl + '?' + query_string
# this downloads the image to your disk
urlretrieve(url, 'HCG7_SDSS_cutout.jpg')
```
Now lets take a look at the image.
```
Image('HCG7_SDSS_cutout.jpg')
```
## Section 1: Using `coordinates` and `table` to match and compare catalogs
At the end of the last section, we determined that HCG7 is in the SDSS imaging survey, so that means we can use the cells below to download catalogs of objects directly from the SDSS. Later on, we will match this catalog to another catalog covering the same field, allowing us to make plots using the combination of the two catalogs.
We will access the SDSS SQL database using the [astroquery](https://astroquery.readthedocs.org) affiliated package. This will require an internet connection and a working install of astroquery. If you don't have these you can just skip down two cells, because the data files are provided with the repository. Depending on your version of astroquery it might also issue a warning, which you should be able to safely ignore.
```
from astroquery.sdss import SDSS
sdss = SDSS.query_region(coordinates=hcg7_center, radius=20*u.arcmin,
spectro=True,
photoobj_fields=['ra','dec','u','g','r','i','z'])
```
`astroquery` queries gives us back an [astropy.table.Table](http://docs.astropy.org/en/stable/table/index.html) object. We could just work with this directly without saving anything to disk if we wanted to. But here we will use the capability to write to disk. That way, if you quit the session and come back later, you don't have to run the query a second time.
(Note that this will fail if you skipped the last step. Don't worry, you can just skip to the next cell with ``Table.read`` and use the copy of this table included in the tutorial.)
```
sdss.write('HCG7_SDSS_photo.dat', format='ascii')
```
If you don't have internet, you can read the table into python by running the cell below. But if you did the astroquery step above, you could skip this, as the table is already in memory as the `sdss` variable.
```
sdss = Table.read('HCG7_SDSS_photo.dat', format='ascii')
```
Ok, so we have a catalog of objects we got from the SDSS. Now lets say you have your own catalog of objects in the same field that you want to match to this SDSS catalog. In this case, we will use a catalog extracted from the [2MASS](http://www.ipac.caltech.edu/2mass/). We first load up this catalog into python.
```
twomass = Table.read('HCG7_2MASS.tbl', format='ascii')
```
Now to do matching we need `SkyCoord` objects. We'll have to build these from the tables we loaded, but it turns out that's pretty straightforward: we grab the RA and dec columns from the table and provide them to the `SkyCoord` constructor. Lets first have a look at the tables to see just what everything is that's in them.
```
sdss # just to see an example of the format
twomass # just to see an example of the format
```
OK, looks like they both have ``ra`` and ``dec`` columns, so we should be able to use that to make `SkyCoord`s.
You might first think you need to create a separate `SkyCoord` for *every* row in the table, given that up until now all `SkyCoord`s we made were for just a single point. You could do this, but it will make your code much slower. Instead, `SkyCoord` supports *arrays* of coordinate values - you just pass in array-like inputs (array `Quantity`s, lists of strings, `Table` columns, etc.), and `SkyCoord` will happily do all of its operations element-wise.
```
coo_sdss = SkyCoord(sdss['ra']*u.deg, sdss['dec']*u.deg)
coo_twomass = SkyCoord(twomass['ra'], twomass['dec'])
```
Note a subtle difference here: you had to give units for SDSS but *not* for 2MASS. This is because the 2MASS table has units associated with the columns, while the SDSS table does not (so you have to put them in manually).
Now we simply use the ``SkyCoord.match_to_catalog_sky`` method to match the two catalogs. Note that order matters: we're matching 2MASS to SDSS because there are many *more* entires in the SDSS, so it seems likely that most 2MASS objects are in SDSS (but not vice versa).
```
idx_sdss, d2d_sdss, d3d_sdss = coo_twomass.match_to_catalog_sky(coo_sdss)
```
``idx`` are the indices into ``coo_sdss`` that get the closest matches, while ``d2d`` and ``d3d`` are the on-sky and real-space distances between the matches. In our case ``d3d`` can be ignored because we didn't give a line-of-sight distance, so its value is not particularly useful. But ``d2d`` provides a good diagnosis of whether we actually have real matches:
```
plt.hist(d2d_sdss.arcsec, histtype='step', range=(0,2))
plt.xlabel('separation [arcsec]')
plt.tight_layout()
```
Ok, they're all within an arcsecond — that's promising. But are we sure it's not just that *anything* has matches within an arcsecond? Let's check by comparing to a set of *random* points.
We first create a set of uniformly random points (with size matching `coo_twomass`) that cover the same range of RA/Decs that are in `coo_sdss`.
```
# Draw len(coo_twomass) points uniformly over the RA/Dec bounding box of
# the SDSS catalog; .ptp() is the (max - min) span of each coordinate, so
# rand()*ptp + min maps [0, 1) onto the catalog's coordinate range.
ras_sim = np.random.rand(len(coo_twomass))*coo_sdss.ra.ptp() + coo_sdss.ra.min()
decs_sim = np.random.rand(len(coo_twomass))*coo_sdss.dec.ptp() + coo_sdss.dec.min()
ras_sim, decs_sim
```
Now we create a `SkyCoord` from these points and match it to `coo_sdss` just like we did above for 2MASS.
Note that we do not need to explicitly specify units for `ras_sim` and `decs_sim`, because they already are unitful `Angle` objects because they were created from `coo_sdss.ra`/`coo_sdss.dec`.
```
coo_simulated = SkyCoord(ras_sim, decs_sim)
idx_sim, d2d_sim, d3d_sim = coo_simulated.match_to_catalog_sky(coo_sdss)
```
Now lets plot up the histogram of separations from our simulated catalog so we can compare to the above results from the *real* catalog.
```
plt.hist(d2d_sim.arcsec, bins='auto', histtype='step', label='Simulated', linestyle='dashed')
plt.hist(d2d_sdss.arcsec, bins='auto', histtype='step', label='2MASS')
plt.xlabel('separation [arcsec]')
plt.legend(loc=0)
plt.tight_layout()
```
Alright, great - looks like randomly placed sources should be more like an arc*minute* away, so we can probably trust that our earlier matches which were within an arc*second* are valid. So with that in mind, we can start computing things like colors that combine the SDSS and 2MASS photometry.
```
rmag = sdss['r'][idx_sdss]
grcolor = sdss['g'][idx_sdss] - rmag
rKcolor = rmag - twomass['k_m_ext']
plt.subplot(1, 2, 1)
plt.scatter(rKcolor, rmag)
plt.xlabel('r-K')
plt.ylabel('r')
plt.xlim(2.5, 4)
plt.ylim(18, 12) #mags go backwards!
plt.subplot(1, 2, 2)
plt.scatter(rKcolor, rmag)
plt.xlabel('r-K')
plt.ylabel('g-r')
plt.xlim(2.5, 4)
plt.tight_layout()
```
For more on what matching options are available, check out the [separation and matching section of the astropy documentation](http://astropy.readthedocs.org/en/latest/coordinates/matchsep.html). Or for more on what you can do with `SkyCoord`, see [its API documentation](http://astropy.readthedocs.org/en/latest/api/astropy.coordinates.SkyCoord.html).
### Exercises
Check that the ``d2d_sdss`` variable matches the on-sky separations you get from comaparing the matched ``coo_sdss`` entries to ``coo_twomass``.
Hint: You'll likely find the ``SkyCoord.separation()`` method useful here.
Compute the *physical* separation between two (or more) objects in the catalogs. You'll need line-of-sight distances, so a reasonable guess might be the distance to HCG 7, which is about 55 Mpc.
Hint: you'll want to create new `SkyCoord` objects, but with ``distance`` attributes. There's also a `SkyCoord` method that should do the rest of the work, but you'll have to poke around to figure out what it is.
## Transforming between coordinate systems and planning observations
Now lets say something excites you about one of the objects in this catalog, and you want to know if and when you might go about observing it. `astropy.coordinates` provides tools to enable this, as well.
### Introducing frame transformations
To understand the code in this section, it may help to read over the [overview of the astropy coordinates scheme](http://astropy.readthedocs.org/en/latest/coordinates/index.html#overview-of-astropy-coordinates-concepts). The key bit to understand is that all coordinates in astropy are in particular "frames", and we can transform between a specific `SkyCoord` object from one frame to another. For example, we can transform our previously-defined center of HCG7 from ICRS to Galactic coordinates:
```
hcg7_center.galactic
```
The above is actually a special "quick-access" form which internally does the same as what's in the cell below: uses the `transform_to()` method to convert from one frame to another.
```
from astropy.coordinates import Galactic
hcg7_center.transform_to(Galactic())
```
Note that changing frames also changes some of the attributes of the object, but usually in a way that makes sense:
```
hcg7_center.galactic.ra # should fail because galactic coordinates are l/b not RA/Dec
hcg7_center.galactic.b
```
### Using frame transformations to get to AltAz
To actually do anything with observability we need to convert to a frame local to an on-earth observer. By far the most common choice is horizontal coordinates, or "AltAz" coordinates. We first need to specify both where and when we want to try to observe.
```
from astropy.coordinates import EarthLocation
from astropy.time import Time
observing_location = EarthLocation(lat='31d57.5m', lon='-111d35.8m', height=2096*u.m) # Kitt Peak, Arizona
# If you're using astropy v1.1 or later, you can replace the above with this:
#observing_location = EarthLocation.of_site('Kitt Peak')
observing_time = Time('2010-12-21 1:00') # 1am UTC=6pm AZ mountain time
```
Now we use these to create an `AltAz` frame object. Note that this frame has some other information about the atmosphere, which can be used to correct for atmospheric refraction. Here we leave that alone, because the default is to ignore this effect (by setting the pressure to 0).
```
from astropy.coordinates import AltAz
aa = AltAz(location=observing_location, obstime=observing_time)
aa
```
Now we can just transform our ICRS `SkyCoord` to `AltAz` to get the location in the sky over Kitt Peak at the requested time.
```
hcg7_center.transform_to(aa)
```
Alright, it's up at 6pm, but that's pretty early to be observing. We could just try various times one at a time to see if the airmass is lower at a darker time, but we can do better: let's try to create an airmass plot.
```
# this gives a Time object with an *array* of times
delta_hours = np.linspace(0, 6, 100)*u.hour
full_night_times = observing_time + delta_hours
# One AltAz frame per sampled time; transforming the target into it
# traces the object's position across the night.
full_night_aa_frames = AltAz(location=observing_location, obstime=full_night_times)
full_night_aa_coos = hcg7_center.transform_to(full_night_aa_frames)
# secz = sec(zenith angle), the standard plane-parallel airmass estimate.
plt.plot(delta_hours, full_night_aa_coos.secz)
plt.xlabel('Hours from 6pm AZ time')
plt.ylabel('Airmass [Sec(z)]')
plt.ylim(0.9,3)
plt.tight_layout()
```
Great! Looks like it's at the lowest airmass in another hour or so (7pm). But that might still be twilight... When should we start observing for proper dark skies? Fortunately, astropy provides a ``get_sun`` function that can be used to check this. Let's use it to check if we're in 18-degree twilight or not.
```
from astropy.coordinates import get_sun
full_night_sun_coos = get_sun(full_night_times).transform_to(full_night_aa_frames)
plt.plot(delta_hours, full_night_sun_coos.alt.deg)
plt.axhline(-18, color='k')
plt.xlabel('Hours from 6pm AZ time')
plt.ylabel('Sun altitude')
plt.tight_layout()
```
Looks like it's just below 18 degrees at 7, so you should be good to go!
### Exercises
Try to actually compute to some arbitrary precision (rather than eye-balling on a plot) when 18 degree twilight or sunrise/sunset hits on that night.
Try converting the HCG7 coordinates to an equatorial frame at some other equinox a while in the past (like J2000). Do you see the precession of the equinoxes?
Hint: To see a diagram of the supported frames look [here](http://docs.astropy.org/en/stable/coordinates/#module-astropy.coordinates). One of those will do what you need if you give it the right frame attributes.
## Wrap-up
For lots more documentation on the many other features of `astropy.coordinates`, check out [its section of the documentation](http://astropy.readthedocs.org/en/latest/coordinates/index.html).
You might also be interested in [the astroplan affiliated package](http://astroplan.readthedocs.org/), which uses the `astropy.coordinates` to do more advanced versions of the tasks in the last section of this tutorial.
| github_jupyter |
# Simple SPACE model example
Written by Charles M. Shobe to accompany the following publication:
Shobe, C.M., Tucker, G.E., and Barnhart, K.B.: The SPACE 1.0 model: A Landlab component for 2-D calculation of sediment transport, bedrock erosion, and landscape evolution, submitted to Geoscientific Model Development.
This notebook is an executable version of the code presented in the user manual. The steps listed here correspond to the text and code in the user manual.
Written on 5th July 2017.
### Step 1: Import the necessary libraries
```
## Import Numpy and Matplotlib packages
import numpy as np
import matplotlib.pyplot as plt #For plotting results; optional
## Import Landlab components
#Pit filling; optional
from landlab.components import DepressionFinderAndRouter
#Flow routing
from landlab.components import FlowRouter #Flow router
#SPACE model
from landlab.components import Space #SPACE model
## Import Landlab utilities
from landlab import RasterModelGrid #Grid utility
from landlab import imshow_grid #For plotting results; optional
%matplotlib inline
```
### Step 2: Define the model domain and initial conditions
```
## Set grid parameters
num_rows = 20
num_columns = 20
node_spacing = 100.0
#Instantiate model grid
mg = RasterModelGrid((num_rows, num_columns), node_spacing)
#Add field 'topographic__elevation' to the grid
mg.add_zeros('node', 'topographic__elevation')
#Set constant random seed for consistent topographic roughness
np.random.seed(seed = 5000)
## Create initial model topography
#plane tilted towards the lower-left corner
topo = mg.node_y/100000 + mg.node_x/100000
#topographic roughness: up to 1 mm of uniform noise per node
random_noise = np.random.rand(len(mg.node_y)) /1000
#impose topography values on model grid
mg['node']['topographic__elevation'] += (topo + random_noise)
#Add field 'soil__depth' to the grid
mg.add_zeros('node', 'soil__depth')
#Set 2 m of initial soil depth at core nodes
mg.at_node['soil__depth'][mg.core_nodes] = 2.0 #meters
#Add field 'bedrock__elevation' to the grid
mg.add_zeros('bedrock__elevation', at='node')
#Sum 'soil__depth' and 'bedrock__elevation'
#to yield 'topographic elevation' (surface = bedrock + soil)
mg.at_node['bedrock__elevation'][:] = mg.at_node['topographic__elevation']
mg.at_node['topographic__elevation'][:] += mg.at_node['soil__depth']
```
### Step 3: Set the boundary conditions
```
#Close all model boundary edges
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
#Set lower-left (southwest) corner as an open boundary
mg.set_watershed_boundary_condition_outlet_id(0,mg['node']['topographic__elevation'], -9999.)
```
### Step 4: Initialize the SPACE component and any other components used
```
#Instantiate flow router
fr = FlowRouter(mg)
#Instantiate depression finder and router; optional
df = DepressionFinderAndRouter(mg)
#Instantiate SPACE model with chosen parameters
sp = Space(mg, K_sed=0.01, K_br=0.001,
F_f=0., phi=0., H_star=1., v_s=5.0, m_sp=0.5, n_sp=1.0,
sp_crit_sed=0, sp_crit_br=0, method='simple_stream_power')
```
### Step 5: Run the time loop
```
#Set model timestep
timestep = 1.0 #years
#Set elapsed time to zero
elapsed_time = 0 #years
#Set timestep count to zero
count = 0
#Set model run time
run_time = 500 #years
#Array to save sediment flux values
sed_flux = np.zeros(int(run_time // timestep))
while elapsed_time < run_time: #time units of years
#Run the flow router
fr.run_one_step()
#Run the depression finder and router; optional
df.map_depressions()
#Get list of nodes in depressions; only
#used if using DepressionFinderAndRouter
flooded = np.where(df.flood_status==3)[0]
sp.run_one_step(dt = timestep, flooded_nodes=flooded)
#Save sediment flux value to array
sed_flux[count] = mg.at_node['sediment__flux'][0]
#Add to value of elapsed time
elapsed_time += timestep
#Increase timestep count
count += 1
```
## Visualization of results
### Sediment flux map
```
#Instantiate figure
fig = plt.figure()
#Instantiate subplot
plot = plt.subplot()
#Show sediment flux map
imshow_grid(mg, 'sediment__flux', plot_name='Sediment flux', var_name = 'Sediment flux', var_units=r'm$^3$/yr', grid_units=('m', 'm'), cmap='terrain')
#Export figure to image
fig.savefig('sediment_flux_map.eps')
```
### Sedimentograph
```
#Instantiate figure
fig = plt.figure()
#Instantiate subplot
sedfluxplot = plt.subplot()
#Plot data
sedfluxplot.plot(np.arange(500),sed_flux, color = 'k', linewidth = 3)
#Add axis labels
sedfluxplot.set_xlabel('Time [yr]')
sedfluxplot.set_ylabel(r'Sediment flux [m$^3$/yr]')
#Export figure to image
fig.savefig('sedimentograph.eps')
```
| github_jupyter |
# Classes
Variables, lists, dictionaries, etc. in Python are all objects. Without getting into the theory of object-oriented programming, the concepts will be explained along the way in this tutorial.
A class is declared as follows
class class_name:
Functions
```
class FirstClass:
    # Minimal class definition: `pass` is a placeholder statement meaning
    # "do nothing", used here because the class body is intentionally empty.
    pass
```
**pass** in python means do nothing.
Above, a class named "FirstClass" is declared. Now consider an object "egclass" that has all the characteristics of "FirstClass". To create it, you call "FirstClass()" and assign the result to "egclass". In Python jargon this is called creating an instance: "egclass" is an instance of "FirstClass".
```
egclass = FirstClass()
type(egclass)
```
Now let us add some "functionality" to the class so that our "FirstClass" is defined in a better way. A function inside a class is called a "method" of that class.
Most classes have a function named "\_\_init\_\_". These are called magic methods. In this method you initialize the variables of the class, along with any other initial setup that applies to all methods. A variable inside a class is called an attribute.
These help simplify the process of initializing an instance. For example,
Without the use of magic method or \_\_init\_\_ which is otherwise called as constructors. One had to define a **init( )** method and call the **init( )** function.
```
eg0 = FirstClass()
eg0.init()
```
But when the constructor is defined the \_\_init\_\_ is called thus intializing the instance created.
We will make our "FirstClass" to accept two variables name and symbol.
I will be explaining about the "self" in a while.
```
class FirstClass:
    # __init__ is the constructor: it runs automatically each time the class
    # is instantiated, e.g. FirstClass('one', 1).
    def __init__(self,name,symbol):
        self.name = name      # instance attribute storing the given name
        self.symbol = symbol  # instance attribute storing the given symbol
```
Now that we have defined a function and added the \_\_init\_\_ method. We can create a instance of FirstClass which now accepts two arguments.
```
eg1 = FirstClass('one',1)
eg2 = FirstClass('two',2)
print (eg1.name, eg1.symbol)
print (eg2.name, eg2.symbol)
```
**dir( )** function comes very handy in looking into what the class contains and what all method it offers
```
dir(FirstClass)
```
**dir( )** of an instance also shows it's defined attributes.
```
dir(eg1)
```
Changing the FirstClass function a bit,
```
class FirstClass:
    # Same constructor, but the attributes are now named n and s instead of
    # name and symbol -- so accessing eg1.name afterwards raises AttributeError.
    def __init__(self,name,symbol):
        self.n = name    # attribute renamed from 'name' to 'n'
        self.s = symbol  # attribute renamed from 'symbol' to 's'
```
Changing self.name and self.symbol to self.n and self.s respectively will yield,
```
eg1 = FirstClass('one',1)
eg2 = FirstClass('two',2)
print (eg1.name, eg1.symbol)
print (eg2.name, eg2.symbol)
```
AttributeError — remember that variables are nothing but attributes inside a class? This means we have not used the correct attribute names for the instance.
```
dir(eg1)
print (eg1.n, eg1.s)
print (eg2.n, eg2.s)
```
So now we have solved the error. Now let us compare the two examples that we saw.
When I declared self.name and self.symbol, there was no attribute error for eg1.name and eg1.symbol and when I declared self.n and self.s, there was no attribute error for eg1.n and eg1.s
From the above we can conclude that self is nothing but the instance itself.
Remember, self is not predefined it is userdefined. You can make use of anything you are comfortable with. But it has become a common practice to use self.
```
class FirstClass:
    # 'self' is only a naming convention: the first parameter of a method
    # receives the instance regardless of what it is called, as the
    # arbitrary name asdf1234 demonstrates here.
    def __init__(asdf1234,name,symbol):
        asdf1234.n = name
        asdf1234.s = symbol

eg1 = FirstClass('one',1)
eg2 = FirstClass('two',2)
print (eg1.n, eg1.s)
print (eg2.n, eg2.s)
```
Since eg1 and eg2 are instances of FirstClass, they need not be limited to the attributes defined inside FirstClass. An instance can be extended by assigning new attributes to it directly, without those attributes being declared inside FirstClass.
```
eg1.cube = 1
eg2.cube = 8
dir(eg1)
```
Just like the global and local variables we saw earlier, classes also have their own kinds of variables.
Class Attribute : attributes defined outside the method and is applicable to all the instances.
Instance Attribute : attributes defined inside a method; they belong to the individual instance and are unique to each instance.
```
class FirstClass:
    # Class attribute: defined directly on the class and shared by every
    # instance (readable as FirstClass.test or instance.test).
    test = 'test'
    def __init__(self,name,symbol):
        # Instance attributes: set per instance inside the constructor.
        self.name = name
        self.symbol = symbol
```
Here test is a class attribute and name is an instance attribute.
```
eg3 = FirstClass('Three',3)
print (eg3.test, eg3.name)
```
Let us add some more methods to FirstClass.
```
class FirstClass:
    """Pair a number's textual name with its numeric value.

    Demonstrates instance methods that operate on instance attributes.
    """

    def __init__(self, name, symbol):
        self.name = name      # textual name of the number, e.g. 'Five'
        self.symbol = symbol  # numeric value, e.g. 5

    def square(self):
        """Return the symbol squared."""
        # Idiomatic power operator instead of repeated multiplication.
        return self.symbol ** 2

    def cube(self):
        """Return the symbol cubed."""
        return self.symbol ** 3

    def multiply(self, x):
        """Return the symbol multiplied by x."""
        return self.symbol * x

eg4 = FirstClass('Five', 5)
print(eg4.square())
print(eg4.cube())
eg4.multiply(2)
```
The above can also be written as,
```
FirstClass.multiply(eg4,2)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/GabrielLourenco12/python_exercises/blob/main/Exercicios5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Exercícios 5
Ler uma temperatura em graus Celsius e apresentá-la convertida em graus Fahrenheit. A fórmula de conversão é F = (9 * C + 160) / 5, na qual F é a temperatura em Fahrenheit e C é a temperatura em graus Celsius
– Função para ler e retorna o valor da temperatura (não recebe parâmetro)
– Função para fazer o cálculo (recebe como parâmetro a temperatura em graus Celsius)
– Função para mostrar o resultado, recebendo como parâmetro o valor e fazendo a impressão
```
def conversao_impressao(f):
    # Display the converted temperature in Fahrenheit.
    print('Valor em Fahrenheit é', f, 'ºF')


def conversao_calculo(c):
    # Convert Celsius to Fahrenheit, then hand the result to the printer.
    fahrenheit = (9 * c + 160) / 5
    conversao_impressao(fahrenheit)


def conversao_leitura():
    # Read the Celsius temperature from the user and start the chain.
    celsius = float(input('Temperatura em ºC: '))
    conversao_calculo(celsius)


conversao_leitura()
```
Efetuar o cálculo da quantidade de litros de combustível gasto em uma viagem, utilizando um automóvel que faz 12 Km por litro. Para obter o cálculo, o usuário deve fornecer o tempo gasto na viagem e a velocidade média durante ela. Desta forma, será possível obter a distância percorrida com a fórmula DISTANCIA = TEMPO * VELOCIDADE. Tendo o valor da distância, basta calcular a quantidade de litros de combustível utilizada na viagem, com a fórmula: LITROS_USADOS = DISTANCIA / 12. O programa deve apresentar os valores da velocidade média, tempo gasto na viagem, a distância percorrida e a quantidade de litros utilizada na viagem
– Função para ler os valores (não recebe parâmetro e retorna os dois valores)
– Função para calcular a distância (recebe como parâmetro o tempo e a velocidade e retorna a distância)
– Função para calcular a quantidade de litros (recebe como parâmetro a distância e retorna os litros)
– Função para apresentar o resultado (recebe como parâmetro os valores e somente imprime o resultado)
```
def leitura():
    # Read trip duration (minutes, converted to hours) and average speed.
    minutos = float(input('Tempo gasto (min): '))
    tempo = minutos / 60
    velo_media = float(input('Velocidade média (km/h): '))
    return tempo, velo_media


def calc_distancia(tempo, velo_media):
    # Distance travelled = time * average speed.
    return tempo * velo_media


def calc_litros(distancia):
    # The car covers 12 km per litre of fuel.
    return distancia / 12


def impressao(tempo, velo_media, distancia, litros):
    # Report every computed value for the trip.
    for rotulo, valor in (('Tempo gasto: ', tempo),
                          ('Velocidade média: ', velo_media),
                          ('Distância: ', distancia),
                          ('Litros gastos: ', litros)):
        print(rotulo, valor)


t, v = leitura()
d = calc_distancia(t, v)
l = calc_litros(d)
impressao(t, v, d, l)
```
| github_jupyter |
## Numpy
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import sys
p = os.path.join(os.path.dirname('__file__'), '..')
sys.path.append(p)
from common import *
```
### Init
```
np.ones(10).astype(int)
np.zeros(10)
np.arange(1,10)
# Gaussian normal distribution
np.random.randn(2,2)
# Random uniform, no params
np.random.rand(10)
## Uniform distribution, custom
np.random.uniform(low=0,high=10,size=(2,2))
```
### Slice
```
x = np.arange(10)
x
## Reverse array
x[::-1]
## No copy subarray (MODIFIES ORIGINAL ARRAY)
y = x[3:7]
y[2] = 999
x
## Copy subarray
x = np.arange(10)
y = x[3:7].copy()
y[2] = 999
x
```
### Concat
```
# Concat 1D arrays
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
np.concatenate([x, y])
# Concat 2D along Row
grid = np.array([[1, 2, 3],
[4, 5, 6]])
np.concatenate([grid, grid])
# Concat 2D arrays along Column
grid = np.array([[1, 2, 3],
[4, 5, 6]])
np.concatenate([grid, grid], axis=1)
```
### Stack
* Like concatenate, but for arrays of different dimensions
```
# Vertical
x = np.array([1, 2, 3])
grid = np.array([[9, 8, 7],
[6, 5, 4]])
np.vstack([x, grid])
# Horizontal
y = np.array([[99],
[99]])
np.hstack([grid, y])
```
### Functions
```
x = np.arange(1,10)
# Natural log (undos exponent)
np.log(x)
# Base 10 log
np.log10(x)
# Power
x**2
# Exponentials
print(2**x) # 2^x
np.exp(x) # e^x
```
### Min, Max, Sum
* Numpy's built-in methods are faster
```
big_array = np.random.rand(100000)
%timeit sum(big_array)
%timeit np.sum(big_array)
M = np.random.uniform(1,10, (2, 3)).astype(int)
M
# Sum
print(np.sum(M)) # sum all cells
print(np.sum(M, axis=1)) # sum the columns
print(np.sum(M, axis=0)) # sum the rows
# Percent
print("25th percentile: ", np.percentile(M, 25))
print("Median: ", np.median(M))
print("75th percentile: ", np.percentile(M, 75))
```
### Broadcasting
* https://jakevdp.github.io/PythonDataScienceHandbook/02.05-computation-on-arrays-broadcasting.html
* Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is padded with ones on its leading (left) side.
* Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape.
* Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised.
### Masking
* https://jakevdp.github.io/PythonDataScienceHandbook/02.06-boolean-arrays-and-masks.html
```
x = np.array([1, 2, 3, 4, 5])
# Get boolean mask
x != 0
# Count non-zero
np.sum(x != 0)
# Compound expressions
np.sum((x > 2) & (x < 5))
# Reverse mask (NOT)
mask = (x > 2) & (x < 5)
print(mask)
print(~mask)
# Get subarray from mask
x[mask]
```
### Bitwise operators
* & np.bitwise_and
* | np.bitwise_or
* ^ np.bitwise_xor
* ~ np.bitwise_not
```
x[x < 5]
"""
`and` and `or` gauge the truth or falsehood of entire object (all elements)
`&` and `|` refer to bits within each object
"""
print(bool(42), bool(0))
print(bool(42 and 0))
print(bool(42 or 0))
# Binary operators
bin(42 & 59)
bin(42 | 59)
A = np.array([1, 0, 1, 0, 1, 0])
B = np.array([1, 1, 1, 0, 1, 1])
A | B
A or B
```
### Sorting
```
x = np.arange(1,10)
np.random.shuffle(x) # in-place
x
# Sort the array
np.sort(x)
# Return the sorted indices
x, np.argsort(x)
X = np.random.randint(0, 10, (3, 4))
X
# Sort Columns
np.sort(X, axis=0)
# Sort Rows
np.sort(X, axis=1)
```
### K-Nearest Neighbors
```
# K-smallest values in the array at the beginning
x = np.array([7, 2, 3, 1, 6, 5, 4])
np.partition(x, 3)
X = np.random.randint(1,10, (10, 2))
plt.scatter(X[:, 0], X[:, 1], s=100);
## Sum squared distance
# for each pair of points, compute differences in their coordinates
differences = X[:, np.newaxis, :] - X[np.newaxis, :, :]
differences.shape
# square the coordinate differences
sq_differences = differences ** 2
sq_differences.shape
# sum the coordinate differences to get the squared distance
dist_sq = sq_differences.sum(-1)
dist_sq
dist_sq.diagonal()
nearest = np.argsort(dist_sq, axis=1)
print(nearest)
K = 2
nearest_partition = np.argpartition(dist_sq, K + 1, axis=1)
plt.scatter(X[:, 0], X[:, 1], s=100)
# draw lines from each point to its two nearest neighbors
K = 2
for i in range(X.shape[0]):
for j in nearest_partition[i, :K+1]:
# plot a line from X[i] to X[j]
# use some zip magic to make it happen:
plt.plot(*zip(X[j], X[i]), color='black')
```
| github_jupyter |
# Creating Python Virtual Environments with conda
- [Overview](#conda-virual-env-overview)
- [Setting Up a Virtual Environment Using conda](#setting-up-a-virtual-environment-using-conda)
- [Creating a conda Virtual Environment from a File](#creating-a-conda-environment-from-a-file)
- [Setting Up a RAPIDS conda Environment with cuDF and cuML](#setting-up-a-rapids-conda-environment)
<a id="conda-virual-env-overview"></a>
## Overview
A Python virtual environment is a named, isolated, working copy of Python that maintains its own files, directories, and paths so that you can work with specific versions of libraries or Python itself without affecting other Python projects.
Virtual environments make it easy to cleanly separate projects and avoid problems with different dependencies and version requirements across components.
The conda command-line interface (CLI) is the preferred interface for managing installations and virtual environments with the Anaconda Python distribution.
The Jupyter Notebook service of the Iguazio Data Science Platform ("the platform") comes pre-deployed with conda.
This tutorial explains how to use conda to create a Python virtual environment that will be available as a custom kernel in Jupyter Notebook.
For general information about using conda to create virtual environments, see the [conda documentation](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html).
<a id="setting-up-a-virtual-environment-using-conda"></a>
## Setting Up a Virtual Environment Using conda
Follow these steps from your Jupyter Notebook service to create a Python virtual environment using conda:
1. <a id="conda-virt-env-setup-step-create-terminal"></a>Create a new terminal by selecting the **New Launcher** option (`+` icon) from the top action toolbar in the left sidebar, and then selecting **Terminal** from the main work area.<br>
The next steps should be executed from your new terminal, except where otherwise specified.
2. <a id="conda-virt-env-setup-step-create-env"></a>Create a new Python virtual environment by running the following command.
Replace `<environment name>` with your preferred virtual-environment name:
```sh
conda create -n <environment name> ipykernel python=3.7
```
<br>For example, the following command creates an environment named "myenv":<br>
```sh
conda create -n myenv ipykernel python=3.7
```
3. <a id="conda-virt-env-setup-step-install-packages"></a>Install the desired Python packages by running the following command.
Replace `<environment name>` with the name of the environment that you created in the previous step and `<package>` with the name of your selected package, optionally followed by `=<version>`; you can specify multiple packages:
```sh
conda install -n <environment name> <package> [<package> ...]
```
<br>For example, the following command installs the SciPy, pandas version 0.25.0, and TensorFlow version 1.13.1 packages for a "myenv" environment:
```sh
conda install -n myenv scipy pandas=0.25.0 tensorflow=1.13.1 python=3.7
```
4. <a id="conda-virt-env-setup-step-export-env"></a>Export your new virtual environment to an environment file in a platform data container by running the following command.
Replace `<container name>` with the name of a platform data container, `<directory path>` with an optional relative container-directories path, and `<environment name>` with the name of the environment that you created:
```sh
conda env export -n <environment name> > /v3io/<container name>[/<directory path>]/<environment name>.yaml
```
<br>It's recommended that you save the environment to a virtual-environments directory in your running-user home directory (**users/<running user>**).
For example, the following command creates a **users/<running user>/virtual_env/myenv.yaml** file:
```sh
conda env export -n myenv > /v3io/users/$V3IO_USERNAME/virtual_env/myenv.yaml
```
You can use the `User` data mount to the running-user directory to shorten this command (see [Platform Data Containers](data-ingestion-and-preparation/README.ipynb#platform-data-containers)):
```sh
conda env -n myenv export > /User/virtual_env/myenv.yaml
```
5. <a id="conda-virt-env-setup-step-refresh-ui"></a>Refresh the JupyterLab UI to apply your changes.
After refreshing the UI, you should be able to see your new environment in the list of available kernels in Jupyter Notebook.
<a id="creating-a-conda-environment-from-a-file"></a>
### Creating a conda Virtual Environment from a File
If, for any reason, your conda environment variable is removed from Jupyter Notebook, you can easily deploy it again by using the YAML environment file that you exported in [Step 4](#conda-virt-env-setup-step-export-env) of the setup procedure:
1. Open a new Jupyter terminal.
2. Run the following command to recreate the environment from the environment file.
Replace `<directory path>` and `<environment name>` to set the path to the environment file that you saved as part of the initial setup:
```sh
conda env create --file /v3io/<container name>[/<directory path>]/<environment name>.yaml
```
For example, the following command loads a **users/<running user>/virtual_env/myenv.yaml** environment file; the command uses the `User` running-user directory data mount to the running-user directory in the "users" container:
```sh
conda env create --file /User/virtual_env/myenv.yaml
```
<a id="setting-up-a-rapids-conda-environment"></a>
## Setting Up a RAPIDS conda Environment with cuDF and cuML
To use the cuDF and cuML RAPIDS libraries, you need to create a RAPIDS conda environment.
Use the following command to create a RAPIDS conda environment named `rapids`:
```sh
conda create -n rapids -c rapidsai -c nvidia -c anaconda -c conda-forge -c defaults ipykernel rapids=0.17 python=3.7 cudatoolkit=11.0
```
| github_jupyter |
# Description
## This notebook provides a set of commands to install Spark NLP for offline usage. It contains 5 sections:
1) Download all dependencies for Spark NLP
2) Download all dependencies for Spark NLP (enterprise/licensed)
3) Download all dependencies for Spark NLP OCR
4) Download all models/embeddings for offline usage
5) Example of NER
## 1) Download all dependencies for Spark NLP
```
import json
with open('workshop_license_keys_365.json') as f:
license_keys = json.load(f)
license_keys.keys()
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
os.environ['JSL_OCR_LICENSE'] = license_keys['SPARK_OCR_LICENSE']
version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']
version
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
!java -version
!pip install --ignore-installed -q pyspark==2.4.4
!pip list | grep spark
!sudo apt install awscli
# spark-nlp jar
!wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/spark-nlp-assembly-2.6.0.jar
# spark-nlp wheel
# link to the latest release will be found from https://pypi.org/project/spark-nlp/#files and downloaded
!wget -q https://files.pythonhosted.org/packages/2a/98/a1615193d6788ca68080508723082263a86e69f0815afa9a96309512a3ea/spark-nlp-2.6.0.tar.gz
!tar -xvf spark-nlp-2.6.0.tar.gz
!pip install -q spark-nlp-2.6.0/
```
## 2) Download all dependencies for Spark NLP (enterprise/licensed)
```
# here you need to enter your AWS KEY and AWS SECRET KEY.
# As a region enter "ohio"
# As a language enter "en"
!aws configure
jsl_secret = license_keys['SECRET']
jsl_jar = jsl_version+'.jar'
jsl_tar = jsl_version+'.tar.gz'
# spark nlp JSL wheel
!sudo aws -q s3 cp --region us-east-2 s3://pypi.johnsnowlabs.com/$jsl_secret/spark-nlp-jsl-$jsl_jar spark-nlp-jsl-$jsl_jar
!sudo aws -q s3 cp --region us-east-2 s3://pypi.johnsnowlabs.com/$secret/spark-nlp-jsl/spark-nlp-jsl-$jsl_tar spark-nlp-jsl-$jsl_tar
!tar -xvf spark-nlp-jsl-$jsl_tar
!pip install -q /content/spark-nlp-jsl-$jsl_version/
!pip list | grep spark
```
## 3) Download all dependencies for Spark NLP OCR
```
ocr_secret = license_keys['SPARK_OCR_SECRET']
ocr_version = ocr_secret.split('-')[0]
ocr_jar = ocr_version+'.jar'
ocr_tar = ocr_version+'.tar.gz'
!wget -q https://pypi.johnsnowlabs.com/$ocr_secret/jars/spark-ocr-assembly-$ocr_jar
!wget -q https://pypi.johnsnowlabs.com/$ocr_secret/spark-ocr/spark-ocr-$ocr_tar
# unpack wheel OCR
!tar -xvf /content/spark-ocr-$ocr_tar
!pip install -q /content/spark-ocr-$ocr_version/
#sanity check
!pip list | grep spark
```
## Installation completed. Let's download models using AWS keys
## 4) Download all models/embeddings for offline usage
```
# This code will download >100 GB of Spark NLP models to your local disk
# !sudo aws s3 cp --region us-east-2 s3://auxdata.johnsnowlabs.com/public/models/ public_models/ --recursive
# This code also will download >100 GB of clinical embeddings from Spark NLP models
# !sudo aws s3 cp --region us-east-2 s3://auxdata.johnsnowlabs.com/clinical/models/ clinical_models/ --recursive
# For example purposes let's download only subset for NER and glove
!sudo aws s3 cp --region us-east-2 s3://auxdata.johnsnowlabs.com/public/models/ public_models/ --recursive --exclude "*" --include "ner_dl*"
!sudo aws s3 cp --region us-east-2 s3://auxdata.johnsnowlabs.com/public/models/ public_models/ --recursive --exclude "*" --include "glove*"
# !sudo aws s3 cp --region us-east-2 s3://auxdata.johnsnowlabs.com/clinical/models/ clinical_models/ --recursive --exclude "*" --include "embeddings_clinical*"
```
## 5) Example on NER
```
!unzip -q /content/public_models/ner_dl_en_2.4.3_2.4_1584624950746.zip -d ner_dl_glove/
!unzip -q /content/public_models/glove_100d_en_2.4.0_2.4_1579690104032.zip -d glove_embeddings/
ner_local_path = 'ner_dl_glove'
embeddings_local_path = 'glove_embeddings'
spark_nlp_jar_path = "/content/spark-nlp-assembly-"+version+".jar"
spark_nlp_internal = "/content/spark-nlp-jsl-"+jsl_jar
spark_nlp_jar_path = spark_nlp_jar_path+","+spark_nlp_internal
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
def start():
    # Build (or fetch) a local SparkSession configured for Spark NLP.
    # Kryo serialization with an enlarged buffer is set via spark.serializer /
    # spark.kryoserializer.buffer.max, and the locally downloaded jars are
    # attached through spark.jars.
    # NOTE(review): reads the module-level spark_nlp_jar_path variable, which
    # must be assembled before this function is called.
    builder = SparkSession.builder \
        .appName("Spark NLP Licensed") \
        .master("local[*]") \
        .config("spark.driver.memory", "10G") \
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
        .config("spark.kryoserializer.buffer.max", "2000M") \
        .config("spark.jars", spark_nlp_jar_path)
    # getOrCreate reuses an existing session if one is already running.
    return builder.getOrCreate()
spark = start()
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
# ner_dl model is trained with glove_100d. So we use the same embeddings in the pipeline
glove_embeddings = WordEmbeddingsModel.load(embeddings_local_path).\
setInputCols(["document", 'token']).\
setOutputCol("embeddings")
# NER model trained on i2b2 (sampled from MIMIC) dataset
public_ner = NerDLModel.load(ner_local_path) \
.setInputCols(["document", "token", "embeddings"]) \
.setOutputCol("ner")
nlpPipeline = Pipeline(stages=[
documentAssembler,
tokenizer,
glove_embeddings,
public_ner
])
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
df = spark.createDataFrame([['Peter Parker lives in New York.']]).toDF("text")
result = pipelineModel.transform(df)
result.select('token.result','ner.result').show(truncate=False)
light_model = LightPipeline(pipelineModel)
text = 'Peter Parker lives in New York.'
light_result = light_model.annotate(text)
list(zip(light_result['token'], light_result['ner']))
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Classification with Deployment using a Bank Marketing Dataset**_
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Train](#Train)
1. [Results](#Results)
1. [Deploy](#Deploy)
1. [Test](#Test)
1. [Acknowledgements](#Acknowledgements)
## Introduction
In this example we use the UCI Bank Marketing dataset to showcase how you can use AutoML for a classification problem and deploy it to an Azure Container Instance (ACI). The classification goal is to predict if the client will subscribe to a term deposit with the bank.
If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
Please find the ONNX related documentations [here](https://github.com/onnx/onnx).
In this notebook you will learn how to:
1. Create an experiment using an existing workspace.
2. Configure AutoML using `AutoMLConfig`.
3. Train the model using local compute with ONNX compatible config on.
4. Explore the results, featurization transparency options and save the ONNX model
5. Inference with the ONNX model.
6. Register the model.
7. Create a container image.
8. Create an Azure Container Instance (ACI) service.
9. Test the ACI service.
In addition this notebook showcases the following features
- **Blocking** certain pipelines
- Specifying **target metrics** to indicate stopping criteria
- Handling **missing data** in the input
## Setup
As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
from matplotlib import pyplot as plt
import pandas as pd
import os
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.automl.core.featurization import FeaturizationConfig
from azureml.core.dataset import Dataset
from azureml.train.automl import AutoMLConfig
from azureml.interpret import ExplanationClient
```
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
```
print("This notebook was created using version 1.20.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
Accessing the Azure ML workspace requires authentication with Azure.
The default authentication is interactive authentication using the default tenant. Executing the `ws = Workspace.from_config()` line in the cell below will prompt for authentication the first time that it is run.
If you have multiple Azure tenants, you can specify the tenant by replacing the `ws = Workspace.from_config()` line in the cell below with the following:
```
from azureml.core.authentication import InteractiveLoginAuthentication
auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')
ws = Workspace.from_config(auth = auth)
```
If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the `ws = Workspace.from_config()` line in the cell below with the following:
```
from azureml.core.authentication import ServicePrincipalAuthentication
auth = auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')
ws = Workspace.from_config(auth = auth)
```
For more details, see [aka.ms/aml-notebook-auth](http://aka.ms/aml-notebook-auth)
```
ws = Workspace.from_config()

# choose a name for experiment
experiment_name = 'automl-classification-bmarketing-all'

experiment = Experiment(ws, experiment_name)

# Summarize the workspace/experiment configuration as a one-column table.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Experiment Name'] = experiment.name
# Use None to disable column-width truncation: the old -1 sentinel is
# deprecated and rejected by current pandas versions.
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data=output, index=[''])
outputDf.T
```
## Create or Attach existing AmlCompute
You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
#### Creation of AmlCompute takes approximately 5 minutes.
If the AmlCompute with that name is already in your workspace this code will skip the creation process.
As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cpu_cluster_name = "cpu-cluster-4"
# Verify that cluster does not exist already
try:
compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
max_nodes=6)
compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
```
# Data
### Load Data
Leverage azure compute to load the bank marketing dataset as a Tabular Dataset into the dataset variable.
### Training Data
```
# Download the Bank Marketing training data from the public sample blob store.
data = pd.read_csv("https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv")
data.head()

# Add missing values in 75% of the lines.
import numpy as np
missing_rate = 0.75
n_missing_samples = int(np.floor(data.shape[0] * missing_rate))
# np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `bool` is the supported dtype spelling.
missing_samples = np.hstack((np.zeros(data.shape[0] - n_missing_samples, dtype=bool),
                             np.ones(n_missing_samples, dtype=bool)))
# Fixed seed so the injected-missing-value pattern is reproducible.
rng = np.random.RandomState(0)
rng.shuffle(missing_samples)
missing_features = rng.randint(0, data.shape[1], n_missing_samples)
data.values[np.where(missing_samples)[0], missing_features] = np.nan

if not os.path.isdir('data'):
    os.mkdir('data')

# Save the train data to a csv to be uploaded to the datastore
pd.DataFrame(data).to_csv("data/train_data.csv", index=False)

ds = ws.get_default_datastore()
ds.upload(src_dir='./data', target_path='bankmarketing', overwrite=True, show_progress=True)

# Upload the training data as a tabular dataset for access during training on remote compute
train_data = Dataset.Tabular.from_delimited_files(path=ds.path('bankmarketing/train_data.csv'))
label = "y"
```
### Validation Data
```
validation_data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_validate.csv"
validation_dataset = Dataset.Tabular.from_delimited_files(validation_data)
```
### Test Data
```
test_data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_test.csv"
test_dataset = Dataset.Tabular.from_delimited_files(test_data)
```
## Train
Instantiate an AutoMLConfig object. This defines the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression or forecasting|
|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|
|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
|**blocked_models** | *List* of *strings* indicating machine learning algorithms for AutoML to avoid in this run. <br><br> Allowed values for **Classification**<br><i>LogisticRegression</i><br><i>SGD</i><br><i>MultinomialNaiveBayes</i><br><i>BernoulliNaiveBayes</i><br><i>SVM</i><br><i>LinearSVM</i><br><i>KNN</i><br><i>DecisionTree</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>GradientBoosting</i><br><i>TensorFlowDNN</i><br><i>TensorFlowLinearClassifier</i><br><br>Allowed values for **Regression**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><br>Allowed values for **Forecasting**<br><i>ElasticNet</i><br><i>GradientBoosting</i><br><i>DecisionTree</i><br><i>KNN</i><br><i>LassoLars</i><br><i>SGD</i><br><i>RandomForest</i><br><i>ExtremeRandomTrees</i><br><i>LightGBM</i><br><i>TensorFlowLinearRegressor</i><br><i>TensorFlowDNN</i><br><i>Arima</i><br><i>Prophet</i>|
|**allowed_models** | *List* of *strings* indicating machine learning algorithms for AutoML to use in this run. Same values listed above for **blocked_models** allowed for **allowed_models**.|
|**experiment_exit_score**| Value indicating the target for *primary_metric*. <br>Once the target is surpassed the run terminates.|
|**experiment_timeout_hours**| Maximum amount of time in hours that all iterations combined can take before the experiment terminates.|
|**enable_early_stopping**| Flag to enable early termination if the score is not improving in the short term.|
|**featurization**| 'auto' / 'off' Indicator for whether featurization step should be done automatically or not. Note: If the input data is sparse, featurization cannot be turned on.|
|**n_cross_validations**|Number of cross validation splits.|
|**training_data**|Input dataset, containing both features and label column.|
|**label_column_name**|The name of the label column.|
**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)
```
automl_settings = {
"experiment_timeout_hours" : 0.3,
"enable_early_stopping" : True,
"iteration_timeout_minutes": 5,
"max_concurrent_iterations": 4,
"max_cores_per_iteration": -1,
#"n_cross_validations": 2,
"primary_metric": 'AUC_weighted',
"featurization": 'auto',
"verbosity": logging.INFO,
}
automl_config = AutoMLConfig(task = 'classification',
debug_log = 'automl_errors.log',
compute_target=compute_target,
experiment_exit_score = 0.9984,
blocked_models = ['KNN','LinearSVM'],
enable_onnx_compatible_models=True,
training_data = train_data,
label_column_name = label,
validation_data = validation_dataset,
**automl_settings
)
```
Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.
```
remote_run = experiment.submit(automl_config, show_output = False)
remote_run
```
Run the following cell to access previous runs. Uncomment the cell below and update the run_id.
```
#from azureml.train.automl.run import AutoMLRun
#remote_run = AutoMLRun(experiment=experiment, run_id='<run_ID_goes_here')
#remote_run
# Wait for the remote run to complete
remote_run.wait_for_completion()
best_run_customized, fitted_model_customized = remote_run.get_output()
```
## Transparency
View updated featurization summary
```
custom_featurizer = fitted_model_customized.named_steps['datatransformer']
df = custom_featurizer.get_featurization_summary()
pd.DataFrame(data=df)
```
Set `is_user_friendly=False` to get a more detailed summary for the transforms being applied.
```
df = custom_featurizer.get_featurization_summary(is_user_friendly=False)
pd.DataFrame(data=df)
df = custom_featurizer.get_stats_feature_type_summary()
pd.DataFrame(data=df)
```
## Results
```
from azureml.widgets import RunDetails
RunDetails(remote_run).show()
```
### Retrieve the Best Model's explanation
Retrieve the explanation from the best_run which includes explanations for engineered features and raw features. Make sure that the run for generating explanations for the best model is completed.
```
# Wait for the best model explanation run to complete
from azureml.core.run import Run
model_explainability_run_id = remote_run.id + "_" + "ModelExplain"
print(model_explainability_run_id)
model_explainability_run = Run(experiment=experiment, run_id=model_explainability_run_id)
model_explainability_run.wait_for_completion()
# Get the best run object
best_run, fitted_model = remote_run.get_output()
```
#### Download engineered feature importance from artifact store
You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run.
```
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation(raw=False)
exp_data = engineered_explanations.get_feature_importance_dict()
exp_data
```
#### Download raw feature importance from artifact store
You can use ExplanationClient to download the raw feature explanations from the artifact store of the best_run.
```
client = ExplanationClient.from_run(best_run)
engineered_explanations = client.download_model_explanation(raw=True)
exp_data = engineered_explanations.get_feature_importance_dict()
exp_data
```
### Retrieve the Best ONNX Model
Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
Set the parameter return_onnx_model=True to retrieve the best ONNX model, instead of the Python model.
```
best_run, onnx_mdl = remote_run.get_output(return_onnx_model=True)
```
### Save the best ONNX model
```
from azureml.automl.runtime.onnx_convert import OnnxConverter
onnx_fl_path = "./best_model.onnx"
OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)
```
### Predict with the ONNX model, using onnxruntime package
```
import sys
import json
from azureml.automl.core.onnx_convert import OnnxConvertConstants
from azureml.train.automl import constants
if sys.version_info < OnnxConvertConstants.OnnxIncompatiblePythonVersion:
python_version_compatible = True
else:
python_version_compatible = False
import onnxruntime
from azureml.automl.runtime.onnx_convert import OnnxInferenceHelper
def get_onnx_res(run):
    """Download the ONNX resource JSON attached to *run* and return it as a dict."""
    local_path = 'onnx_resource.json'
    run.download_file(name=constants.MODEL_RESOURCE_PATH_ONNX,
                      output_file_path=local_path)
    with open(local_path) as resource_file:
        return json.load(resource_file)
if python_version_compatible:
test_df = test_dataset.to_pandas_dataframe()
mdl_bytes = onnx_mdl.SerializeToString()
onnx_res = get_onnx_res(best_run)
onnxrt_helper = OnnxInferenceHelper(mdl_bytes, onnx_res)
pred_onnx, pred_prob_onnx = onnxrt_helper.predict(test_df)
print(pred_onnx)
print(pred_prob_onnx)
else:
print('Please use Python version 3.6 or 3.7 to run the inference helper.')
```
## Deploy
### Retrieve the Best Model
Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details
```
best_run, fitted_model = remote_run.get_output()
model_name = best_run.properties['model_name']
script_file_name = 'inference/score.py'
best_run.download_file('outputs/scoring_file_v_1_0_0.py', 'inference/score.py')
```
### Register the Fitted Model for Deployment
If neither `metric` nor `iteration` are specified in the `register_model` call, the iteration with the best primary metric is registered.
```
description = 'AutoML Model trained on bank marketing data to predict if a client will subscribe to a term deposit'
tags = None
model = remote_run.register_model(model_name = model_name, description = description, tags = tags)
print(remote_run.model_id) # This will be written to the script file later in the notebook.
```
### Deploy the model as a Web Service on Azure Container Instance
```
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
from azureml.core.webservice import Webservice
from azureml.core.model import Model
from azureml.core.environment import Environment
inference_config = InferenceConfig(entry_script=script_file_name)
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "bmData", 'type': "automl_classification"},
description = 'sample service for Automl Classification')
aci_service_name = 'automl-sample-bankmarketing-all'
print(aci_service_name)
aci_service = Model.deploy(ws, aci_service_name, [model], inference_config, aciconfig)
aci_service.wait_for_deployment(True)
print(aci_service.state)
```
### Get Logs from a Deployed Web Service
Gets logs from a deployed web service.
```
#aci_service.get_logs()
```
## Test
Now that the model is trained, run the test data through the trained model to get the predicted values. This calls the ACI web service to do the prediction.
Note that the JSON passed to the ACI web service is an array of rows of data. Each row should either be an array of values in the same order that was used for training or a dictionary where the keys are the same as the column names used for training. The example below uses dictionary rows.
```
# Load the bank marketing datasets.
from numpy import array
X_test = test_dataset.drop_columns(columns=['y'])
y_test = test_dataset.keep_columns(columns=['y'], validate=True)
test_dataset.take(5).to_pandas_dataframe()
X_test = X_test.to_pandas_dataframe()
y_test = y_test.to_pandas_dataframe()
import json
import requests
X_test_json = X_test.to_json(orient='records')
data = "{\"data\": " + X_test_json +"}"
headers = {'Content-Type': 'application/json'}
resp = requests.post(aci_service.scoring_uri, data, headers=headers)
y_pred = json.loads(json.loads(resp.text))['result']
actual = array(y_test)
actual = actual[:,0]
print(len(y_pred), " ", len(actual))
```
### Calculate metrics for the prediction
Now visualize the data as a confusion matrix that compared the predicted values against the actual values.
```
%matplotlib notebook
from sklearn.metrics import confusion_matrix
import numpy as np
import itertools
cf =confusion_matrix(actual,y_pred)
plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest')
plt.colorbar()
plt.title('Confusion Matrix')
plt.xlabel('Predicted')
plt.ylabel('Actual')
class_labels = ['no','yes']
tick_marks = np.arange(len(class_labels))
plt.xticks(tick_marks,class_labels)
plt.yticks([-0.5,0,1,1.5],['','no','yes',''])
# plotting text value inside cells
thresh = cf.max() / 2.
for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])):
plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black')
plt.show()
```
### Delete a Web Service
Deletes the specified web service.
```
aci_service.delete()
```
## Acknowledgements
This Bank Marketing dataset is made available under the Creative Commons (CC0: Public Domain) License: https://creativecommons.org/publicdomain/zero/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: https://creativecommons.org/publicdomain/zero/1.0/ and is available at: https://www.kaggle.com/janiobachmann/bank-marketing-dataset .
_**Acknowledgements**_
This data set is originally available within the UCI Machine Learning Database: https://archive.ics.uci.edu/ml/datasets/bank+marketing
[Moro et al., 2014] S. Moro, P. Cortez and P. Rita. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014
| github_jupyter |
# Example notebook
```
import perfume
import perfume.analyze
import pandas as pd
import bokeh.io
bokeh.io.output_notebook()
```
## Setup
To start, set up some functions to benchmark.
```
import time
import numpy as np
def test_fn_1():
    """Sleep ~20/3000 s typically, with a rare (1%) ~100/3000 s outlier."""
    typical = np.random.poisson(20)
    outlier = np.random.poisson(100)
    duration_ms = np.random.choice([typical, outlier], p=[.99, .01])
    time.sleep(duration_ms / 3000.)

def test_fn_1_no_outliers():
    """Same typical latency as test_fn_1, but without the outlier mode."""
    time.sleep(np.random.poisson(20) / 3000.)

def test_fn_2():
    """Sleep ~5/3000 s typically, with a 5% chance of a ~150/3000 s outlier."""
    typical = np.random.poisson(5)
    outlier = np.random.poisson(150)
    duration_ms = np.random.choice([typical, outlier], p=[.95, .05])
    time.sleep(duration_ms / 3000.)

def test_fn_3():
    """Sleep a normally distributed duration (mean 100, sd 10, floored at 1), scaled by 1/3000."""
    duration_ms = max(1, np.random.normal(100, 10))
    time.sleep(duration_ms / 3000.)

# Pre-built array for a CPU-bound (no-sleep) benchmark workload.
numbers = np.arange(0, 1, 1. / (3 * 5000000))

def test_fn_4():
    """Return the sum of the pre-built `numbers` array (pure CPU work)."""
    return np.sum(numbers)

# Define "samples" in this same cell: if the functions above are edited and
# the cell re-run, the collected benchmark data is reset, so stale samples
# are never mixed with changed implementations.
samples = None
```
## Benchmark
Run the benchmark for a while by executing this cell. Since we capture the output data in `samples`, and pass it back in as an argument, you can interrupt the cell, take a look at the output so far, and then execute this cell again to resume the benchmark.
```
samples = perfume.bench(test_fn_1, test_fn_2, test_fn_3, test_fn_4,
samples=samples)
```
## Analyzing the samples
Let's look at the format of the output, each function execution gets its begin and end time recorded:
```
samples.head()
```
One thing we can do is plot each function's distribution as it develops over simulated time:
```
perfume.analyze.cumulative_quantiles_plot(samples)
```
We can run a K-S test and see whether our functions are significantly different:
```
perfume.analyze.ks_test(perfume.analyze.timings(samples))
```
We can convert them to elapsed timings instead of begin/end time points, get resampled timings in which outliers show a stronger presence, or isolate samples as if each function had run by itself.
```
timings = perfume.analyze.timings(samples)
bt = perfume.analyze.bucket_resample_timings(samples)
isolated = perfume.analyze.isolate(samples)
isolated.head()
```
With these, and other charting libraries, you can do whatever you want with the data:
```
from bokeh import palettes
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
fig, ax = plt.subplots(figsize=(16, 9))
for col, color in zip(timings.columns, palettes.Set1[len(timings.columns)]):
sns.distplot(timings[col], label=col, color=color, ax=ax,
# hist_kws=dict(cumulative=True),
# kde_kws=dict(cumulative=True)
)
ax.set_xlabel('millis')
ax.legend()
timings.describe()
import matplotlib.pyplot as plt

# Plot empirical CDFs of the timings. The `normed` keyword was deprecated in
# matplotlib 2.1 and removed in 3.1; `density=True` is the equivalent
# normalization argument.
timings['test_fn_1'].hist(cumulative=True, density=True, alpha=0.3)
timings['test_fn_2'].hist(cumulative=True, density=True, alpha=0.3)
# Same comparison on the bucket-resampled timings.
bt['test_fn_1'].hist(cumulative=True, density=True, alpha=0.3)
bt['test_fn_2'].hist(cumulative=True, density=True, alpha=0.3)
sns.pairplot(timings#, diag_kws={'cumulative': True}
)
import scipy.stats
bt = perfume.analyze.bucket_resample_timings(samples)
(scipy.stats.ks_2samp(timings['test_fn_1'], timings['test_fn_2']),
scipy.stats.ks_2samp(bt['test_fn_1'], bt['test_fn_2']))
```
| github_jupyter |
```
%matplotlib inline
# %config InlineBackend.figure_format = 'svg'
%reload_ext autoreload
%autoreload 2
from __future__ import division
import sys
import os
sys.path.append('../')
from modules.basics import *
from sklearn.model_selection import train_test_split
from lumin.plotting.data_viewing import plot_rank_order_dendrogram
from lumin.optimisation.features import rf_rank_features, auto_filter_on_linear_correlation, repeated_rf_rank_features, auto_filter_on_mutual_dependence
from rfpimp import plot_corr_heatmap, feature_dependence_matrix, plot_dependence_heatmap
from sklearn.ensemble import RandomForestRegressor
DATA_PATH = Path('../data')
!python ../modules/data_import.py -d ../data/
train_fy = FoldYielder(DATA_PATH/'train.hdf5', input_pipe=DATA_PATH/'input_pipe.pkl')
train_df = train_fy.get_df(inc_inputs=True, deprocess=True, suppress_warn=True, nan_to_num=True); train_df.head()
train_feats = train_fy.cont_feats+train_fy.cat_feats
df_trn, df_val = train_test_split(train_df, test_size=0.2, random_state=0, stratify=train_df.gen_target)
```
# Constant feats
```
const = []
for f in train_feats:
if len(train_df[f].unique()) == 1:
print(f)
const += [f]
train_feats = [f for f in train_feats if f not in const]
```
# Hyper opt
```
vec_feats = []
for c in ['px', 'py', 'pz']:
vec_feats += [f for f in train_feats if f == f'{f[:f.rfind("_")+1]}{c}']
len(vec_feats)
vec_feats
hl_feats = [f for f in train_feats if f.startswith('DER_')]; len(hl_feats)
ll_feats = [f for f in train_feats if f.startswith('PRI_')]; len(ll_feats)
from lumin.utils.misc import subsample_df
from lumin.optimisation.hyper_param import get_opt_rf_params
from collections import OrderedDict
subsample_rate = 0.1
tmp_trn = subsample_df(df_trn, 'classification', 'gen_target',
n_samples=int(subsample_rate*len(df_trn)), strat_key='gen_target', wgt_name='gen_weight')
rf_params, _ = get_opt_rf_params(x_trn=tmp_trn[hl_feats], y_trn=tmp_trn['gen_target'],
x_val=df_val[hl_feats], y_val=df_val['gen_target'],
objective='classification',
w_trn=tmp_trn['gen_weight'], w_val=df_val['gen_weight'],
n_estimators=40, verbose=True,
params=OrderedDict({'min_samples_leaf': [2,4,8,16,32,64,128],
'max_features': [0.3,0.5,0.7,0.9]}))
```
# Spearman's rank order correlation
```
filtered_feats = auto_filter_on_linear_correlation(train_df=df_trn, val_df=df_val,
check_feats=hl_feats, subsample_rate=0.01, n_rfs=5,
corr_threshold=0.8, rf_params=rf_params, optimise_rf=False,
objective='classification', targ_name='gen_target',
wgt_name='gen_weight', strat_key='gen_target')
```
# Permutation Importance
```
import_feats, fi = repeated_rf_rank_features(train_df=df_trn, val_df=df_val,
n_reps=10, min_frac_import=0.3,
rf_params=OrderedDict({'min_samples_leaf': [2,4,8,16,32,64,128],
'max_features': [0.3,0.5,0.7,0.9]}),
strat_key='gen_target', objective='classification',
subsample_rate=subsample_rate, resample_val=True,
train_feats=filtered_feats, wgt_name='gen_weight', importance_cut=0.001,
n_rfs=5, n_threads=5)
```
# Mutual dependence
```
filtered_feats = auto_filter_on_mutual_dependence(train_df=df_trn, val_df=df_val,
check_feats=import_feats,
objective='classification', targ_name='gen_target',
strat_key='gen_target', wgt_name='gen_weight',
subsample_rate=0.3,
rf_params=OrderedDict({'min_samples_leaf': [2,4,8,16,32,64,128],
'max_features': [0.3,0.5,0.7,0.9]}),
optimise_rf=True)
```
| github_jupyter |
```
import numpy as np
import os
from skimage import io
from skimage import color, exposure, transform
from PIL import Image
import cv2
import matplotlib
import matplotlib.pyplot as plt
import sys
from shutil import copyfile
from skimage import data, img_as_float
from skimage import exposure
import shutil
import keras
import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import Conv2D
from keras.layers import Input, MaxPooling2D, Dense, Dropout, Activation, Flatten, ZeroPadding2D
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
from keras.layers.advanced_activations import LeakyReLU, ELU, PReLU
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
def analyse_images(imgs_paths):
    """Load each image, equalize its histogram, and pair it with its class label.

    The integer class label is taken from the parent directory name of each
    path (e.g. 'SGTSD/Images_blurred/<class>/<file>.jpg').

    Returns a list of [equalized_image, int_class] pairs.
    """
    data = []
    for i, img_path in enumerate(imgs_paths, start=1):
        img_class = int(img_path.split('/')[-2])
        img = io.imread(img_path)
        # Histogram equalization to normalize contrast across images.
        img_h = exposure.equalize_hist(img)
        data.append([img_h, img_class])
        # Print every 10000 images. The original
        # `print ("Images processed: ")+str(i)` only worked as a Python 2
        # print statement (under Python 3 it raises TypeError: None + str);
        # this form is valid on both Python 2 and 3.
        if (i % 10000) == 0:
            print("Images processed: " + str(i))
    return data
directory = 'SGTSD/Images_blurred/'
paths = []
number_signs = 0
if (not os.path.exists("used_images")):
os.makedirs('used_images')
for sub_dir in os.listdir(directory):
sd = directory+sub_dir+'/'
if (sub_dir != ".DS_Store"):
number_signs = number_signs+1
images_paths = os.listdir(sd)
np.random.shuffle(images_paths)
for files in images_paths[:3400]:
if (files.endswith(".jpg")==True):
paths.append(directory+sub_dir+'/'+files)
if (not os.path.exists("used_images/"+sub_dir)): os.mkdir("used_images/"+sub_dir)
copyfile(directory+sub_dir+'/'+files, 'used_images/'+sub_dir+'/'+files)
# Log every image path used for training to Images_used.txt, followed by a
# trailing blank line (matching the original `print ""`). Writing to the file
# handle directly avoids the fragile sys.stdout swap and the Python-2-only
# `print""` statement, which is a SyntaxError under Python 3.
with open('Images_used.txt', 'w') as f:
    for img_used in paths:
        f.write(str(img_used) + '\n')
    f.write('\n')
np.random.shuffle(paths)
data = analyse_images(paths)
elements = []
classes = []
for d in data:
elements.append(d[0])
classes.append(d[1])
direct = 'val_set_blurred/'
val_paths = []
for sub_dir in os.listdir(direct):
sd = direct+sub_dir+'/'
if (sub_dir != ".DS_Store"):
for files in os.listdir(sd):
if (files.endswith(".csv")==False) and (files.endswith(".DS_Store")==False):
val_paths.append(direct+sub_dir+'/'+files)
np.random.shuffle(val_paths)
val_data = analyse_images(val_paths)
val_elements = []
val_classes = []
for vd in val_data:
val_elements.append(vd[0])
val_classes.append(vd[1])
#PReLU(alpha_initializer="zero", weights=None)(norm_1)#ELU(alpha=0.001)(norm_1)#
def cnn_model(height, width, depth, number_signs):
    """Build and compile the traffic-sign classification CNN.

    Three Conv-Conv-Pool-Dropout stages (32, 64, 128 filters) followed by a
    512-unit dense head; every conv/dense layer is batch-normalized and
    activated with ELU(alpha=0.001). Compiled with Adam on categorical
    cross-entropy; prints a layer summary before returning.
    """
    inp = Input(shape=(height, width, depth))
    x = inp
    # Feature extractor: per stage, two same-padded 3x3 convolutions
    # (Conv -> BatchNorm -> ELU each), then 2x2 max-pooling and 20% dropout.
    for n_filters in (32, 64, 128):
        for _ in range(2):
            x = Conv2D(n_filters, (3, 3), padding='same')(x)
            x = BatchNormalization()(x)
            x = ELU(alpha=0.001)(x)
        x = MaxPooling2D((2, 2), data_format="channels_last")(x)
        x = Dropout(0.2)(x)
    # Classifier head.
    x = Flatten()(x)
    x = Dense(512)(x)
    x = BatchNormalization()(x)
    x = ELU(alpha=0.001)(x)
    x = Dropout(0.5)(x)
    out = Dense(number_signs, activation='softmax')(x)
    model = Model(inputs=inp, outputs=out)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
model = cnn_model(48,48,3,number_signs)
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

batch_size = 32
nb_epochs = 100
# One-hot encode the integer class labels by indexing an identity matrix.
X_train = np.array(elements, dtype='float32')
Y_train = np.eye(number_signs, dtype='uint8')[classes]
X_val = np.array(val_elements, dtype='float32')
Y_val = np.eye(number_signs, dtype='uint8')[val_classes]
datagen = ImageDataGenerator(featurewise_center=False,
featurewise_std_normalization=False,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2,
shear_range=0.1,
rotation_range=10.)
datagen.fit(X_train)
#early_stopping_monitor = EarlyStopping(patience=10)
filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
#history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),steps_per_epoch=1000,epochs=nb_epochs,callbacks=callbacks_list, validation_data=(X_val, Y_val))
history = model.fit(X_train, Y_train,batch_size=batch_size,epochs=nb_epochs,callbacks=callbacks_list, validation_data=(X_val,Y_val))
score = model.evaluate(X_val, Y_val, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.ylim([0.0,1.0])
yintreval = np.arange(0.0, 1.05, 0.05)
xintreval = np.arange(0, 101, 4)
plt.yticks(yintreval)
plt.xticks(xintreval)
plt.grid()
fig = plt.figure()
fig.savefig('CNN -accuracy.png')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.xticks(xintreval)
plt.grid()
fig = plt.figure()
fig.savefig('CNN -loss.png')
plt.show()
orig_stdout = sys.stdout
f = open('Model_Summary.txt', 'w')
sys.stdout = f
print(model.summary())
sys.stdout = orig_stdout
f.close()
orig_stdout = sys.stdout
f = open('History.txt', 'w')
sys.stdout = f
print(history.history)
print('Test score:', score[0])
print('Test accuracy:', score[1])
sys.stdout = orig_stdout
f.close()
model.load_weights("weights.best.hdf5")
score = model.evaluate(X_val, Y_val, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
from IPython.display import Image, display, SVG
from keras.utils.vis_utils import model_to_dot
import pydot
import graphviz
# Save the model as png file
from keras.utils.vis_utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)
Y_predict = model.predict(X_val)
import operator

# 50x50 confusion matrix: rows = actual class, cols = predicted class.
truth_table = np.zeros((50, 50))
# NOTE: the original `truth_table.astype(int)` discarded its result (astype
# returns a copy); the int conversion happens after the table is filled, so
# that no-op call is dropped here.

# Redirect stdout to a file to capture the per-sample prediction report.
orig_stdout = sys.stdout
f = open('Validation_set_predictions_summary.txt', 'w')
sys.stdout = f
for i in range(len(Y_predict)):
    # Recover the integer class from the one-hot ground-truth row.
    obj_class = 0
    for c in range(len(Y_val[i])):
        if Y_val[i][c] == 1:
            obj_class = c
            break
    # Map class index -> predicted probability in percent.
    dictionary = {cls: Y_predict[i][cls] * 100 for cls in range(50)}
    # dict.iteritems() is Python-2 only; dict.items() works on both 2 and 3.
    # Compute the argmax once instead of three times.
    predicted_class = max(dictionary.items(), key=operator.itemgetter(1))[0]
    print("")
    print("Road Sign Actual Class: " + str(obj_class))
    print("Class predicted: " + str(predicted_class))
    print("% per class: " + str(dictionary))
    print("")
    truth_table[int(obj_class), int(predicted_class)] += 1
sys.stdout = orig_stdout
f.close()
truth_table = truth_table.astype(int)
np.savetxt('truth_table.csv', truth_table, fmt='%10.0f', delimiter=',')
import seaborn as sn
plt.figure(figsize = (30,30))
sn.heatmap(truth_table, annot=True)
plt.show()
from quiver_engine import server
server.launch(model,temp_folder='./imgs',input_folder='./val_set_blurred/3', port=5000)
```
| github_jupyter |
## Subplots with table and traces with different realtive position ##
```
# Plotly offline setup so figures render inside the notebook.
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot, plot
init_notebook_mode(connected=True)
import pandas as pd
import datetime
# 180 days of bitcoin mining statistics, one row per day.
df=pd.read_excel('Mining-BTC-180.xls')
df.head()
df.columns
```
Convert each string in `df['Date']` to a datetime.date element.
```
# Convert each string in df['Date'] to a datetime.date.
# Fixed for Python 3: `map()` returns a lazy iterator (not a list), which
# cannot be assigned as a DataFrame column; use a list comprehension instead.
df['Date'] = [pd.to_datetime(dr).date() for dr in df['Date']]
df.columns
# Left-hand table (last 20 records) of the mining-stats figure.
table_trace1=dict(type = 'table',
                  # Table occupies the left half of the figure.
                  domain=dict(x= [0.0, 0.5],
                              y= [0, 1.0]),
                  columnwidth= [30]+[ 33, 35, 33],
                  columnorder=[0, 1, 2, 3, 4],
                  header = dict(height = 50,
                                values = [['<b>Date</b>'],['<b>Hash Rate, TH/sec</b>'],
                                          ['<b>Mining revenue</b>'], ['<b>Transaction fees</b>']],
                                line = dict(color='rgb(50,50,50)'),
                                align = ['left']*5,
                                font = dict(color=['rgb(45,45,45)']*5, size=14),
                                fill = dict( color = '#d562be' )#fill color for header
                               ),
                  cells = dict(values = [df['Date'][-20:], df['Hash-rate'][-20:], df['Mining-revenue-USD'][-20:],
                                         df['Transaction-fees-BTC'][-20:],
                                        ],
                               line = dict(color='#506784'),
                               align = ['left']*5,
                               font = dict(color=['rgb(40,40,40)']*5, size=12),
                               # Column display formats: date as-is, 2 decimals for the
                               # two middle columns, 4 decimals for the BTC fee column.
                               format = [None]+ [",.2f"]*2+[',.4f'],
                               prefix = [None]*2+['$', u'\u20BF'],
                               suffix=[None]*4,
                               height = 27,
                               # Fixed: the first alternating row color was 'rgb(235,193, 238'
                               # (missing the closing parenthesis), which is not a valid
                               # CSS color string.
                               fill = dict( color = ['rgb(235,193,238)', 'rgba(228, 222,249, 0.65)'] )
                              )
                 )
# Line trace: daily hash rate, plotted on the top-right subplot (x1/y1).
trace1=dict(type='scatter',
            x=df['Date'],
            y=df['Hash-rate'],
            xaxis='x1',
            yaxis='y1',
            mode='lines',
            line=dict(width=2, color='#9748a1'),
            name='hash-rate-TH/s')
# Line trace: daily mining revenue in USD, middle-right subplot (x2/y2).
trace2=dict(type='scatter',
            x=df['Date'],
            y=df['Mining-revenue-USD'],
            xaxis='x2',
            yaxis='y2',
            mode='lines',
            line=dict(width=2, color='#b04553'),
            name='mining revenue')
# Line trace: daily transaction fees in BTC, bottom-right subplot (x3/y3).
trace3=dict(type='scatter',
            x=df['Date'],
            y=df['Transaction-fees-BTC'],
            xaxis='x3',
            yaxis='y3',
            mode='lines',
            line=dict(width=2, color='#af7bbd'),
            name='transact-fee')
# Common axis style, shared across all six axes via dict unpacking below.
axis=dict(showline=True, zeroline=False, showgrid=True, mirror=True,
          ticklen=4, gridcolor='#ffffff', tickfont=dict(size=10))
# Layout: table on the left half, three stacked time-series on the right half.
layout1 = dict(width=950, height=800, autosize=False, title='Bitcoin mining stats for 180 days',
               margin = dict(t=100,l=0,r=0,b=100),
               showlegend=False,
               # Right-column x axes share the same horizontal domain; only the
               # bottom one shows tick labels.
               xaxis1=dict(axis, **dict(domain=[0.55,0.98], anchor= 'y1', showticklabels=False)),
               xaxis2=dict(axis, **dict(domain=[0.55,0.98], anchor= 'y2', showticklabels=False)),
               xaxis3=dict(axis, **dict(domain=[0.55,0.98], anchor= 'y3')),
               # Vertical domains split the right column into three equal bands.
               yaxis1=dict(axis, **dict(domain=[0.68,1], anchor= 'x1', hoverformat='.2f')),
               yaxis2=dict(axis, **dict(domain=[0.34,0.66], anchor= 'x2', tickprefix='$', hoverformat='.2f')),
               yaxis3=dict(axis, **dict(domain=[0.0,0.32], anchor= 'x3', tickprefix=u'\u20BF', hoverformat='.2f')),
               plot_bgcolor='rgba(228, 222,249, 0.65)' ,
               annotations=[dict(showarrow=False,
                                 text='The last 20 records',
                                 xref='paper',
                                 yref='paper',
                                 x=0.2,
                                 y=1.01,
                                 xanchor='left',
                                 yanchor='bottom',
                                 font=dict(
                                     size=15 ))]
              )
fig1 = dict(data=[table_trace1, trace1, trace2, trace3], layout=layout1)
iplot(fig1)
df.columns
# Table of 16 records around 2017-09-03 for the transaction-summary figure.
table_trace2=dict(type = 'table',
                  # Table occupies the upper 60% of the figure.
                  domain=dict(x= [0.1, 0.9],
                              y= [0.4, 1]),
                  columnwidth= [30]+[ 33, 35, 33],
                  columnorder=[0, 1, 2, 3, 4],
                  header = dict(height = 50,
                                values = [['<b>Date</b>'],['<b>Nr-transactions</b>'],
                                          ['<b>Output-volume</b>'],
                                          ['<b>Cost-per-trans</b>']],
                                line = dict(color='rgb(50,50,50)'),
                                align = ['left']*4,
                                font = dict(color=['rgb(45,45,45)']*4, size=14),
                                fill = dict( color = '#d562be' )#fill color for header
                               ),
                  cells = dict(values = [df['Date'][-60:-44],
                                         df['Number-transactions'][-60:-44],
                                         df['Output-volume(BTC)'][-60:-44],
                                         df['Cost-per-trans-USD'][-60:-44]
                                        ],
                               line = dict(color='#506784'),
                               align = ['left']*4,
                               font = dict(color=['rgb(40,40,40)']*4, size=12),
                               # Date as-is, integer transaction count, 2 decimals for
                               # the BTC volume and USD cost columns.
                               format = [None]+ ['d']+[",.2f"]*2,
                               prefix = [None]*2+[ u'\u20BF', '$'],
                               suffix=[None]*4,
                               height = 27,
                               # Fixed: the first alternating row color was 'rgb(235,193, 238'
                               # (missing the closing parenthesis), which is not a valid
                               # CSS color string.
                               fill = dict( color = ['rgb(235,193,238)', 'rgba(228, 222,249, 0.65)'] )
                              )
                 )
# Line trace: daily number of transactions, first lower subplot (x1/y1).
trace4=dict(type='scatter',
            x=df['Date'],
            y=df['Number-transactions'],
            xaxis='x1',
            yaxis='y1',
            mode='lines',
            line=dict(width=2, color='#af7bbd'),
            name='nr-transac')
# Line trace: daily output volume in BTC, second lower subplot (x2/y2).
trace5=dict(type='scatter',
            x=df['Date'],
            y=df['Output-volume(BTC)'],
            xaxis='x2',
            yaxis='y2',
            mode='lines',
            line=dict(width=2, color='#b04553'),
            name='ouput-vol')
# Line trace: daily cost per transaction in USD, third lower subplot (x3/y3).
trace6=dict(type='scatter',
            x=df['Date'],
            y= df['Cost-per-trans-USD'],
            xaxis='x3',
            yaxis='y3',
            mode='lines',
            line=dict(width=2, color='#9748a1'),
            name='cost-transac')
# Layout: table on top, three stacked time-series below; highlight rectangles
# mark the 16-record window shown in the table.
layout2 = dict(width=800, height=1000, autosize=False, title='Bitcoin transaction summary for 180 days',
               margin = dict(t=100, l=60,r=0),
               showlegend=False,
               xaxis1=dict(axis, **dict(domain=[0.1,0.9], anchor= 'y1', showgrid=False, showticklabels=False)),
               xaxis2=dict(axis, **dict(domain=[0.1,0.9], anchor= 'y2', showgrid=False, showticklabels=False)),
               xaxis3=dict(axis, **dict(domain=[0.1,0.9], anchor= 'y3',showgrid=False, showticklabels=True)),
               yaxis1=dict(axis, **dict(domain=[0.28, 0.398], anchor= 'x1', showgrid=False, hoverformat='d')),
               yaxis2=dict(axis, **dict(domain=[0.14, 0.26], anchor= 'x2', showgrid=False, tickprefix= u'\u20BF', hoverformat='.2f')),
               # NOTE(review): tickprefix='S' looks like a typo for '$' — this axis
               # plots Cost-per-trans-USD. Confirm before changing.
               yaxis3=dict(axis, **dict(domain=[0, 0.12], anchor= 'x3', showgrid=False, tickprefix='S', hoverformat='.2f')),
               plot_bgcolor='rgba(228, 222,249, 0.65)',
               annotations=[dict(showarrow=False,
                                 text='16 records around the date of september 3, 2017',
                                 xref='paper',
                                 yref='paper',
                                 x=0.2,
                                 y=1.01,
                                 xanchor='left',
                                 yanchor='bottom',
                                 font=dict(
                                     size=15 ))],
               # One shaded rectangle per subplot covering rows -60..-43, i.e. the
               # window of records listed in the table above.
               shapes=[dict(type='rect',
                            xref='x3',
                            yref='y3',
                            x0=df['Date'].iloc[-60],
                            y0=8,
                            x1=df['Date'].iloc[-43],
                            y1=53,
                            layer='below',
                            line=dict(color='rgba(235,193, 238, 0.9)', width=0.5),
                            fillcolor= 'rgba(235,193, 238, 0.9)'
                           ),
                       dict(type='rect',
                            xref='x2',
                            yref='y2',
                            x0=df['Date'].iloc[-60],
                            y0=3327226,
                            x1=df['Date'].iloc[-43],
                            y1=11801113,
                            layer='below',
                            line=dict(color='rgba(235,193, 238, 0.9)', width=0.5),
                            fillcolor= 'rgba(235,193, 238, 0.9)'
                           ),
                       dict(type='rect',
                            xref='x1',
                            yref='y1',
                            x0=df['Date'].iloc[-60],
                            y0=130875,
                            x1=df['Date'].iloc[-43],
                            y1=375098,
                            layer='below',
                            line=dict(color='rgba(235,193, 238, 1)', width=0.5),
                            fillcolor= 'rgba(235,193, 238, 1)'
                           )]
              )
fig2 = dict(data=[table_trace2, trace4, trace5, trace6], layout=layout2)
iplot(fig2)
```
| github_jupyter |
```
from collections import defaultdict
from sortedcontainers import SortedDict
import math
import pandas as pd
import numpy as np
from copy import copy
from pyqstrat.pq_utils import str2date
from pyqstrat.pq_types import ContractGroup
def calc_trade_pnl(open_qtys, open_prices, new_qtys, new_prices, multiplier):
    '''
    Net new trades against open (non-netted) trades using FIFO accounting and
    compute realized pnl plus the remaining open position.

    Args:
        open_qtys (np.ndarray): signed quantities of currently open trades
        open_prices (np.ndarray): prices of the open trades (parallel array)
        new_qtys (np.ndarray): signed quantities of new trades (+ buy, - sell)
        new_prices (np.ndarray): prices of the new trades (parallel array)
        multiplier (float): contract multiplier used to scale realized pnl

    Returns:
        tuple: (remaining open qtys, remaining open prices, net open qty,
        weighted average open price, realized pnl * multiplier)

    >>> print(calc_trade_pnl(
    ...    open_qtys = np.array([], dtype = float), open_prices = np.array([], dtype = float),
    ...    new_qtys = np.array([-8, 9, -4]), new_prices = np.array([10, 11, 6]), multiplier = 100))
    (array([-3.]), array([6.]), -3.0, 6.0, -1300.0)
    >>> print(calc_trade_pnl(open_qtys = np.array([], dtype = float), open_prices = np.array([], dtype = float), new_qtys = np.array([3, 10, -5]),
    ...    new_prices = np.array([51, 50, 45]), multiplier = 100))
    (array([8.]), array([50.]), 8.0, 50.0, -2800.0)
    >>> print(calc_trade_pnl(open_qtys = np.array([]), open_prices = np.array([]),
    ...    new_qtys = np.array([-58, -5, -5, 6, -8, 5, 5, -5, 19, 7, 5, -5, 39]),
    ...    new_prices = np.array([2080, 2075.25, 2070.75, 2076, 2066.75, 2069.25, 2074.75, 2069.75, 2087.25, 2097.25, 2106, 2088.25, 2085.25]),
    ...    multiplier = 50))
    (array([], dtype=float64), array([], dtype=float64), 0.0, 0, -33762.5) '''
    # TODO: Cythonize this
    realized = 0.
    # Work on copies: netted quantities are zeroed out as we go.
    new_qtys = new_qtys.copy()
    new_prices = new_prices.copy()
    # Note: uses builtin float/int dtypes — the np.float / np.int aliases the
    # original used were deprecated and removed in numpy 1.24.
    # Pre-size the open arrays to the worst case (nothing nets out).
    _open_prices = np.zeros(len(open_prices) + len(new_prices), dtype=float)
    _open_prices[:len(open_prices)] = open_prices
    _open_qtys = np.zeros(len(open_qtys) + len(new_qtys), dtype=float)
    _open_qtys[:len(open_qtys)] = open_qtys
    new_qty_indices = np.nonzero(new_qtys)[0]
    open_qty_indices = np.zeros(len(_open_qtys), dtype=int)
    nonzero_indices = np.nonzero(_open_qtys)[0]
    open_qty_indices[:len(nonzero_indices)] = nonzero_indices
    i = 0  # index into new_qty_indices: the new qty currently being netted
    l = len(nonzero_indices)  # virtual length of open_qty_indices
    j = 0  # index into open_qty_indices: the open qty currently being netted
    k = len(open_qtys)  # virtual length of _open_qtys
    # Try to net all new trades against existing non-netted trades.
    # Append any remaining non-netted new trades to the end of existing trades.
    while i < len(new_qty_indices):
        # Always net the first non-zero new trade against the first non-zero
        # existing trade: FIFO accounting.
        new_idx = new_qty_indices[i]
        new_qty, new_price = new_qtys[new_idx], new_prices[new_idx]
        if j < l:  # we still have open positions to net against
            open_idx = open_qty_indices[j]
            open_qty, open_price = _open_qtys[open_idx], _open_prices[open_idx]
            if math.copysign(1, open_qty) == math.copysign(1, new_qty):
                # Same direction, nothing to net against: store this trade and
                # wait for the next offsetting trade.
                _open_qtys[k] = new_qty
                _open_prices[k] = new_price
                open_qty_indices[l] = k
                k += 1
                l += 1
                new_qtys[new_idx] = 0
                i += 1
            elif abs(new_qty) > abs(open_qty):
                # New trade has more qty than the offsetting open trade:
                # a. net fully against the open trade
                # b. remove the open trade
                # c. reduce the qty of the new trade
                realized += open_qty * (new_price - open_price)
                _open_qtys[open_idx] = 0
                j += 1
                new_qtys[new_idx] += open_qty
            else:
                # New trade has less (or equal) qty than the offsetting trade:
                # a. net fully against the open trade
                # b. remove the new trade
                # c. reduce the qty of the open trade
                realized += new_qty * (open_price - new_price)
                new_qtys[new_idx] = 0
                i += 1
                _open_qtys[open_idx] += new_qty
        else:
            # Nothing left to net against: store this trade and wait for the
            # next offsetting trade.
            _open_qtys[k] = new_qty
            _open_prices[k] = new_price
            open_qty_indices[l] = k
            k += 1
            l += 1
            new_qtys[new_idx] = 0
            i += 1
    # Compact out the fully-netted entries.
    mask = _open_qtys != 0
    _open_qtys = _open_qtys[mask]
    _open_prices = _open_prices[mask]
    open_qty = np.sum(_open_qtys)
    if math.isclose(open_qty, 0):
        weighted_avg_price = 0
    else:
        weighted_avg_price = np.sum(_open_qtys * _open_prices) / open_qty
    return _open_qtys, _open_prices, open_qty, weighted_avg_price, realized * multiplier
def leading_nan_to_zero(df, columns):
    '''Replace leading NaNs (values before the first non-NaN entry) with zeros
    for each of the given columns, modifying df in place. Interior and trailing
    NaNs are left untouched. Returns the modified dataframe.'''
    for col in columns:
        values = df[col].values
        non_nan_positions = np.ravel(np.nonzero(~np.isnan(values)))
        first_valid = non_nan_positions[0] if len(non_nan_positions) else -1
        if 0 < first_valid < len(values):
            # Zero out only the NaN prefix, then write the column back.
            values[:first_valid] = np.nan_to_num(values[:first_valid])
            df[col] = values
    return df
def find_last_non_nan_index(array):
    '''Return the index of the last finite (non-NaN, non-inf) element of array,
    or 0 if there is no finite element.'''
    finite_positions = np.nonzero(np.isfinite(array))[0]
    return finite_positions[-1] if len(finite_positions) else 0
def find_index_before(sorted_dict, key):
    '''
    Return the index of the last key in sorted_dict that is less than or equal
    to the given key. If every key is greater than the key (or the dict is
    empty), return -1.
    '''
    size = len(sorted_dict)
    if size == 0:
        return -1
    insertion_point = sorted_dict.bisect_left(key)
    if insertion_point == size:
        # key is beyond the last element, so the last index qualifies
        return size - 1
    # bisect_left landed either on an exact match or on the first larger key
    return insertion_point if sorted_dict.keys()[insertion_point] == key else insertion_point - 1
class ContractPNL:
    '''Computes pnl for a single contract over time given trades and market data'''
    def __init__(self, contract, account_timestamps, price_function, strategy_context):
        self.contract = contract
        self._price_function = price_function
        self.strategy_context = strategy_context
        self._account_timestamps = account_timestamps
        # timestamp -> cumulative (position, realized, fee, commission, open_qty, weighted_avg_price)
        self._trade_pnl = SortedDict()
        # timestamp -> (price, unrealized, net_pnl)
        self._net_pnl = SortedDict()
        # Store trades that are not offset so when new trades come in we can offset against these to calc pnl
        # NOTE(review): np.int / np.float are deprecated numpy aliases (removed in
        # numpy 1.24); consider switching to the builtin int / float dtypes.
        self.open_qtys = np.empty(0, dtype = np.int)
        self.open_prices = np.empty(0, dtype = np.float)
        self.first_trade_timestamp = None
        # Frozen pnl value once the contract has expired (set in calc_net_pnl)
        self.final_pnl = None
    def _add_trades(self, trades):
        '''
        Net the given trades into this contract's open position and update the
        cumulative trade-pnl series.

        Args:
            trades (list of :obj:`Trade`): Must be sorted by timestamp
        '''
        if not len(trades): return
        timestamps = [trade.timestamp for trade in trades]
        if len(self._trade_pnl):
            # NOTE(review): peekitem(0) is the SMALLEST key, but the error message
            # says "prev max timestamp" — peekitem(-1) may have been intended. Confirm.
            k, v = self._trade_pnl.peekitem(0)
            if timestamps[0] <= k:
                raise Exception(f'Can only add a trade that is newer than last added current: {timestamps[0]} prev max timestamp: {k}')
        if self.first_trade_timestamp is None: self.first_trade_timestamp = timestamps[0]
        for i, timestamp in enumerate(timestamps):
            # Net all trades that share this timestamp in a single batch
            t_trades = [trade for trade in trades if trade.timestamp == timestamp]
            open_qtys, open_prices, open_qty, weighted_avg_price, realized_chg = calc_trade_pnl(
                self.open_qtys, self.open_prices,
                np.array([trade.qty for trade in t_trades]),
                np.array([trade.price for trade in t_trades]),
                self.contract.multiplier)
            self.open_qtys = open_qtys
            self.open_prices = open_prices
            position_chg = sum([trade.qty for trade in t_trades])
            commission_chg = sum([trade.commission for trade in t_trades])
            fee_chg = sum([trade.fee for trade in t_trades])
            # Accumulate onto the most recent prior entry (if any)
            index = find_index_before(self._trade_pnl, timestamp)
            if index == -1:
                self._trade_pnl[timestamp] = (position_chg, realized_chg, fee_chg, commission_chg, open_qty, weighted_avg_price)
            else:
                prev_timestamp, (prev_position, prev_realized, prev_fee, prev_commission, _, _) = self._trade_pnl.peekitem(index)
                self._trade_pnl[timestamp] = (prev_position + position_chg, prev_realized + realized_chg,
                                              prev_fee + fee_chg, prev_commission + commission_chg, open_qty, weighted_avg_price)
            self.calc_net_pnl(timestamp)
    def calc_net_pnl(self, timestamp):
        '''Compute and cache net pnl (realized + unrealized - costs) at the given
        account timestamp. No-op if already computed, before the first trade, or
        after expiry once the final pnl is frozen.'''
        if timestamp in self._net_pnl: return
        if timestamp < self.first_trade_timestamp: return
        # TODO: Option expiry should be a special case. If option expires at 3:00 pm, we put in an expiry order at 3 pm and the
        # trade comes in at 3:01 pm. In this case, the final pnl is recorded at 3:01 but should be at 3 pm.
        if self.contract.expiry is not None and timestamp > self.contract.expiry and self.final_pnl is not None: return
        i = np.searchsorted(self._account_timestamps, timestamp)
        assert(self._account_timestamps[i] == timestamp)
        # Find the index before or equal to current timestamp. If not found, set to 0's
        trade_pnl_index = find_index_before(self._trade_pnl, timestamp)
        if trade_pnl_index == -1:
            # NOTE(review): open_qty appears twice in this target list (likely a typo);
            # harmless since all values are 0, but one name was probably meant to differ.
            realized, fee, commission, open_qty, open_qty, weighted_avg_price = 0, 0, 0, 0, 0, 0
        else:
            _, (_, realized, fee, commission, open_qty, weighted_avg_price) = self._trade_pnl.peekitem(trade_pnl_index)
        price = np.nan
        if math.isclose(open_qty, 0):
            # Flat position: nothing unrealized, no need to look up a price
            unrealized = 0
        else:
            price = self._price_function(self.contract, self._account_timestamps, i, self.strategy_context)
            assert np.isreal(price), \
                f'Unexpected price type: {price} {type(price)} for contract: {self.contract} timestamp: {self._account_timestamps[i]}'
            if math.isnan(price):
                # No price available: carry forward the last computed unrealized pnl
                index = find_index_before(self._net_pnl, timestamp) # Last index we computed net pnl for
                if index == -1:
                    prev_unrealized = 0
                else:
                    _, (_, prev_unrealized, _) = self._net_pnl.peekitem(index)
                unrealized = prev_unrealized
            else:
                unrealized = open_qty * (price - weighted_avg_price) * self.contract.multiplier
        net_pnl = realized + unrealized - commission - fee
        self._net_pnl[timestamp] = (price, unrealized, net_pnl)
        # Freeze pnl once we are past expiry
        if self.contract.expiry is not None and timestamp > self.contract.expiry:
            self.final_pnl = net_pnl
    def calc_net_pnl_old(self, timestamp):
        # NOTE(review): superseded by calc_net_pnl above (differs only in expiry
        # handling); kept for reference — consider deleting.
        if timestamp in self._net_pnl: return
        if timestamp < self.first_trade_timestamp: return
        if self.contract.expiry is not None and timestamp > self.contract.expiry: return # We would have calc'ed when the exit trade came in
        i = np.searchsorted(self._account_timestamps, timestamp)
        assert(self._account_timestamps[i] == timestamp)
        # Find the index before or equal to current timestamp. If not found, set to 0's
        trade_pnl_index = find_index_before(self._trade_pnl, timestamp)
        if trade_pnl_index == -1:
            realized, fee, commission, open_qty, open_qty, weighted_avg_price = 0, 0, 0, 0, 0, 0
        else:
            _, (_, realized, fee, commission, open_qty, weighted_avg_price) = self._trade_pnl.peekitem(trade_pnl_index)
        price = np.nan
        if not math.isclose(open_qty, 0):
            price = self._price_function(self.contract, self._account_timestamps, i, self.strategy_context)
            assert np.isreal(price), \
                f'Unexpected price type: {price} {type(price)} for contract: {self.contract} timestamp: {self._account_timestamps[i]}'
            if math.isnan(price):
                index = find_index_before(self._net_pnl, timestamp) # Last index we computed net pnl for
                if index == -1:
                    prev_unrealized = 0
                else:
                    _, (_, prev_unrealized, _) = self._net_pnl.peekitem(index)
                unrealized = prev_unrealized
            else:
                unrealized = open_qty * (price - weighted_avg_price) * self.contract.multiplier
        net_pnl = realized + unrealized - commission - fee
        self._net_pnl[timestamp] = (price, unrealized, net_pnl)
        #print(f'{self._trade_pnl} net_pnl: {net_pnl} {self.contract.symbol} i: {i} {timestamp} price: {price} realized: {realized}' + (
        #    f' unrealized: {unrealized} commission: {commission} fee: {fee} open_qty: {open_qty} wap: {weighted_avg_price}'))
    def position(self, timestamp):
        '''Return the net position at (or the last recorded before) timestamp; 0 if no trades yet.'''
        index = find_index_before(self._trade_pnl, timestamp)
        if index == -1: return 0.
        _, (position, _, _, _, _, _) = self._trade_pnl.peekitem(index) # Less than or equal to timestamp
        return position
    def net_pnl(self, timestamp):
        '''Return net pnl at (or the last computed before) timestamp; the frozen
        final pnl after expiry; 0 if never computed.'''
        if self.contract.expiry is not None and timestamp > self.contract.expiry and self.final_pnl is not None:
            return self.final_pnl
        index = find_index_before(self._net_pnl, timestamp)
        if index == -1: return 0.
        _, (_, _, net_pnl) = self._net_pnl.peekitem(index) # Less than or equal to timestamp
        return net_pnl
    def pnl(self, timestamp):
        '''Return (position, price, realized, unrealized, fee, commission, net_pnl)
        as of timestamp, defaulting each component to 0 when not yet recorded.'''
        index = find_index_before(self._trade_pnl, timestamp)
        position, realized, fee, commission, price, unrealized, net_pnl = 0, 0, 0, 0, 0, 0, 0
        if index != -1:
            _, (position, realized, fee, commission, _, _) = self._trade_pnl.peekitem(index) # Less than or equal to timestamp
        index = find_index_before(self._net_pnl, timestamp)
        if index != -1:
            _, (price, unrealized, net_pnl) = self._net_pnl.peekitem(index) # Less than or equal to timestamp
        return position, price, realized, unrealized, fee, commission, net_pnl
    def df(self):
        '''Returns a pandas dataframe with pnl data'''
        df_trade_pnl = pd.DataFrame.from_records([
            (k, v[0], v[1], v[2], v[3]) for k, v in self._trade_pnl.items()],
            columns = ['timestamp', 'position', 'realized', 'fee', 'commission'])
        df_net_pnl = pd.DataFrame.from_records([
            (k, v[0], v[1], v[2]) for k, v in self._net_pnl.items()],
            columns = ['timestamp', 'price', 'unrealized', 'net_pnl'])
        # Align both series on the union of their timestamps, forward-filling gaps
        all_timestamps = np.unique(np.concatenate((df_trade_pnl.timestamp.values, df_net_pnl.timestamp.values)))
        #position = df_trade_pnl.set_index('timestamp').position.reindex(all_timestamps, fill_value = 0)
        df_trade_pnl = df_trade_pnl.set_index('timestamp').reindex(all_timestamps, method = 'ffill').reset_index()
        # Timestamps before the first trade get zeros rather than NaN
        df_trade_pnl = leading_nan_to_zero(df_trade_pnl, ['position', 'realized', 'fee', 'commission'])
        #df_trade_pnl.position = position.values
        df_net_pnl = df_net_pnl.set_index('timestamp').reindex(all_timestamps, method = 'ffill').reset_index()
        del df_net_pnl['timestamp']
        df = pd.concat([df_trade_pnl, df_net_pnl], axis = 1)
        df['symbol'] = self.contract.symbol
        df = df[['symbol', 'timestamp', 'position', 'price', 'unrealized', 'realized', 'commission', 'fee', 'net_pnl']]
        return df
def _get_calc_timestamps(timestamps, pnl_calc_time):
time_delta = np.timedelta64(pnl_calc_time, 'm')
calc_timestamps = np.unique(timestamps.astype('M8[D]')) + time_delta
calc_indices = np.searchsorted(timestamps, calc_timestamps, side='left') - 1
if calc_indices[0] == -1: calc_indices[0] = 0
return np.unique(timestamps[calc_indices])
class Account:
    '''An Account calculates pnl for a set of contracts'''
    def __init__(self, contract_groups, timestamps, price_function, strategy_context, starting_equity = 1.0e6, pnl_calc_time = 15 * 60):
        '''
        Args:
            contract_groups (list of :obj:`ContractGroup`): Contract groups that we want to compute PNL for
            timestamps (list of np.datetime64): Timestamps that we might compute PNL at
            price_function (function): Function that takes a symbol, timestamps, index, strategy context and
                returns the price used to compute pnl
            strategy_context: Opaque context object passed through to price_function
            starting_equity (float, optional): Starting equity in account currency.  Default 1.e6
            pnl_calc_time (int, optional): Number of minutes past midnight that we should calculate PNL at.  Default 15 * 60, i.e. 3 pm
        '''
        self.starting_equity = starting_equity
        self._price_function = price_function
        self.strategy_context = strategy_context
        self.timestamps = timestamps
        # One "daily" pnl timestamp per day plus any explicitly requested ones
        self.calc_timestamps = _get_calc_timestamps(timestamps, pnl_calc_time)
        self.contracts = set()
        self._trades = []
        # timestamp -> total net pnl across all contracts
        self._pnl = SortedDict()
        # contract group name -> list of ContractPNL, for fast position lookup
        self.symbol_pnls_by_contract_group = defaultdict(list)
        # symbol -> ContractPNL
        self.symbol_pnls = {}
    def symbols(self):
        # Symbols of all contracts this account has seen trades for
        return [contract.symbol for contract in self.contracts]
    def _add_contract(self, contract, timestamp):
        # Register per-contract pnl tracking the first time a contract trades
        if contract.symbol in self.symbol_pnls:
            raise Exception(f'Already have contract with symbol: {contract.symbol} {contract}')
        contract_pnl = ContractPNL(contract, self.timestamps, self._price_function, self.strategy_context)
        self.symbol_pnls[contract.symbol] = contract_pnl
        # For fast lookup in position function
        self.symbol_pnls_by_contract_group[contract.contract_group.name].append(contract_pnl)
        self.contracts.add(contract)
    def add_trades(self, trades):
        '''Add trades (any order); they are sorted and dispatched per contract.'''
        trades = sorted(trades, key = lambda x : getattr(x, 'timestamp'))
        # Break up trades by contract so we can add them in a batch
        trades_by_contract = defaultdict(list)
        for trade in trades:
            contract = trade.contract
            if contract not in self.contracts: self._add_contract(contract, trade.timestamp)
            trades_by_contract[contract].append(trade)
        for contract, contract_trades in trades_by_contract.items():
            contract_trades.sort(key=lambda x: x.timestamp)
            self.symbol_pnls[contract.symbol]._add_trades(contract_trades)
        self._trades += trades
    def calc(self, timestamp):
        '''
        Computes P&L and stores it internally for all contracts.

        Args:
            timestamp (np.datetime64): timestamp to compute P&L at.  Account remembers the last timestamp it computed P&L up to and will compute P&L
                between these and including timestamp. If there is more than one day between the last index and current index, we will
                include pnl for at the defined pnl_calc_time for those dates as well.
        '''
        if timestamp in self._pnl: return
        prev_idx = find_index_before(self._pnl, timestamp)
        # NOTE(review): prev_idx indexes into self._pnl but is used to index
        # self.timestamps — these are different sequences; confirm intent.
        prev_timestamp = None if prev_idx == -1 else self.timestamps[prev_idx]
        # Find the last timestamp per day that is between the previous index we computed and the current index,
        # so we can compute daily pnl in addition to the current index pnl
        calc_timestamps = self.calc_timestamps
        intermediate_calc_timestamps = calc_timestamps[calc_timestamps <= timestamp]
        if prev_timestamp is not None:
            intermediate_calc_timestamps = intermediate_calc_timestamps[intermediate_calc_timestamps > prev_timestamp]
        # Always include the requested timestamp itself
        if not len(intermediate_calc_timestamps) or intermediate_calc_timestamps[-1] != timestamp:
            intermediate_calc_timestamps = np.append(intermediate_calc_timestamps, timestamp)
        for ts in intermediate_calc_timestamps:
            net_pnl = 0
            for symbol_pnl in self.symbol_pnls.values():
                symbol_pnl.calc_net_pnl(ts)
                net_pnl += symbol_pnl.net_pnl(ts)
            self._pnl[ts] = net_pnl
    def position(self, contract_group, timestamp):
        '''Returns netted position for a contract_group at a given date in number of contracts or shares.'''
        position = 0
        for symbol_pnl in self.symbol_pnls_by_contract_group[contract_group.name]:
            position += symbol_pnl.position(timestamp)
        return position
        #for contract in contract_group.contracts:
        #    symbol = contract.symbol
        #    if symbol not in self.symbol_pnls: continue
        #    position += self.symbol_pnls[symbol].position(timestamp)
        #return position
    def positions(self, contract_group, timestamp):
        '''
        Returns all non-zero positions in a contract group
        '''
        positions = []
        for contract in contract_group.contracts:
            symbol = contract.symbol
            if symbol not in self.symbol_pnls: continue
            position = self.symbol_pnls[symbol].position(timestamp)
            if not math.isclose(position, 0): positions.append((contract, position))
        return positions
    def equity(self, timestamp):
        '''Returns equity in this account in Account currency.  Will cause calculation if Account has not previously
        calculated up to this date'''
        pnl = self._pnl.get(timestamp)
        if pnl is None:
            self.calc(timestamp)
            pnl = self._pnl[timestamp]
        return self.starting_equity + pnl
    def trades(self, contract_group = None, start_date = None, end_date = None):
        '''Returns a list of trades with the given symbol and with trade date between (and including) start date
        and end date if they are specified. If symbol is None trades for all symbols are returned'''
        start_date, end_date = str2date(start_date), str2date(end_date)
        return [trade for trade in self._trades if (start_date is None or trade.timestamp >= start_date) and (
            end_date is None or trade.timestamp <= end_date) and (
            contract_group is None or trade.contract.contract_group == contract_group)]
    def df_pnl(self, contract_groups = None):
        '''
        Returns a dataframe with P&L columns broken down by contract group and symbol

        Args:
            contract_group (:obj:`ContractGroup`, optional): Return PNL for this contract group.
                If None (default), include all contract groups
        '''
        if contract_groups is None:
            contract_groups = set([contract.contract_group for contract in self.contracts])
        if isinstance(contract_groups, ContractGroup): contract_groups = [contract_groups]
        dfs = []
        for contract_group in contract_groups:
            for contract in contract_group.contracts:
                symbol = contract.symbol
                if symbol not in self.symbol_pnls: continue
                df = self.symbol_pnls[symbol].df()
                # Drop trailing rows after the pnl stopped changing (flat tail)
                if len(df) > 1:
                    net_pnl_diff = np.diff(df.net_pnl.values) # np.diff returns a vector one shorter than the original
                    last_index = np.nonzero(net_pnl_diff)
                    if len(last_index[0]):
                        last_index = last_index[0][-1] + 1
                        df = df.iloc[:last_index + 1]
                df['contract_group'] = contract_group.name
                dfs.append(df)
        ret_df = pd.concat(dfs)
        ret_df = ret_df.sort_values(by = ['timestamp', 'contract_group', 'symbol'])
        ret_df = ret_df[['timestamp', 'contract_group', 'symbol', 'position', 'price', 'unrealized', 'realized',
                         'commission', 'fee', 'net_pnl']]
        return ret_df
    def df_account_pnl(self, contract_group = None):
        '''
        Returns PNL at the account level.

        Args:
            contract_group (:obj:`ContractGroup`, optional): If set, we only return pnl for this contract_group
        '''
        if contract_group is not None:
            symbols = [contract.symbol for contract in contract_group.contracts if contract.symbol in self.symbol_pnls]
            symbol_pnls = [self.symbol_pnls[symbol] for symbol in symbols]
        else:
            symbol_pnls = self.symbol_pnls.values()
        timestamps = self.calc_timestamps
        # NOTE(review): np.float is a deprecated numpy alias (removed in numpy 1.24);
        # consider using the builtin float dtype.
        position = np.full(len(timestamps), 0., dtype = np.float)
        realized = np.full(len(timestamps), 0., dtype = np.float)
        unrealized = np.full(len(timestamps), 0., dtype = np.float)
        fee = np.full(len(timestamps), 0., dtype = np.float)
        commission = np.full(len(timestamps), 0., dtype = np.float)
        net_pnl = np.full(len(timestamps), 0., dtype = np.float)
        # Sum each pnl component across contracts, skipping non-finite values
        for i, timestamp in enumerate(timestamps):
            for symbol_pnl in symbol_pnls:
                _position, _price, _realized, _unrealized, _fee, _commission, _net_pnl = symbol_pnl.pnl(timestamp)
                if math.isfinite(_position): position[i] += _position
                if math.isfinite(_realized): realized[i] += _realized
                if math.isfinite(_unrealized): unrealized[i] += _unrealized
                if math.isfinite(_fee): fee[i] += _fee
                if math.isfinite(_commission): commission[i] += _commission
                if math.isfinite(_net_pnl): net_pnl[i] += _net_pnl
        df = pd.DataFrame.from_records(zip(timestamps, position, unrealized, realized, commission, fee, net_pnl),
                                       columns = ['timestamp', 'position', 'unrealized', 'realized', 'commission', 'fee', 'net_pnl'])
        df['equity'] = self.starting_equity + df.net_pnl
        return df[['timestamp', 'position', 'unrealized', 'realized', 'commission', 'fee', 'net_pnl', 'equity']]
    def df_trades(self, contract_group = None, start_date = None, end_date = None):
        '''
        Returns a dataframe of trades

        Args:
            contract_group (:obj:`ContractGroup`, optional): Return trades for this contract group.
                If None (default), include all contract groups
            start_date (:obj:`np.datetime64`, optional): Include trades with date greater than or equal to this timestamp.
            end_date (:obj:`np.datetime64`, optional): Include trades with date less than or equal to this timestamp.
        '''
        start_date, end_date = str2date(start_date), str2date(end_date)
        trades = self.trades(contract_group, start_date, end_date)
        df = pd.DataFrame.from_records([(trade.contract.symbol, trade.timestamp, trade.qty, trade.price,
                                         trade.fee, trade.commission, trade.order.timestamp, trade.order.qty,
                                         trade.order.reason_code,
                                         (str(trade.order.properties.__dict__) if trade.order.properties.__dict__ else ''),
                                         (str(trade.contract.properties.__dict__) if trade.contract.properties.__dict__ else '')
                                        ) for trade in trades],
                                       columns = ['symbol', 'timestamp', 'qty', 'price', 'fee', 'commission', 'order_date', 'order_qty',
                                                  'reason_code', 'order_props', 'contract_props'])
        df = df.sort_values(by = ['timestamp', 'symbol'])
        return df
def test_account():
    '''Regression test: two contracts, four trades, known pnl/position values.'''
    from pyqstrat.pq_types import Contract, ContractGroup, Trade
    from pyqstrat.orders import MarketOrder

    # Deterministic synthetic close prices: per-symbol base plus timestamp index.
    base_prices = {"IBM": 10.1, "MSFT": 15.3}

    def get_close_price(contract, timestamps, idx, strategy_context):
        if contract.symbol not in base_prices:
            raise Exception(f'unknown contract: {contract}')
        return idx + base_prices[contract.symbol]

    # Start from a clean registry so the test is repeatable within a session.
    ContractGroup.clear()
    Contract.clear()
    ibm_cg = ContractGroup.create('IBM')
    msft_cg = ContractGroup.create('MSFT')
    ibm_contract = Contract.create('IBM', contract_group = ibm_cg)
    msft_contract = Contract.create('MSFT', contract_group = msft_cg)
    timestamps = np.array(['2018-01-01 09:00', '2018-01-02 08:00', '2018-01-02 09:00', '2018-01-05 13:35'], dtype = 'M8[m]')
    account = Account([ibm_cg, msft_cg], timestamps, get_close_price, None)
    fills = [
        Trade(ibm_contract, np.datetime64('2018-01-02 08:00'), 10, 10.1, commission = 0.01,
              order = MarketOrder(ibm_contract, np.datetime64('2018-01-01 09:00'), 10)),
        Trade(ibm_contract, np.datetime64('2018-01-02 09:00'), -20, 15.1, commission = 0.02,
              order = MarketOrder(ibm_contract, np.datetime64('2018-01-01 09:00'), -20)),
        Trade(msft_contract, timestamps[1], 20, 13.2, commission = 0.04,
              order = MarketOrder(msft_contract, timestamps[1], 15)),
        Trade(msft_contract, timestamps[2], 20, 16.2, commission = 0.05,
              order = MarketOrder(msft_contract, timestamps[2], 20)),
    ]
    account.add_trades(fills)
    account.calc(np.datetime64('2018-01-05 13:35'))
    assert len(account.df_trades()) == 4
    assert len(account.df_pnl()) == 6
    assert np.allclose(np.array([9.99, 61.96, 79.97, 103.91, 69.97, 143.91]),
                       account.df_pnl().net_pnl.values, rtol = 0)
    assert np.allclose(np.array([10, 20, -10, 40, -10, 40]), account.df_pnl().position.values, rtol = 0)
    assert np.allclose(np.array([1000000. , 1000183.88, 1000213.88]),
                       account.df_account_pnl().equity.values, rtol = 0)
if __name__ == "__main__":
test_account()
import doctest
doctest.testmod(optionflags = doctest.NORMALIZE_WHITESPACE)
```
| github_jupyter |
```
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
color = sns.color_palette()
%matplotlib inline
matplotlib.style.use('ggplot')
import time
import numpy as np
import pandas as pd
from IPython.display import display
# remove warnings
import warnings
warnings.filterwarnings('ignore')
import lightgbm as lgbm
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve
from itertools import product
# my module
from conf.configure import Configure
from utils import data_utils, dataframe_util
from utils.common_utils import common_num_range
import model.get_datasets as gd
```
# Load Datasets
```
# Pre-built feature datasets ("HL" feature set) for train and test.
train = pd.read_csv(Configure.base_path + 'huang_lin/train_dataHL.csv')
test = pd.read_csv(Configure.base_path + 'huang_lin/test_dataHL.csv')
# Binary classification target column, separated from the features.
y_train = train['orderType']
train.drop(['orderType'], axis=1, inplace=True)
df_columns = train.columns.values
# NOTE(review): the label says "1:0" but the value printed is the fraction of
# positive labels (positives / total), not a ratio of positives to negatives.
print('train: {}, test: {}, feature count: {}, orderType 1:0 = {:.5f}'.format(
    train.shape[0], test.shape[0], len(df_columns), 1.0*sum(y_train) / len(y_train)))
# LightGBM dataset wrapper used by cv/train below.
dtrain = lgbm.Dataset(train, label=y_train)
```
# Parameter Fine Tuning
```
import lightgbm as lgbm
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve
from itertools import product
def model_cross_validate(model_params, cv_param_dict, dtrain, cv_num_boost_round=4000, early_stopping_rounds=100, cv_nfold=5, stratified=True):
    """Grid-search hyper-parameters with LightGBM cross-validation.

    Every combination of the candidate values in ``cv_param_dict`` is written
    into ``model_params`` and scored with ``lgbm.cv``; the score is the mean
    AUC over the five rounds preceding the best iteration.  After the search,
    ``model_params`` is updated in place with the best combination found.

    Parameters
    ----------
    model_params : dict
        Base LightGBM parameters; mutated in place during and after the search.
    cv_param_dict : dict
        Mapping of parameter name -> list of candidate values.
    dtrain : lgbm.Dataset
        Training data.
    cv_num_boost_round, early_stopping_rounds, cv_nfold, stratified
        Passed straight through to ``lgbm.cv``.
    """
    # list() is required under Python 3: dict.keys() returns a view, which is
    # not indexable (the original code also used the Python-2-only xrange).
    params_name = list(cv_param_dict.keys())
    params_value = [cv_param_dict[param] for param in params_name]
    max_auc = 0
    best_param = None  # guards against an empty parameter grid
    for param_pair in product(*params_value):
        # Apply this combination to model_params and build a display label.
        param_str = ''
        for name, value in zip(params_name, param_pair):
            param_str += name + '=' + str(value) + ' '
            model_params[name] = value
        start = time.time()
        cv_result = lgbm.cv(model_params, dtrain, num_boost_round=cv_num_boost_round, stratified=stratified,
                            nfold=cv_nfold, early_stopping_rounds=early_stopping_rounds)
        # Score: mean AUC of the 5 rounds preceding the best iteration,
        # which smooths out single-round noise.
        best_num_boost_rounds = len(cv_result['auc-mean'])
        mean_test_auc = np.mean(cv_result['auc-mean'][best_num_boost_rounds-6 : best_num_boost_rounds-1])
        if mean_test_auc > max_auc:
            best_param = param_pair
            max_auc = mean_test_auc
        end = time.time()
        print('{}, best_ntree_limit:{}, auc = {:.7f}, cost: {}s'.format(param_str, best_num_boost_rounds,
                                                                        mean_test_auc, end-start))
    if best_param is None:
        raise ValueError('cv_param_dict produced no parameter combinations')
    # Re-apply the winning combination so model_params leaves with the best values.
    param_str = ''
    for name, value in zip(params_name, best_param):
        param_str += name + '=' + str(value) + ' '
        model_params[name] = value
    print('===========best paramter: {} auc={:.7f}==========='.format(param_str, max_auc))
```
### Step 1: Fix learning rate and number of estimators for tuning tree-based parameters
### Baseline model
```
# Baseline LightGBM configuration: binary objective scored by AUC, with
# moderate subsampling and L1/L2 regularization.  Key order is kept so the
# dict displays identically when inspected later.
lgbm_params = {
    'objective': 'binary',
    'metric': 'auc',
    'boosting_type': 'gbdt',
    'min_split_gain': 0,
    'min_child_weight': 4,
    'learning_rate': 0.1,
    'num_leaves': 64,
    'min_sum_hessian_in_leaf': 0.1,
    'feature_fraction': 0.5,
    'feature_fraction_seed': 10,
    'bagging_fraction': 0.6,
    'bagging_seed': 10,
    'lambda_l1': 0.5,
    'lambda_l2': 0.5,
    'num_thread': -1,
    'verbose': 0,
}
print('---> calc baseline model')

# Cross-validation settings: 5-fold stratified CV, up to 4000 boosting
# rounds, stopping 100 rounds after the AUC stops improving.
cv_num_boost_round = 4000
early_stopping_rounds = 100
cv_nfold = 5
stratified = True

cv_result = lgbm.cv(
    lgbm_params,
    dtrain,
    num_boost_round=cv_num_boost_round,
    nfold=cv_nfold,
    stratified=stratified,
    early_stopping_rounds=early_stopping_rounds,
)

# Baseline score: mean AUC over the 5 rounds preceding the best iteration.
best_num_boost_rounds = len(cv_result['auc-mean'])
mean_test_auc = np.mean(cv_result['auc-mean'][best_num_boost_rounds - 6:best_num_boost_rounds - 1])
print('mean_test_auc = {:.7f}\n'.format(mean_test_auc))
```
### Fine tune *num_leaves*
```
# Search over tree complexity (number of leaves): 32, 64, 128.
cv_paramters = {'num_leaves': [32, 64, 128]}  # same values as 2**5, 2**6, 2**7
model_cross_validate(lgbm_params, cv_paramters, dtrain)
```
### Tune bagging_fraction and feature_fraction
```
# Jointly tune row subsampling and feature subsampling over [0.5, 1.0] in 0.1 steps.
cv_paramters = {
    'bagging_fraction': common_num_range(0.5, 1.1, 0.1),
    'feature_fraction': common_num_range(0.5, 1.1, 0.1),
}
model_cross_validate(lgbm_params, cv_paramters, dtrain)
```
### Tuning Regularization Parameters: lambda_l1, lambda_l2
```
# Tune L1 and L2 regularization strength over a coarse grid.
candidate_lambdas = [0, 1, 10, 50, 100]
cv_paramters = {
    'lambda_l1': candidate_lambdas,
    'lambda_l2': candidate_lambdas,
}
model_cross_validate(lgbm_params, cv_paramters, dtrain)
```
### Reducing Learning Rate and Done!
```
# Adopt the subsampling settings selected above, then display the final params.
lgbm_params.update(feature_fraction=0.9, bagging_fraction=0.7)
lgbm_params
```
| github_jupyter |
# #2 Discovering Butterfree - Spark Functions and Window
Welcome to Discovering Butterfree tutorial series!
This is the second tutorial of this series: its goal is to cover spark functions and windows definition.
Before diving into the tutorial, make sure you have a basic understanding of these main data concepts: features, feature sets and the "Feature Store Architecture"; you can read more about these in the Butterfree documentation.
## Example:
Simulating the following scenario (the same from previous tutorial):
- We want to create a feature set with features about houses for rent (listings).
- We are interested in houses only for the **Kanto** region.
We have two sets of data:
- Table: `listing_events`. Table with data about events of house listings.
- File: `region.json`. Static file with data about the cities and regions.
Our desire is to have result dataset with the following schema:
* id: **int**;
* timestamp: **timestamp**;
* rent: **float**;
* rent__avg_over_2_events_row_windows: **float**;
* rent__stddev_pop_over_2_events_row_windows: **float**;
* rent_over_area: **float**;
* bedrooms: **int**;
* bathrooms: **int**;
* area: **float**;
* bedrooms_over_area: **float**;
* bathrooms_over_area: **float**;
* latitude: **double**;
* longitude: **double**;
* lat_lng__h3_hash__10: **string**;
* city: **string**;
* region: **string**.
Note that we're going to compute two aggregated features, rent average and standard deviation, considering the two last occurrences (or events). It'd also be possible to define time windows, instead of windows based on events.
For more information about H3 geohash click [here](https://h3geo.org/docs/).
The following code blocks will show how to generate this feature set using Butterfree library:
```
# setup spark
# Local Spark session; the driver host is pinned to localhost so the notebook
# runs without any external network configuration.
from pyspark import SparkContext, SparkConf
from pyspark.sql import session
conf = SparkConf().set('spark.driver.host','127.0.0.1')
sc = SparkContext(conf=conf)
spark = session.SparkSession(sc)
# fix working dir
# Move two levels up (to the repository root) so that relative example paths
# such as examples/data/... resolve correctly; `path` is reused below.
import pathlib
import os
path = os.path.join(pathlib.Path().absolute(), '../..')
os.chdir(path)
```
### Showing test data
```
# Read the example JSON datasets from the repository's examples folder.
listing_evengs_df = spark.read.json(f"{path}/examples/data/listing_events.json")
listing_evengs_df.createOrReplaceTempView("listing_events") # creating listing_events view
region = spark.read.json(f"{path}/examples/data/region.json")
```
Listing events table:
```
# Render the listing events as a pandas DataFrame for inline display.
listing_evengs_df.toPandas()
```
Region table:
```
# Render the region reference data as a pandas DataFrame for inline display.
region.toPandas()
```
### Extract
- For the extract part, we need the `Source` entity and the `FileReader` and `TableReader` for the data we have;
- We need to declare a query with the rule for joining the results of the readers too;
- As proposed in the problem we can filter the region dataset to get only **Kanto** region.
```
from butterfree.clients import SparkClient
from butterfree.extract import Source
from butterfree.extract.readers import FileReader, TableReader
from butterfree.extract.pre_processing import filter
readers = [
TableReader(id="listing_events", table="listing_events",),
FileReader(id="region", path=f"{path}/examples/data/region.json", format="json",).with_(
transformer=filter, condition="region == 'Kanto'"
),
]
query = """
select
listing_events.*,
region.city,
region.region,
region.lat,
region.lng,
region.region as region_name
from
listing_events
join region
on listing_events.region_id = region.id
"""
source = Source(readers=readers, query=query)
spark_client = SparkClient()
source_df = source.construct(spark_client)
```
And, finally, it's possible to see the results from building our source dataset:
```
# Inspect the joined (extract-stage) dataset.
source_df.toPandas()
```
### Transform
- At the transform part, a set of `Feature` objects is declared;
- An Instance of `FeatureSet` is used to hold the features;
- A `FeatureSet` can only be created when it is possible to define a unique tuple formed by key columns and a time reference. This is an **architectural requirement** for the data. So at least one `KeyFeature` and one `TimestampFeature` are needed;
- Every `Feature` needs a unique name, a description, and a data-type definition;
- A `H3HashTransform` is used to convert specific locations to a h3 hash;
- A `CustomTransform` operator is used to illustrate how custom transform methods can be used within a `FeatureSet`;
- Finally, a `SparkFunctionTransform` is defined in order to compute mean and standard deviation for rent, considering the last two events (row window definition).
```
from pyspark.sql import functions as F
from butterfree.transform import FeatureSet
from butterfree.transform.features import Feature, KeyFeature, TimestampFeature
from butterfree.transform.transformations import (
CustomTransform,
SparkFunctionTransform,
)
from butterfree.transform.transformations.h3_transform import H3HashTransform
from butterfree.constants import DataType
from butterfree.transform.utils import Function
def divide(df, fs, column1, column2):
    """Custom transform: write column1 / column2 into the feature's output column."""
    output_column = fs.get_output_columns()[0]
    return df.withColumn(output_column, F.col(column1) / F.col(column2))
# Key feature(s): together with the timestamp they uniquely identify a row.
keys = [
    KeyFeature(
        name="id",
        description="Unique identificator code for houses.",
        dtype=DataType.BIGINT,
    )
]
# from_ms = True because the data originally is not in a Timestamp format.
ts_feature = TimestampFeature(from_column="timestamp", from_ms=True)
features = [
    # Raw rent value, passed through unchanged.
    Feature(
        name="rent",
        description="Rent value by month described in the listing.",
        dtype=DataType.FLOAT,
    ),
    # Same source column, aggregated: avg and stddev_pop over a row window of
    # the last 2 events per house id (not a time window).
    Feature(
        name="rent",
        description="Rent value by month described in the listing.",
        dtype=DataType.FLOAT,
        transformation=SparkFunctionTransform(
            functions=[
                Function(F.avg, DataType.FLOAT),
                Function(F.stddev_pop, DataType.FLOAT),
            ]
        ).with_window(
            partition_by="id",
            mode="row_windows",
            window_definition=["2 events"],
        ),
    ),
    # Ratio features computed with the custom `divide` transformer above.
    Feature(
        name="rent_over_area",
        description="Rent value by month divided by the area of the house.",
        transformation=CustomTransform(
            transformer=divide, column1="rent", column2="area",
        ),
        dtype=DataType.FLOAT,
    ),
    Feature(
        name="bedrooms",
        description="Number of bedrooms of the house.",
        dtype=DataType.INTEGER,
    ),
    Feature(
        name="bathrooms",
        description="Number of bathrooms of the house.",
        dtype=DataType.INTEGER,
    ),
    Feature(
        name="area",
        description="Area of the house, in squared meters.",
        dtype=DataType.FLOAT,
    ),
    Feature(
        name="bedrooms_over_area",
        description="Number of bedrooms divided by the area.",
        transformation=CustomTransform(
            transformer=divide, column1="bedrooms", column2="area",
        ),
        dtype=DataType.FLOAT,
    ),
    Feature(
        name="bathrooms_over_area",
        description="Number of bathrooms divided by the area.",
        transformation=CustomTransform(
            transformer=divide, column1="bathrooms", column2="area",
        ),
        dtype=DataType.FLOAT,
    ),
    Feature(
        name="latitude",
        description="House location latitude.",
        from_column="lat", # arg from_column is needed when changing column name
        dtype=DataType.DOUBLE,
    ),
    Feature(
        name="longitude",
        description="House location longitude.",
        from_column="lng",
        dtype=DataType.DOUBLE,
    ),
    # H3 geohash of (latitude, longitude) at resolution 10.
    Feature(
        name="h3",
        description="H3 hash geohash.",
        transformation=H3HashTransform(
            h3_resolutions=[10], lat_column="latitude", lng_column="longitude",
        ),
        dtype=DataType.STRING,
    ),
    Feature(name="city", description="House location city.", dtype=DataType.STRING,),
    Feature(
        name="region",
        description="House location region.",
        from_column="region_name",
        dtype=DataType.STRING,
    ),
]
# Bundle keys, timestamp and features into a single feature set definition.
feature_set = FeatureSet(
    name="house_listings",
    entity="house", # entity: to which "business context" this feature set belongs
    description="Features describring a house listing.",
    keys=keys,
    timestamp=ts_feature,
    features=features,
)
# Apply all declared transformations to the extract-stage DataFrame.
feature_set_df = feature_set.construct(source_df, spark_client)
```
The resulting dataset from running the transformations defined within the `FeatureSet` is:
```
# Inspect the transformed feature set.
feature_set_df.toPandas()
```
### Load
- For the load part we need `Writer` instances and a `Sink`.
- writers define where to load the data.
- The `Sink` gets the transformed data (feature set) and trigger the load to all the defined `writers`.
- `debug_mode` will create a temporary view instead of trying to write in a real data store.
```
from butterfree.load.writers import (
HistoricalFeatureStoreWriter,
OnlineFeatureStoreWriter,
)
from butterfree.load import Sink
# debug_mode=True makes each writer create a temporary Spark view instead of
# writing to a real (historical/online) feature store.
writers = [HistoricalFeatureStoreWriter(debug_mode=True), OnlineFeatureStoreWriter(debug_mode=True)]
sink = Sink(writers=writers)
```
## Pipeline
- The `Pipeline` entity wraps all the other defined elements.
- `run` command will trigger the execution of the pipeline, end-to-end.
```
from butterfree.pipelines import FeatureSetPipeline
# Wire the extract (source), transform (feature_set) and load (sink) stages.
pipeline = FeatureSetPipeline(source=source, feature_set=feature_set, sink=sink)
# run() executes the pipeline end-to-end and returns the final DataFrame.
result_df = pipeline.run()
```
### Showing the results
```
# Historical feature store view: full history for every id.
spark.table("historical_feature_store__house_listings").orderBy(
    "id", "timestamp"
).toPandas()
# Online feature store view: only the latest record per id.
spark.table("online_feature_store__house_listings").orderBy("id", "timestamp").toPandas()
```
- We can see that we were able to create all the desired features in an easy way
- The **historical feature set** holds all the data, and we can see that it is partitioned by year, month and day (columns added in the `HistoricalFeatureStoreWriter`)
- In the **online feature set** there is only the latest data for each id
| github_jupyter |
# Calculate Coverage
You have a large region of interest. You need to identify an AOI for your study. One of the inputs to that decision is the coverage within the region. This notebook will walk you through that process.
In this notebook, we create the coverage map for PS Orthotiles collected in 2017 through August for the state of Iowa. The coverage calculation is performed in WGS84 because it covers a larger area than a single UTM zone.
Ideas for improvements:
- investigate projection
```
# Notebook dependencies
from __future__ import print_function
import datetime
import copy
# from functools import partial
import os
from IPython.display import display # , Image
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
# import pandas as pd
from planet import api
from planet.api import filters
# import pyproj
import rasterio
from rasterio import features as rfeatures
from shapely import geometry as sgeom
# import shapely.ops
%matplotlib inline
```
## Define AOI
Define the AOI as a geojson polygon. This can be done at [geojson.io](http://geojson.io). If you use geojson.io, only copy the single aoi feature, not the entire feature collection.
```
iowa = {"geometry": {"type": "Polygon", "coordinates": [[[-91.163064, 42.986781], [-91.14556, 42.90798], [-91.143375, 42.90467], [-91.117411, 42.895837], [-91.100565, 42.883078], [-91.077643, 42.803798], [-91.069549, 42.769628], [-91.064896, 42.757272], [-91.053733, 42.738238], [-90.976314, 42.695996], [-90.949213, 42.685573], [-90.896961, 42.674407], [-90.84391, 42.663071], [-90.769495, 42.651443], [-90.720209, 42.640758], [-90.709204, 42.636078], [-90.702671, 42.630756], [-90.645627, 42.5441], [-90.636727, 42.518702], [-90.636927, 42.513202], [-90.640927, 42.508302], [-90.655927, 42.491703], [-90.656527, 42.489203], [-90.656327, 42.483603], [-90.654027, 42.478503], [-90.646727, 42.471904], [-90.624328, 42.458904], [-90.559451, 42.430695], [-90.477279, 42.383794], [-90.474834, 42.381473], [-90.419027, 42.328505], [-90.416535, 42.325109], [-90.4162, 42.321314], [-90.424326, 42.293326], [-90.430735, 42.284211], [-90.430884, 42.27823], [-90.419326, 42.254467], [-90.391108, 42.225473], [-90.375129, 42.214811], [-90.356964, 42.205445], [-90.349162, 42.204277], [-90.316269, 42.1936], [-90.306531, 42.190439], [-90.234919, 42.165431], [-90.211328, 42.15401], [-90.167533, 42.122475], [-90.162225, 42.11488], [-90.161119, 42.104404], [-90.163405, 42.087613], [-90.168358, 42.075779], [-90.166495, 42.054543], [-90.164537, 42.045007], [-90.154221, 42.033073], [-90.150916, 42.02944], [-90.141167, 42.008931], [-90.140613, 41.995999], [-90.1516, 41.931002], [-90.152104, 41.928947], [-90.181973, 41.80707], [-90.187969, 41.803163], [-90.20844, 41.797176], [-90.222263, 41.793133], [-90.242747, 41.783767], [-90.278633, 41.767358], [-90.302782, 41.750031], [-90.309826, 41.743321], [-90.31522, 41.734264], [-90.317041, 41.729104], [-90.334525, 41.679559], [-90.343162, 41.648141], [-90.34165, 41.621484], [-90.39793, 41.572233], [-90.461432, 41.523533], [-90.474332, 41.519733], [-90.499475, 41.518055], [-90.605937, 41.494232], [-90.655839, 41.462132], [-90.737537, 41.450127], [-90.771672, 
41.450761], [-90.786282, 41.452888], [-90.847458, 41.455019], [-90.989976, 41.431962], [-91.027787, 41.423603], [-91.039872, 41.418523], [-91.047819, 41.4109], [-91.078682, 41.336089], [-91.079657, 41.333727], [-91.114186, 41.250029], [-91.113648, 41.241401], [-91.049808, 41.178033], [-91.019036, 41.16491], [-91.005503, 41.165622], [-90.997906, 41.162564], [-90.989663, 41.155716], [-90.946627, 41.096632], [-90.949383, 41.072711], [-90.949383, 41.07271], [-90.948523, 41.070248], [-90.945549, 41.06173], [-90.942253, 41.034702], [-90.952233, 40.954047], [-90.962916, 40.924957], [-90.968995, 40.919127], [-90.9985, 40.90812], [-91.009536, 40.900565], [-91.092993, 40.821079], [-91.097553, 40.808433], [-91.097031, 40.802471], [-91.094728, 40.797833], [-91.11194, 40.697018], [-91.112258, 40.696218], [-91.122421, 40.670675], [-91.138055, 40.660893], [-91.185415, 40.638052], [-91.18698, 40.637297], [-91.197906, 40.636107], [-91.218437, 40.638437], [-91.253074, 40.637962], [-91.306524, 40.626231], [-91.339719, 40.613488], [-91.348733, 40.609695], [-91.359873, 40.601805], [-91.405241, 40.554641], [-91.406851, 40.547557], [-91.404125, 40.539127], [-91.384531, 40.530948], [-91.369059, 40.512532], [-91.364211, 40.500043], [-91.36391, 40.490122], [-91.372554, 40.4012], [-91.375746, 40.391879], [-91.38836, 40.384929], [-91.419422, 40.378264], [-91.484507, 40.3839], [-91.490977, 40.393484], [-91.524612, 40.410765], [-91.619486, 40.507134], [-91.622362, 40.514362], [-91.618028, 40.53403], [-91.620071, 40.540817], [-91.696359, 40.588148], [-91.716769, 40.59853], [-91.729115, 40.61364], [-91.785916, 40.611488], [-91.795374, 40.611101], [-91.800133, 40.610953], [-91.813968, 40.610526], [-91.824826, 40.610191], [-91.832481, 40.609797], [-91.868401, 40.608059], [-91.943114, 40.605913], [-91.970988, 40.605112], [-91.998683, 40.604433], [-92.029649, 40.603713], [-92.067904, 40.602648], [-92.069521, 40.602772], [-92.082339, 40.602176], [-92.0832, 40.602244], [-92.092875, 40.602082], 
[-92.096387, 40.60183], [-92.17978, 40.600529], [-92.196162, 40.600069], [-92.201669, 40.59998], [-92.217603, 40.599832], [-92.236484, 40.599531], [-92.298754, 40.598469], [-92.331205, 40.597805], [-92.331445, 40.597714], [-92.350776, 40.597274], [-92.350807, 40.597273], [-92.379691, 40.596509], [-92.453745, 40.595288], [-92.461609, 40.595355], [-92.481692, 40.594941], [-92.482394, 40.594894], [-92.484588, 40.594924], [-92.580278, 40.592151], [-92.637898, 40.590853], [-92.639223, 40.590825], [-92.686693, 40.589809], [-92.689854, 40.589884], [-92.714598, 40.589564], [-92.742232, 40.589207], [-92.757407, 40.588908], [-92.828061, 40.588593], [-92.827992, 40.588515], [-92.835074, 40.588484], [-92.857391, 40.58836], [-92.863034, 40.588175], [-92.879178, 40.588341], [-92.889796, 40.588039], [-92.903544, 40.58786], [-92.941595, 40.587743], [-92.957747, 40.58743], [-93.085517, 40.584403], [-93.097296, 40.584014], [-93.098507, 40.583973], [-93.260612, 40.580797], [-93.317605, 40.580671], [-93.345442, 40.580514], [-93.374386, 40.580334], [-93.441767, 40.579916], [-93.465297, 40.580164], [-93.466887, 40.580072], [-93.524124, 40.580481], [-93.527607, 40.580436], [-93.528177, 40.580367], [-93.548284, 40.580417], [-93.553986, 40.580303], [-93.556899, 40.580235], [-93.558938, 40.580189], [-93.560798, 40.580304], [-93.56524, 40.580143], [-93.56581, 40.580075], [-93.566189, 40.580117], [-93.597352, 40.579496], [-93.656211, 40.578352], [-93.659272, 40.57833], [-93.661913, 40.578354], [-93.668845, 40.578241], [-93.677099, 40.578127], [-93.690333, 40.577875], [-93.722443, 40.577641], [-93.728355, 40.577547], [-93.737259, 40.577542], [-93.742759, 40.577518], [-93.750223, 40.57772], [-93.770231, 40.577615], [-93.774344, 40.577584], [-93.815485, 40.577278], [-93.818725, 40.577086], [-93.84093, 40.576791], [-93.853656, 40.576606], [-93.898327, 40.576011], [-93.899317, 40.575942], [-93.900877, 40.575874], [-93.913961, 40.575672], [-93.935687, 40.57533], [-93.936317, 40.575284], 
[-93.937097, 40.575421], [-93.938627, 40.575284], [-93.939857, 40.575192], [-93.963863, 40.574754], [-93.976766, 40.574635], [-94.015492, 40.573914], [-94.034134, 40.573585], [-94.080223, 40.572899], [-94.080463, 40.572899], [-94.089194, 40.572806], [-94.091085, 40.572897], [-94.23224, 40.571907], [-94.28735, 40.571521], [-94.294813, 40.571341], [-94.310724, 40.571524], [-94.324765, 40.571477], [-94.336556, 40.571475], [-94.336706, 40.571452], [-94.358307, 40.571363], [-94.429725, 40.571041], [-94.460088, 40.570947], [-94.470648, 40.57083], [-94.471213, 40.570825], [-94.48928, 40.570707], [-94.533878, 40.570739], [-94.537058, 40.570763], [-94.538318, 40.570763], [-94.541828, 40.570809], [-94.542154, 40.570809], [-94.594001, 40.570966], [-94.632032, 40.571186], [-94.632035, 40.571186], [-94.682601, 40.571787], [-94.714925, 40.572201], [-94.716665, 40.572201], [-94.773988, 40.572977], [-94.811188, 40.573532], [-94.819978, 40.573714], [-94.823758, 40.573942], [-94.896801, 40.574738], [-94.901451, 40.574877], [-94.914896, 40.575068], [-94.955134, 40.575669], [-94.966491, 40.575839], [-95.068921, 40.57688], [-95.079742, 40.577007], [-95.097607, 40.577168], [-95.107213, 40.577116], [-95.110303, 40.57716], [-95.110663, 40.577206], [-95.112222, 40.577228], [-95.120829, 40.577413], [-95.154499, 40.57786], [-95.164058, 40.578017], [-95.202264, 40.578528], [-95.211408, 40.57865], [-95.21159, 40.578654], [-95.212715, 40.578679], [-95.213327, 40.578689], [-95.217455, 40.578759], [-95.218783, 40.578781], [-95.221525, 40.578827], [-95.335588, 40.579871], [-95.357802, 40.5801], [-95.373893, 40.580501], [-95.373923, 40.580501], [-95.415406, 40.581014], [-95.469319, 40.58154], [-95.525392, 40.58209], [-95.526682, 40.582136], [-95.533182, 40.582249], [-95.554959, 40.582629], [-95.574046, 40.582963], [-95.611069, 40.583495], [-95.64184, 40.584234], [-95.687442, 40.58438], [-95.6875, 40.584381], [-95.746443, 40.584935], [-95.765645, 40.585208], [-95.753148, 40.59284], [-95.750053, 
40.597052], [-95.748626, 40.603355], [-95.776251, 40.647463], [-95.786568, 40.657253], [-95.795489, 40.662384], [-95.822913, 40.66724], [-95.842801, 40.677496], [-95.883178, 40.717579], [-95.888907, 40.731855], [-95.88669, 40.742101], [-95.881529, 40.750611], [-95.872281, 40.758349], [-95.861695, 40.762871], [-95.854172, 40.784012], [-95.821193, 40.876682], [-95.823123, 40.900924], [-95.829074, 40.975688], [-95.835434, 40.984184], [-95.867286, 41.001599], [-95.867246, 41.043671], [-95.866289, 41.051731], [-95.853396, 41.16028], [-95.852788, 41.165398], [-95.91459, 41.185098], [-95.92319, 41.190998], [-95.923219, 41.191046], [-95.92599, 41.195698], [-95.927491, 41.202198], [-95.924891, 41.211198], [-95.90249, 41.273398], [-95.91379, 41.320197], [-95.92569, 41.322197], [-95.939291, 41.328897], [-95.953091, 41.339896], [-95.956691, 41.345496], [-95.956791, 41.349196], [-95.93831, 41.392162], [-95.937346, 41.394403], [-95.930705, 41.433894], [-95.981319, 41.506837], [-95.994784, 41.526242], [-96.030593, 41.527292], [-96.036603, 41.509047], [-96.040701, 41.507076], [-96.046707, 41.507085], [-96.055096, 41.509577], [-96.089714, 41.531778], [-96.09409, 41.539265], [-96.118105, 41.613495], [-96.116233, 41.621574], [-96.097728, 41.639633], [-96.095046, 41.647365], [-96.095415, 41.652736], [-96.099837, 41.66103], [-96.121726, 41.68274], [-96.096795, 41.698681], [-96.077088, 41.715403], [-96.064537, 41.793002], [-96.06577, 41.798174], [-96.071007, 41.804639], [-96.077646, 41.808804], [-96.086407, 41.81138], [-96.110907, 41.830818], [-96.139554, 41.86583], [-96.144483, 41.871854], [-96.161756, 41.90182], [-96.161988, 41.905553], [-96.159098, 41.910057], [-96.142265, 41.915379], [-96.136743, 41.920826], [-96.129186, 41.965136], [-96.129505, 41.971673], [-96.22173, 42.026205], [-96.251714, 42.040472], [-96.272877, 42.047238], [-96.279079, 42.074026], [-96.307421, 42.130707], [-96.344121, 42.162091], [-96.349688, 42.172043], [-96.35987, 42.210545], [-96.356666, 42.215077], 
[-96.356591, 42.215182], [-96.336323, 42.218922], [-96.323723, 42.229887], [-96.322868, 42.233637], [-96.328905, 42.254734], [-96.348814, 42.282024], [-96.375307, 42.318339], [-96.384169, 42.325874], [-96.407998, 42.337408], [-96.413895, 42.343393], [-96.417786, 42.351449], [-96.415509, 42.400294], [-96.413609, 42.407894], [-96.387608, 42.432494], [-96.380707, 42.446394], [-96.381307, 42.461694], [-96.385407, 42.473094], [-96.396107, 42.484095], [-96.409408, 42.487595], [-96.443408, 42.489495], [-96.466253, 42.497702], [-96.476947, 42.508677], [-96.481308, 42.516556], [-96.479909, 42.524195], [-96.477709, 42.535595], [-96.476952, 42.556079], [-96.479685, 42.561238], [-96.516338, 42.630435], [-96.542366, 42.660736], [-96.575299, 42.682665], [-96.601989, 42.697429], [-96.60614, 42.694661], [-96.610975, 42.694751], [-96.630617, 42.70588], [-96.639704, 42.737071], [-96.633168, 42.768325], [-96.632142, 42.770863], [-96.577813, 42.828102], [-96.563058, 42.831051], [-96.552092, 42.836057], [-96.549513, 42.839143], [-96.54146, 42.857682], [-96.523264, 42.909059], [-96.510749, 42.944397], [-96.509479, 42.971122], [-96.513111, 43.02788], [-96.466017, 43.062235], [-96.455107, 43.083366], [-96.439335, 43.113916], [-96.436589, 43.120842], [-96.475571, 43.221054], [-96.485264, 43.224183], [-96.557126, 43.224192], [-96.572489, 43.249178], [-96.584124, 43.268101], [-96.586317, 43.274319], [-96.56911, 43.295535], [-96.551929, 43.292974], [-96.530392, 43.300034], [-96.525564, 43.312467], [-96.521264, 43.374978], [-96.521697, 43.386897], [-96.524044, 43.394762], [-96.529152, 43.397735], [-96.531159, 43.39561], [-96.53746, 43.395246], [-96.557586, 43.406792], [-96.594254, 43.434153], [-96.60286, 43.450907], [-96.600039, 43.45708], [-96.58407, 43.468856], [-96.587151, 43.484697], [-96.598928, 43.500457], [-96.591213, 43.500514], [-96.453049, 43.500415], [-96.351059, 43.500333], [-96.332062, 43.500415], [-96.208814, 43.500391], [-96.198766, 43.500312], [-96.198484, 43.500335], 
[-96.053163, 43.500176], [-95.861152, 43.499966], [-95.860946, 43.499966], [-95.834421, 43.499966], [-95.821277, 43.499965], [-95.741569, 43.499891], [-95.740813, 43.499894], [-95.514774, 43.499865], [-95.486803, 43.500246], [-95.486737, 43.500274], [-95.475065, 43.500335], [-95.454706, 43.500563], [-95.454706, 43.500648], [-95.454433, 43.500644], [-95.434293, 43.50036], [-95.434199, 43.500314], [-95.387851, 43.50024], [-95.387812, 43.50024], [-95.387787, 43.50024], [-95.375269, 43.500322], [-95.374737, 43.500314], [-95.250969, 43.500464], [-95.250762, 43.500406], [-95.214938, 43.500885], [-95.180423, 43.500774], [-95.167891, 43.500885], [-95.167294, 43.500771], [-95.122633, 43.500755], [-95.114874, 43.500667], [-95.054289, 43.50086], [-95.053504, 43.500769], [-95.034, 43.500811], [-95.014245, 43.500872], [-94.99446, 43.500523], [-94.974359, 43.500508], [-94.954477, 43.500467], [-94.934625, 43.50049], [-94.914955, 43.50045], [-94.914905, 43.50045], [-94.914634, 43.50045], [-94.914523, 43.50045], [-94.887291, 43.500502], [-94.874235, 43.500557], [-94.872725, 43.500564], [-94.860192, 43.500546], [-94.857867, 43.500615], [-94.854555, 43.500614], [-94.615916, 43.500544], [-94.565665, 43.50033], [-94.560838, 43.500377], [-94.47042, 43.50034], [-94.447048, 43.500639], [-94.442848, 43.500583], [-94.442835, 43.500583], [-94.390597, 43.500469], [-94.377466, 43.500379], [-94.247965, 43.500333], [-94.10988, 43.500283], [-94.108068, 43.5003], [-94.094339, 43.500302], [-94.092894, 43.500302], [-93.970762, 43.499605], [-93.97076, 43.499605], [-93.795793, 43.49952], [-93.794285, 43.499542], [-93.716217, 43.499563], [-93.708771, 43.499564], [-93.704916, 43.499568], [-93.699345, 43.499576], [-93.648533, 43.499559], [-93.617131, 43.499548], [-93.576728, 43.49952], [-93.558631, 43.499521], [-93.532178, 43.499472], [-93.528482, 43.499471], [-93.497405, 43.499456], [-93.49735, 43.499456], [-93.488261, 43.499417], [-93.482009, 43.499482], [-93.472804, 43.4994], [-93.468563, 43.499473], 
[-93.428509, 43.499478], [-93.399035, 43.499485], [-93.2718, 43.499356], [-93.228861, 43.499567], [-93.049192, 43.499571], [-93.024429, 43.499572], [-93.024348, 43.499572], [-93.007871, 43.499604], [-92.870277, 43.499548], [-92.790317, 43.499567], [-92.752088, 43.500084], [-92.707312, 43.500069], [-92.692786, 43.500063], [-92.689033, 43.500062], [-92.67258, 43.500055], [-92.653318, 43.50005], [-92.649194, 43.500049], [-92.553161, 43.5003], [-92.553128, 43.5003], [-92.464505, 43.500345], [-92.448948, 43.50042], [-92.408832, 43.500614], [-92.40613, 43.500476], [-92.388298, 43.500483], [-92.368908, 43.500454], [-92.279084, 43.500436], [-92.277425, 43.500466], [-92.198788, 43.500527], [-92.178863, 43.500713], [-92.103886, 43.500735], [-92.08997, 43.500684], [-92.079954, 43.500647], [-92.079802, 43.500647], [-91.949879, 43.500485], [-91.941837, 43.500554], [-91.824848, 43.500684], [-91.807156, 43.500648], [-91.804925, 43.500716], [-91.77929, 43.500803], [-91.777688, 43.500711], [-91.761414, 43.500637], [-91.738446, 43.500525], [-91.736558, 43.500561], [-91.73333, 43.500623], [-91.730359, 43.50068], [-91.730217, 43.50068], [-91.700749, 43.500581], [-91.670872, 43.500513], [-91.658401, 43.500533], [-91.651396, 43.500454], [-91.644924, 43.500529], [-91.639772, 43.500573], [-91.635626, 43.500463], [-91.634495, 43.500439], [-91.634244, 43.500479], [-91.625611, 43.500727], [-91.620785, 43.500677], [-91.617407, 43.500687], [-91.616895, 43.500663], [-91.615293, 43.50055], [-91.610895, 43.50053], [-91.610832, 43.50053], [-91.591073, 43.500536], [-91.551021, 43.500539], [-91.54122, 43.500515], [-91.533806, 43.50056], [-91.491042, 43.50069], [-91.465063, 43.500608], [-91.461403, 43.500642], [-91.445932, 43.500588], [-91.441786, 43.500438], [-91.37695, 43.500482], [-91.371608, 43.500945], [-91.369325, 43.500827], [-91.217706, 43.50055], [-91.20555, 43.422949], [-91.210233, 43.372064], [-91.107237, 43.313645], [-91.085652, 43.29187], [-91.057918, 43.255366], [-91.062562, 43.243165], 
[-91.1462, 43.152405], [-91.1562, 43.142945], [-91.175253, 43.134665], [-91.178251, 43.124982], [-91.177222, 43.080247], [-91.178087, 43.062044], [-91.175167, 43.041267], [-91.163064, 42.986781]]]}, "type": "Feature", "properties": {"CENSUSAREA": 55857.13, "STATE": "19", "LSAD": "", "NAME": "Iowa", "GEO_ID": "0400000US19"}}
# this notebook uses rasterio Shapes for processing, so lets convert that geojson to a shape
aoi_shape = sgeom.shape(iowa['geometry'])
# Display the AOI geometry (shapely renders it inline in the notebook).
aoi_shape
```
## Build Request
Build the Planet API Filter request.
Customize this code for your own purposes
```
# Default search window: January through August 2017.
DATE_START = datetime.datetime(year=2017, month=1, day=1)
DATE_END = datetime.datetime(year=2017, month=9, day=1)

def build_request(aoi_shape, date_start=DATE_START, date_end=DATE_END, addl_filters=None):
    """Build a Planet API search request for PSOrthoTile items.

    Filters to scenes intersecting `aoi_shape`, with cloud cover below 10%,
    acquired within (date_start, date_end).  Extra filters from `addl_filters`
    are ANDed in when provided.
    """
    search_filters = [
        filters.geom_filter(sgeom.mapping(aoi_shape)),
        filters.range_filter('cloud_cover', lt=.1),
        filters.date_range('acquired', gt=date_start),
        filters.date_range('acquired', lt=date_end),
    ]
    if addl_filters is not None:
        search_filters.extend(addl_filters)
    combined = filters.and_filter(*search_filters)
    return filters.build_search_request(combined, ['PSOrthoTile'])

request = build_request(aoi_shape)
# print(request)
```
## Set Coverage Grid Dimensions
Set the grid dimensions according to the AOI shape and the resolution of interest
```
# Coverage raster size as (rows, cols); used as out_shape when rasterizing
# overlaps in calculate_coverage below.
dimensions = (3000, 4000)
```
## Search Planet API
The client is how we interact with the planet api. It is created with the user-specific api key, which is pulled from $PL_API_KEY environment variable.
```
def get_api_key():
    """Return the Planet API key from the PL_API_KEY env var, or None if unset.

    Uses os.environ.get() rather than indexing: indexing raises KeyError when
    the variable is missing, which would prevent the `assert get_api_key(),
    "PL_API_KEY not defined."` check below from ever showing its message.
    """
    return os.environ.get('PL_API_KEY')
# quick check that key is defined
assert get_api_key(), "PL_API_KEY not defined."
import json
def create_client():
    """Create a Planet API v1 client authenticated with the user's key."""
    return api.ClientV1(api_key=get_api_key())
def search_pl_api(request, limit=500):
    """Run a Planet quick search; yields up to `limit` items.

    Note: the return value is a generator, not a list.
    """
    client = create_client()
    return client.quick_search(request).items_iter(limit=limit)


# Sanity check: fetch a single matching item and show its properties.
item = next(search_pl_api(build_request(aoi_shape), limit=1))
print(json.dumps(item['properties']))
```
## Calculate Coverage
First query the planet api for the items that match the request defined above, then calculate the overlap between each item and the aoi. Finally, convert each overlap to a grid using [`rasterio.rasterize`](https://mapbox.github.io/rasterio/topics/features.html#burning-shapes-into-a-raster), accumulate coverage over the overlap grids, and display the coverage grid.
```
def calculate_overlap(item, aoi_shape):
footprint_shape = sgeom.shape(item['geometry'])
return aoi_shape.intersection(footprint_shape)
def calculate_overlaps(items, aoi_shape):
    """Yield the non-empty AOI intersections for each item.

    Prints a summary count once the input is exhausted (note: the print
    only runs when the generator is fully consumed).
    """
    total = 0
    kept = 0
    for item in items:
        total += 1
        candidate = calculate_overlap(item, aoi_shape)
        if candidate.is_empty:
            continue
        kept += 1
        yield candidate
    print('{} overlaps from {} items'.format(kept, total))
def calculate_coverage(overlaps, dimensions, bounds):
    """Accumulate a per-pixel scene-count raster from overlap geometries.

    Each overlap polygon is burned into a 0/1 grid aligned with ``bounds``
    and ``dimensions``; summing the grids yields how many scenes cover each
    pixel. Returns a uint16 numpy array of shape ``dimensions``.
    """
    minx, miny, maxx, maxy = bounds
    row_count, col_count = dimensions
    # pixel size derived from bounds and grid shape; height is negative
    # because raster rows run north-to-south
    pixel_width = (maxx - minx) / col_count
    pixel_height = (miny - maxy) / row_count
    # Affine(a, b, c, d, e, f) where:
    # a = width of a pixel, e = height of a pixel (typically negative)
    # b, d = row/column rotation (typically zero)
    # c, f = x/y coordinate of the upper-left corner of the upper-left pixel
    # ref: http://www.perrygeo.com/python-affine-transforms.html
    transform = rasterio.Affine(pixel_width, 0, minx, 0, pixel_height, maxy)
    coverage = np.zeros(dimensions, dtype=np.uint16)
    for geom in overlaps:
        if geom.is_empty:
            continue
        # rasterize the overlap polygon: pixels inside get 1, outside get 0
        burned = rfeatures.rasterize(
            [sgeom.mapping(geom)],
            fill=0,
            default_value=1,
            out_shape=dimensions,
            transform=transform)
        coverage += burned
    return coverage
# Run the full pipeline: search (up to 10k scenes) -> overlaps -> coverage grid.
items = search_pl_api(request = build_request(aoi_shape),
limit=10000)
overlaps = calculate_overlaps(items, aoi_shape)
# cache coverage calculation because it takes a little while to create
coverage = calculate_coverage(overlaps, dimensions, aoi_shape.bounds)
from scipy import stats as sstats
import matplotlib.colors as colors
def plot_coverage(coverage):
    """Display the coverage grid on a log color scale with a colorbar.

    Log normalization is used because per-pixel scene counts span orders
    of magnitude.  ref: https://matplotlib.org/users/colormapnorms.html
    """
    fig, ax = plt.subplots(figsize=(15, 10))
    pcm = ax.imshow(coverage,
                    interpolation='nearest',
                    norm=colors.LogNorm(vmin=max(1, coverage.min()),  # avoid divide by zero
                                        vmax=coverage.max()),
                    cmap=cm.viridis)
    ax.set_title('Coverage\n(median: {})'.format(int(np.median(coverage))))
    ax.axis('off')
    fig.colorbar(pcm, ax=ax, extend='max')
    # show() last: the original called fig.show() before setting the title
    # and hiding the axis, so outside a notebook those changes could be
    # applied after the figure was already rendered
    fig.show()
plot_coverage(coverage)
```
Even when we limit the query to 10,000 scenes (there are more scenes than that, but it takes quite some time to process that many scenes), there are areas in Iowa that have 10-100 scenes of coverage. Pretty awesome!
| github_jupyter |
<a href="https://colab.research.google.com/github/kalz2q/mycolabnotebooks/blob/master/arabic01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# メモ
コードセルでは %%html とすると、html が書けるのでそれを利用すると、
文字に色をつけたり、大きくしたりができる。
それを利用してアラビア語の勉強をする、というアイデア。
うまく表示できたら、メニュー -> 表示 -> コードを非表示、でコードを隠してしまえばじゃまにならない。
```
#@title
%%html
<span style="font-size: 300%; color: red;">اَلسَّلَامُ</span>
```
とりあえずduolingoのtipsを読み直そうという計画。
duolingo では english letters , arabic letters, meaning という構成になっているが、これを多少変形して、 latin , kana, arabic, meaning という json データベースにする。
次のような感じ。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
latin: "a-s-salaamu",
kana: "アッサラーム",
arabic: "اَلسَّلَامُ",
meaning: "peace",
},
{
latin: "3alaykum",
kana: "あアライクム",
arabic: "عَلَيْكُمْ",
meaning: "on you",
},
{
latin: "SabaaH",
kana: "すぁバーふ",
arabic: "صَبَاح",
meaning: "morning",
}
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Alphabet 1
初めのうちはアルファベットの話。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
latin: "d",
kana: "ド",
arabic: "د",
meaning: "日本語のドと同じ",
},
{
latin: "aa",
kana: "アー",
arabic: "ا",
meaning: "日本語のアーと同じ",
},
{
latin: "daa",
kana: "ダー",
arabic: "دا",
meaning: "日本語のダーと同じ",
}
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
母音と子音の関係は次の表で学ぶ。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "daa",
"kana": "ダー",
"arabic": "دا",
"meaning": ""
},
{
"latin": "duu"
, "kana": "ドゥー"
, "arabic": "دو"
, "meaning": ""
},
{
"latin": "dii"
, "kana": "ディー"
, "arabic": "دي"
, "meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
短母音は次のようになる。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "da",
"kana": "ダ",
"arabic": "دَ",
"meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
長母音のアルファベットは子音としても使われる。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "zuu",
"kana": "ズー",
"arabic": "زو",
"meaning": ""
},
{
"latin": "zaw",
"kana": "ザウ",
"arabic": "زَو",
"meaning": ""
},
{
"latin": "wa",
"kana": "ワ",
"arabic": "وَ",
"meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "zii",
"kana": "ズィー",
"arabic": "زي",
"meaning": ""
},
{
"latin": "zay",
"kana": "ザイ",
"arabic": "زَي",
"meaning": ""
},
{
"latin": "ya",
"kana": "ヤ",
"arabic": "يَ",
"meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Alphabet 2
アラビア語のアルファベットは位置によって形が変わる。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "b",
"kana": "ブ",
"arabic": "ب",
"meaning": "独立型"
},
{
"latin": "bar",
"kana": "バる",
"arabic": "بَر",
"meaning": "語頭"
},
{
"latin": "jabar",
"kana": "ジャバる",
"arabic": "جَبَر",
"meaning": "語中"
},
{
"latin": "rajab",
"kana": "らジャブ",
"arabic": "رَجَب",
"meaning": "語尾"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
「ざ」の音
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "dh",
"kana": "ざ",
"arabic": "ذ",
"meaning": "dhaal, ざール, thの濁音"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Alphabet 3
b と j 記号の形の変化
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "bob",
"kana": "ブーブ",
"arabic": "بوب",
"meaning": "Bob"
},
{
"latin": "juurj",
"kana": "ジューるジ",
"arabic": "جورج",
"meaning": "George"
},
{
"latin": "kabar",
"kana": "カバる",
"arabic": "كَبَر",
"meaning": ""
},
{
"latin": "kab",
"kana": "カブ",
"arabic": "كَب",
"meaning": ""
},
{
"latin": "kajad",
"kana": "カジャド",
"arabic": "كَجَد",
"meaning": ""
},
{
"latin": "kaj",
"kana": "カジ",
"arabic": "كَج",
"meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Alphabet 4
長母音と短母音で意味が変わる
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "jamal",
"kana": "ジャマル",
"arabic": "جَمَل",
"meaning": "camel"
},
{
"latin": "jamaal",
"kana": "ジャマール",
"arabic": "جَمال",
"meaning": "beauty"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
冠詞や語形変化による単数複数の区別がない。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "bayt",
"kana": "バイト",
"arabic": "بَيت",
"meaning": "house / a house"
},
{
"latin": "jaakiit",
"kana": "ジャーキート",
"arabic": "جاكيت",
"meaning": "jacket / a jacket"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
形容詞は名詞の後に来る。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "jakiit jadiid",
"kana": "ジャキート ジャディード",
"arabic": "جاكيت جديد",
"meaning": "a new jacket (literally:\"a jacket new\")"
},
{
"latin": "baab kabiir",
"kana": "バーブ カビーる",
"arabic": "باب كَبير",
"meaning": "a big door (literally:\"a door big\")"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Descriptions 1
シャッダ: 促音のような子音を重ねて発音する記号。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "daras",
"kana": "ダらス",
"arabic": "دَرَس",
"meaning": "he studied"
},
{
"latin": "darras",
"kana": "ダるらス",
"arabic": "دَرَّس",
"meaning": "he taught"
},
{
"latin": "Hamaam",
"kana": "はマーム",
"arabic": "حَمام",
"meaning": "pigeons"
},
{
"latin": "Hammaam",
"kana": "はンマーム",
"arabic": "حَمّام",
"meaning": "bathroom"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "dajaaj",
"kana": "ダジャージ",
"arabic": "دَجاج ",
"meaning": "chicken(collective)"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Descriptions 2
be動詞がない。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "juurj sa3iid",
"kana": "ジューるジ サあイード",
"arabic": "جورج سَعيد",
"meaning": "George is happy"
},
{
"latin": "juudii min juubaa",
"kana": "ジューディー ミン ジューバー",
"arabic": "جودي مِن جوبا",
"meaning": "Judy is from Juba"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "la",
"kana": "ラ",
"arabic": "لا",
"meaning": "laam + alef"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Countries 1
繋がらない母音にはハムザをつける。また、母音で始まる語にはハムザをつけるのが普通。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "2a",
"kana": "2ア",
"arabic": "أَ",
"meaning": ""
},
{
"latin": "2u",
"kana": "2ウ",
"arabic": "أُ",
"meaning": ""
},
{
"latin": "2i",
"kana": "2イ",
"arabic": "إِ",
"meaning": ""
},
{
"latin": "2uu",
"kana": "2ウー",
"arabic": "أو",
"meaning": ""
},
{
"latin": "2ii",
"kana": "2イー",
"arabic": "إي",
"meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Omar Is ...
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "siith",
"kana": "スィーす",
"arabic": "سيث",
"meaning": "Seth (male name)"
},
{
"latin": "dhakiyy",
"kana": "ざキーイ",
"arabic": "ذَكِيّ",
"meaning": "smart"
},
{
"latin": "kabur ",
"kana": "カブゥる",
"arabic": "كَبُر",
"meaning": ""
},
{
"latin": "kabr",
"kana": "カブる",
"arabic": "كَبْر",
"meaning": ""
},
{
"latin": "masak",
"kana": "マサク",
"arabic": "مَسَك",
"meaning": ""
},
{
"latin": "mask",
"kana": "マスク",
"arabic": "مَسْك",
"meaning": ""
},
{
"latin": "jabar",
"kana": "ジャバる",
"arabic": "جَبَر",
"meaning": ""
},
{
"latin": "jabr",
"kana": "ジャブる",
"arabic": "جَبْر",
"meaning": ""
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "3ind juudii bayt",
"kana": "あインド ジューディー バイト",
"arabic": "عِنْد جودي بَيْت",
"meaning": "Judy has a house."
},
{
"latin": "3ind 3umar karaaj",
"kana": "あインド あウマる カらージ",
"arabic": "عِنْد عُمَر كَراج",
"meaning": "Omar has a garage."
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Countries 2
ハムザのついている語頭の 2a、2i、2u のアレフは発音しない。
語中にもハムザがでてくるがどんな意味なのかはとりあえず今は気にしないでいい。
# Phrases
語尾のシュクランは発音しないアレフの上についたり、アレフの前の文字についたりするがいずれにせよアレフは発音せず、an と読む。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "shukuran",
"kana": "シュクらン",
"arabic": "شُكْراً",
"meaning": "thank you"
},
{
"latin": "SabaaHan",
"kana": "すぁバーはン",
"arabic": "صَباحاً",
"meaning": "in the morning"
},
{
"latin": "masaa2an",
"kana": "マサー2アン",
"arabic": "مَساءً",
"meaning": "in the evening"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "3arabiyy",
"kana": "あアらビーイ",
"arabic": "عَرَبِيّ",
"meaning": "an Arab country"
},
{
"latin": "2ahlan yaa 3umar",
"kana": "2アハラン ヤー あウマる",
"arabic": "أَهْلاً يا عُمَر!",
"meaning": "Hello, Omar"
},
{
"latin": "shukuran yaa karii",
"kana": "シュクらン ヤー カりー",
"arabic": "شُكْراً يا كَري!",
"meaning": "Thank you, Carrie"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "2anti",
"kana": "2アンティ",
"arabic": "أَنْتِ",
"meaning": "you (female)"
},
{
"latin": "2anta",
"kana": "2アンタ",
"arabic": "أَنْتَ",
"meaning": "you (male)"
},
{
"latin": "hiyya",
"kana": "ヒーヤ",
"arabic": "هِيَّ",
"meaning": "she"
},
{
"latin": "huwwa",
"kana": "フゥーワ",
"arabic": "هُوَّ",
"meaning": "he"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Descriptions 3
アラビア語の名詞には性がある。 形容詞も性がある。
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "mutarjim dhakiyy",
"kana": "ムタるジム ざキーイ",
"arabic": "مُتَرْجِم ذَكِيّ",
"meaning": "a smart translator (male)"
},
{
"latin": "mutarjima dhakiyya",
"kana": "ムタるジマ ざキーヤ",
"arabic": "مُتَرْجِمة ذَكِيّة",
"meaning": "a smart translator (female)"
},
{
"latin": "2ustaadh 2amriikiyy",
"kana": "2ウスターず 2アムりーキーイ",
"arabic": "أُسْتاذ أَمْريكِيّ",
"meaning": "an American professor (male) "
},
{
"latin": "2ustaadha 2amriikiyya",
"kana": "2ウスターざ 2アムりーキーヤ",
"arabic": "أُسْتاذة أَمْريكِيّة",
"meaning": "an American professor (female)"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# You and Me
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "gh",
"kana": "ガ",
"arabic": "غ",
"meaning": "ghayn"
},
{
"latin": "a",
"kana": "ア",
"arabic": "ى",
"meaning": "アレフと同じ"
},
{
"latin": "3alaa",
"kana": "あアラー",
"arabic": "عَلى",
"meaning": "on, on top of"
},
{
"latin": "al-3arabiyya l-fuSHaa",
"kana": "アル あアらビーヤ ルフすはー",
"arabic": "اَلْعَرَبِيّة الْفُصْحى",
"meaning": "Standard Arabic"
},
{
"latin": "bariiTaanyaa l-kubraa",
"kana": "バりーたーニヤー ルクブラー",
"arabic": "بَريطانْيا الْكُبْرى",
"meaning": "Great Britain"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "hal",
"kana": "ハル",
"arabic": "هَل",
"meaning": "文頭につけて疑問文にする"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# University
```
#@title
%%html
<style>
table {
border-collapse: collapse;
border: 2px solid rgb(200, 200, 200);
letter-spacing: 1px;
margin: auto;
}
td,
th {
border: 1px solid rgb(190, 190, 190);
padding: 10px 20px;
}
tr:nth-child(even) td {
background-color: rgb(250, 250, 250);
}
tr:nth-child(odd) td {
background-color: rgb(245, 245, 245);
}
tr td:nth-child(3) {
color: red;
font-size: 300%;
}
</style>
<div id="maintable"></div>
<script>
var json = [
{
"latin": "balid 3arabiiyy",
"kana": "バリド あアらビーイ",
"arabic": "بَلَد عَرَبِيّ",
"meaning": "an Arab country"
},
{
"latin": "madiina 3arabi-yya",
"kana": "マディーナ あアらビーヤ",
"arabic": "مَدينة عَرَبِيّة",
"meaning": "an Arab city"
},
{
"latin": "bayt jadiid",
"kana": "バイト ジャディード",
"arabic": "بَيْت جَديد",
"meaning": "a new house"
},
{
"latin": "jaami3a jadiida",
"kana": "ジャーミあア ジャディーダ",
"arabic": "جامِعة جَديدة",
"meaning": "a new university"
},
];
var table = document.createElement("table");
// table header
var tr = document.createElement("tr");
for (key in json[0]) {
var th = document.createElement("th");
th.textContent = key;
tr.appendChild(th);
}
table.appendChild(tr);
// table content
for (var i = 0; i < json.length; i++) {
var tr = document.createElement("tr");
for (key in json[0]) {
var td = document.createElement("td");
td.textContent = json[i][key];
tr.appendChild(td);
}
table.appendChild(tr);
}
// add table to maintable
document.getElementById("maintable").appendChild(table);
</script>
```
# Are You ...?
# いまここ
| github_jupyter |
# Topic Modeling with DARIAH topics
We use this python library to do topic modeling on the AO3 corpus: https://dariah-de.github.io/Topics/
Issue: the library is designed to work with simple .txt files, while we have an R environment.
We need to convert the R environment into .txt files: this can be done directly via Python!
## 1. Preparation
Install and call the libraries
```
!pip install dariah
!pip install pyreadr
!pip install langdetect
import dariah
import pyreadr
from langdetect import detect
import re
import seaborn as sns
import matplotlib.pyplot as plt
```
## 2. Corpus loading
Load the corpus from the R environment.
**Note:** you will have to upload the "AO3_corpus.RData" file in the "Files" panel on the left.
```
result = pyreadr.read_r('AO3_corpus.RData')
print(result.keys())
my_df = result["my_df"]
all_texts = result["all_texts"]["all_texts"] # this is to have a vector, not a dataframe
```
## 3. Corpus cleaning
Remove texts that are too short or not in English.
**Note:** this are the same operations already done for stylometry in R
```
# create unique ids
my_df["ID"] = my_df.index.values
# drop short text
my_df = my_df.drop(my_df[my_df.length < 1000].index)
# recognize language
my_df["lang"] = [detect(x) for x in my_df["incipit"]]
# remove non-English texts
my_df = my_df.drop(my_df[my_df.lang != 'en'].index)
# visualize
my_df.head()
```
## 4. Corpus creation
Now everything is ready to create the corpus as ".txt" files
```
# create new directory
!mkdir corpus
# loop on metadata (my_df) to write texts (all_texts)
for i in my_df["ID"]:
# define filename
author = re.sub(r'\W+', '', str(my_df.loc[i,'author']))
title = re.sub(r'\W+', '', str(my_df.loc[i,'title']))
filename = 'corpus/'+author+'_'+title+'.txt'
# write file
text_file = open(filename, 'w')
n = text_file.write(all_texts[i])
text_file.close()
```
## 5. Topic modeling
The training can start! (it might take a few minutes)
```
model, vis = dariah.topics(directory="corpus",
stopwords=100,
num_topics=10,
num_iterations=1000)
```
## 6. Results
Visualize the results (as tables and plots)
```
# table with all values
model.topic_document.head()
# see topic/document heatmap
%matplotlib inline
vis.topic_document()
# the plot might not be that good
# better use the seaborn package directly, instead of dariah's functions
plt.figure(figsize=(50,50))
sns.heatmap(model.topic_document, cmap="Blues")
plt.show()
# see the words that compose the topics
vis.topic("topic0")
# see topics in a document
vis.document("ocean_eyes_221_ChasingShadows")
```
| github_jupyter |
```
import urllib.request, json
with urllib.request.urlopen(
"https://api.steinhq.com/v1/storages/5e736c1db88d3d04ae0815b3/Raw_Data"
) as url:
data = json.loads(url.read().decode())
import pandas as pd
import re
from tqdm import tqdm
tqdm.pandas()
df = pd.DataFrame(data)
df["Notes"][30:35]
import spacy
nlp = spacy.load("en_core_web_sm")
from spacy.tokens import Span
def get_travel_status(span):
if span.label_ =="GPE":
prev_token = span.doc[span.start - 1]
if prev_token.text in ("from", "through", "via", "Via"):
return("from")
elif prev_token.text in ("to", "and"):
return("to")
return "to"
# Register the Span extension as 'travel_status'
Span.set_extension("travel_status", getter=get_travel_status, force=True)
from spacy.tokens import Span, Token
def get_nat(span):
if span.label_ =="NORP":
return span.text
# Register the Span extension as 'nationality'
Span.set_extension("nationality", getter=get_nat, force=True)
doc = nlp("Indian Tourist")
print([(ent.text, ent._.nationality) for ent in doc.ents])
from spacy.matcher import Matcher
def get_rel(token):
if token.text == "of":
prev_token = token.doc[token.i - 1]
prev2 = None
if token.i > 2:
prev2 = token.doc[token.i - 2]
if prev2.text.lower() == "and":
return f"{token.doc[token.i - 3]} {token.doc[token.i - 2]} {token.doc[token.i - 1]}"
if prev_token.text.lower() in ("members", "member"):
return "Family Member"
else:
return prev_token.text
# Register the Span extension as 'relationship'
Token.set_extension("relationship", getter=get_rel, force=True)
doc = nlp("friend and family of p23")
print([(ent.text, ent._.relationship) for ent in doc])
def extract_relationship(sent):
if not sent:
return []
s = re.sub(r'[^\w\s]',' ',sent)
doc = nlp(s)
for tok in doc:
if tok._.relationship:
return(tok._.relationship)
def extract_travel_place(sent):
if not sent:
return []
s = re.sub(r'[^\w\s]',' ',sent)
doc = nlp(s)
travel = []
for ent in doc.ents:
if ent._.travel_status:
travel.append(ent.text)
return travel
def extract_nationality(sent):
if not sent:
return []
s = re.sub(r'[^\w\s]',' ',sent)
doc = nlp(s)
nat = []
for ent in doc.ents:
if ent._.nationality:
nat.append(ent._.nationality)
return nat
import urllib.request, json
with urllib.request.urlopen(
"https://raw.githubusercontent.com/bhanuc/indian-list/master/state-city.json"
) as url:
state_city = json.loads(url.read().decode())
l = ["India", "Mumbai"]
for k, v in state_city.items():
l.append(k)
l = l+v
l= [ele.replace("*", "") for ele in l]
def extract_foreign(sent):
if not sent:
return []
s = re.sub(r'[^\w\s]',' ',sent)
doc = nlp(s)
is_foreign = []
for ent in doc.ents:
if ent.label_=="GPE":
is_foreign.append(not(ent.text in l))
return is_foreign
extract_foreign("India, China Italy, Japan, Pune, 1989 mountains Apple Meghana")
import re
def find_travelled(data):
df = data.copy()
df["Relationship"] = df["Notes"].progress_apply(extract_relationship)
df["Travel Place"] = df["Notes"].progress_apply(extract_travel_place)
df["Nationality"] = df["Notes"].progress_apply(extract_nationality)
df["is_foreign"] = df["Notes"].progress_apply(extract_foreign)
return df
find_travelled(df).to_csv("rel.csv")
```
| github_jupyter |
```
import sys
sys.path.append('..')
import os
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, KBinsDiscretizer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
import time
from sklearn.metrics import accuracy_score
from sympy import simplify_logic
from sklearn.model_selection import train_test_split
import lens
from lens.utils.base import validate_network, set_seed, tree_to_formula
from lens.utils.layer import prune_logic_layers
from lens import logic
set_seed(0)
#%%
data = pd.read_csv('data/mimic-ii/full_cohort_data.csv')
# data.drop('hgb_first')
fs = [
'aline_flg',
'gender_num',
# 'hosp_exp_flg',
# 'icu_exp_flg',
# 'day_28_flg',
# 'censor_flg',
'sepsis_flg', 'chf_flg', 'afib_flg',
'renal_flg', 'liver_flg', 'copd_flg', 'cad_flg', 'stroke_flg',
'mal_flg', 'resp_flg',
]
features = fs
data1 = data[fs].values
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
data1 = imp_mean.fit_transform(data1)
f2 = fs.copy()
f2.append('day_icu_intime')
f2.append('service_unit')
f2.append('day_28_flg')
f2.append('hospital_los_day')
f2.append('icu_exp_flg')
f2.append('hosp_exp_flg')
f2.append('censor_flg')
f2.append('mort_day_censored')
f2 = data.columns.difference(f2)
data2 = data[f2].values
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
data2 = imp_mean.fit_transform(data2)
scaler = MinMaxScaler((0, 1))
data2 = scaler.fit_transform(data2)
features = features + list(f2)
est = KBinsDiscretizer(n_bins=3, encode='onehot-dense', strategy='uniform')
data2d = est.fit_transform(data2)
f2d = []
for feature in f2:
#f2d.append(feature + '_VLOW')
f2d.append(feature + '_LOW')
f2d.append(feature + '_NORMAL')
f2d.append(feature + '_HIGH')
#f2d.append(feature + '_VHIGH')
features = fs + f2d
datax = np.hstack((data1, data2d))
datay = data['day_28_flg'].values
x = torch.FloatTensor(datax)
y = torch.FloatTensor(datay)
print(x.shape)
print(y.shape)
results_dir = 'results_ll/mimic'
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# concepts = [f'c{i:03}' for i in range(x.shape[1])]
concepts = features
n_rep = 10
tot_epochs = 5001
prune_epochs = 2001
seed = 42
n_splits = 10
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
def train_nn(x_train, y_train, seed, device, l1=0.001, lr=0.001, verbose=False):
set_seed(seed)
x_train = x_train.to(device)
y_train = y_train.to(device)
layers = [
lens.nn.XLogic(x_train.size(1), 50, first=True),
torch.nn.LeakyReLU(),
torch.nn.Linear(50, 30),
torch.nn.LeakyReLU(),
torch.nn.Linear(30, 1),
torch.nn.LeakyReLU(),
lens.nn.XLogic(1, 1, top=True),
]
model = torch.nn.Sequential(*layers).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
loss_form = torch.nn.BCELoss()
model.train()
need_pruning = True
for epoch in range(tot_epochs):
optimizer.zero_grad()
y_pred = model(x_train).squeeze()
loss = loss_form(y_pred, y_train)
for module in model.children():
if isinstance(module, lens.nn.XLogic):
loss += l1 * torch.norm(module.weight, 1)
loss += l1 * torch.norm(module.bias, 1)
loss.backward()
optimizer.step()
if epoch > prune_epochs and need_pruning:
lens.utils.layer.prune_logic_layers(model.to(device), fan_in=5, device=device)
need_pruning = False
# compute accuracy
if epoch % 100 == 0 and verbose:
y_pred_d = y_pred > 0.5
accuracy = y_pred_d.eq(y_train).sum().item() / y_train.size(0)
print(f'Epoch {epoch}: train accuracy: {accuracy:.4f}')
return model
def c_to_y(method, verbose=False):
methods = []
splits = []
explanations = []
model_accuracies = []
explanation_accuracies = []
explanation_fidelities = []
explanation_complexities = []
elapsed_times = []
for split, (trainval_index, test_index) in enumerate(skf.split(x.cpu().detach().numpy(), y.cpu().detach().numpy())):
print(f'Split [{split+1}/{n_splits}]')
x_trainval, x_test = torch.FloatTensor(x[trainval_index]), torch.FloatTensor(x[test_index])
y_trainval, y_test = torch.FloatTensor(y[trainval_index]), torch.FloatTensor(y[test_index])
x_train, x_val, y_train, y_val = train_test_split(x_trainval, y_trainval, test_size=0.3, random_state=42)
explanation, explanation_inv = '', ''
explanation_accuracy, explanation_accuracy_inv = 0, 0
if method == 'tree':
classifier = DecisionTreeClassifier(random_state=seed)
classifier.fit(x_train.detach().numpy(), y_train.detach().numpy())
y_preds = classifier.predict(x_test.detach().numpy())
model_accuracy = accuracy_score(y_test.detach().numpy(), y_preds)
explanation_accuracy = model_accuracy
target_class = 1
start = time.time()
explanation = tree_to_formula(classifier, concepts, target_class)
elapsed_time = time.time() - start
explanation_fidelity = 1.
explanation_complexity = lens.logic.complexity(explanation)
target_class_inv = 0
start = time.time()
explanation_inv = tree_to_formula(classifier, concepts, target_class_inv)
elapsed_time = time.time() - start
else:
model = train_nn(x_trainval, y_trainval, seed, device, verbose=False)
y_preds = model(x_test.to(device)).cpu().detach().numpy() > 0.5
model_accuracy = accuracy_score(y_test.cpu().detach().numpy(), y_preds)
# positive class
start = time.time()
class_explanation, class_explanations = lens.logic.explain_class(model.cpu(),
x_trainval.cpu(),
y_trainval.cpu(),
binary=True, target_class=1,
topk_explanations=3)
elapsed_time = time.time() - start
if class_explanation:
explanation = logic.base.replace_names(class_explanation, concepts)
explanation_accuracy, y_formula = logic.base.test_explanation(class_explanation,
target_class=1,
x=x_test, y=y_test,
metric=accuracy_score)
explanation_fidelity = lens.logic.fidelity(y_formula, y_preds)
explanation_complexity = lens.logic.complexity(class_explanation)
if verbose:
print(f'\t Model\'s accuracy: {model_accuracy:.4f}')
print(f'\t Class 1 - Global explanation: "{explanation}" - Accuracy: {explanation_accuracy:.4f}')
print(f'\t Fidelity: "{explanation_fidelity:.4f}" - Complexity: "{explanation_complexity}"')
print(f'\t Elapsed time {elapsed_time}')
methods.append(method)
splits.append(seed)
explanations.append(explanation)
model_accuracies.append(model_accuracy)
explanation_accuracies.append(explanation_accuracy)
explanation_fidelities.append(explanation_fidelity)
explanation_complexities.append(explanation_complexity)
elapsed_times.append(elapsed_time)
explanation_consistency = lens.logic.formula_consistency(explanations)
print(f'Consistency of explanations: {explanation_consistency:.4f}')
results = pd.DataFrame({
'method': methods,
'split': splits,
'explanation': explanations,
'model_accuracy': model_accuracies,
'explanation_accuracy': explanation_accuracies,
'explanation_fidelity': explanation_fidelities,
'explanation_complexity': explanation_complexities,
'explanation_consistency': explanation_consistency,
'elapsed_time': elapsed_times,
})
results.to_csv(os.path.join(results_dir, f'results_{method}.csv'))
return results
```
# General pruning
```
results_pruning = c_to_y(method='logic_layer', verbose=True)
results_pruning
```
# Decision tree
```
results_tree = c_to_y(method='tree', verbose=False)
results_tree
```
# Summary
```
cols = ['model_accuracy', 'explanation_accuracy', 'explanation_fidelity',
'explanation_complexity', 'elapsed_time', 'explanation_consistency']
mean_cols = [f'{c}_mean' for c in cols]
sem_cols = [f'{c}_sem' for c in cols]
# pruning
df_mean = results_pruning[cols].mean()
df_sem = results_pruning[cols].sem()
df_mean.columns = mean_cols
df_sem.columns = sem_cols
summary_pruning = pd.concat([df_mean, df_sem])
summary_pruning.name = 'pruning'
# tree
df_mean = results_tree[cols].mean()
df_sem = results_tree[cols].sem()
df_mean.columns = mean_cols
df_sem.columns = sem_cols
summary_tree = pd.concat([df_mean, df_sem])
summary_tree.name = 'tree'
summary = pd.concat([summary_pruning,
summary_tree], axis=1).T
summary.columns = mean_cols + sem_cols
summary.to_csv(os.path.join(results_dir, 'summary.csv'))
summary
```
| github_jupyter |
<h2>Fashion MNIST dataset in Keras library</h2>
## Imports
```
# - TensorFlow
import tensorflow as tf
# - Dataset
from tensorflow.keras.datasets import fashion_mnist
# - Helper libraries
import numpy as np
import pandas as pd
import time
from sklearn.metrics import confusion_matrix
from tensorflow.keras.utils import to_categorical
# - Graph plots
%matplotlib inline
from matplotlib import pyplot as plt
# - Keras models and layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, Convolution2D, MaxPooling2D
# Print some stuff
print("TensorFlow version:", tf.__version__)
```
<h2>Load data</h2>
```
# Load pre-shuffled Fashion MNIST data into train and test sets
(X_train, y_train_raw), (X_test, y_test_raw) = fashion_mnist.load_data()
# Convert to float values and normalize between 0...1
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# Preprocess class labels
# Convert labels to 10-dimensional one-hot vectors
y_train = to_categorical(y_train_raw, 10)
y_test = to_categorical(y_test_raw, 10)
print("Training instances: {}".format(X_train.shape[0]))
print("Test instances: {}".format(X_test.shape[0]))
print()
print("Structure of training set: {}".format(X_train.shape))
print("Structure of labels set: {}".format(y_train.shape))
```
<h2>Pre-process data for 1D or 2D inputs</h2>
```
# Preprocess for 1D input data
X_train_1D = X_train.reshape(X_train.shape[0], 784)
X_test_1D = X_test.reshape(X_test.shape[0], 784)
print("1D shape:", X_train_1D.shape)
# Preprocess for 2D input data
X_train_2D = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test_2D = X_test.reshape(X_test.shape[0], 28, 28, 1)
print("2D shape:", X_train_2D.shape)
```
## Linear model
```
# Create Linear Softmax model
lin_clf = Sequential()
lin_clf.add(Dense(10, input_dim=784, activation="softmax"))
# Compile model
lin_clf.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Start timer
start = time.time()
# Train model on training data
history = lin_clf.fit(X_train_1D, y_train, batch_size=64, epochs=6, validation_split=0.1, verbose=1)
# Evaluate model on test data
score = lin_clf.evaluate(X_test_1D, y_test, verbose=0)
# Stop timer
end = time.time()
# Print results
print("\nTest Accuracy: {0:0.2f}%".format(score[1] * 100))
print("Time elapsed: {0:0.2f} sec".format(end - start))
# Plot training history
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
## NN model
```
# Create Neural Net Softmax model
nn_clf = Sequential()
# - first Dense
nn_clf.add(Dense(128, input_dim=784, activation="relu", kernel_initializer="normal"))
nn_clf.add(Dropout(0.2))
# - second Dense
nn_clf.add(Dense(128, activation="relu", kernel_initializer="he_normal"))
nn_clf.add(Dropout(0.2))
# - output
nn_clf.add(Dense(10, activation="softmax"))
# Compile model
nn_clf.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Start timer
start = time.time()
# Train model on training data
history = nn_clf.fit(X_train_1D, y_train, batch_size=64, epochs=8, validation_split=0.1, verbose=1)
# Evaluate model on test data
score = nn_clf.evaluate(X_test_1D, y_test, verbose=0)
# Stop timer
end = time.time()
# Print results
print("\nTest Accuracy: {0:0.2f}%".format(score[1] * 100))
print("Time elapsed: {0:0.2f} sec".format(end - start))
# Plot training history
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
## ConvNet model
```
# Create ConvNet model
cnn_clf = Sequential()
# - first conv/pool
cnn_clf.add(Convolution2D(32, (3,3), input_shape=(28,28,1), activation="relu", padding="same", kernel_initializer="he_uniform"))
cnn_clf.add(MaxPooling2D(pool_size=(2,2))) # reduces size to 14x14
cnn_clf.add(Dropout(0.3))
# - second conv/pool
cnn_clf.add(Convolution2D(64, (3,3), activation="relu", padding="same", kernel_initializer="he_uniform"))
cnn_clf.add(MaxPooling2D(pool_size=(2,2))) # reduces size to 7x7
cnn_clf.add(Dropout(0.3))
# - fully connected
cnn_clf.add(Flatten())
cnn_clf.add(Dense(128, activation="relu"))
cnn_clf.add(Dropout(0.3))
# - output
cnn_clf.add(Dense(10, activation="softmax"))
# Compile model
cnn_clf.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Start timer
start = time.time()
# Train model on training data
history = cnn_clf.fit(X_train_2D, y_train, batch_size=64, epochs=8, validation_split=0.1, verbose=1)
# Evaluate model on test data
score = cnn_clf.evaluate(X_test_2D, y_test, verbose=0)
# Stop timer
end = time.time()
# Print results
print("\nTest Accuracy: {0:0.2f}%".format(score[1] * 100))
print("Time elapsed: {0:0.2f} sec".format(end - start))
# Plot training history
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
<h2>Confusion matrix</h2>
```
# Make predictions
y_pred = cnn_clf.predict(X_test_2D)
# Confusion matrix
conf_mx = confusion_matrix(
np.argmax(y_test,axis=1),
np.argmax(y_pred, axis=1))
print(conf_mx)
```
<h2>Predict examples</h2>
```
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
# Examples to classify
examples = X_test_2D[:3]
res = np.argmax(cnn_clf.predict(examples), axis=1)
for i in range(0,len(res)):
print("Predicted:",class_names[res[i]])
print("Actual:",class_names[y_test_raw[i]])
plt.imshow(X_test[i], cmap=plt.cm.Blues)
plt.show()
```
| github_jupyter |
# Demo: Using VGG with Keras
Below, you'll be able to check out the predictions from an ImageNet pre-trained VGG network with Keras.
### Load some example images
```
# Load our images first, and we'll check what we have
from glob import glob
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
image_paths = glob('images/*.jpg')
# Print out the image paths
print(image_paths)
# Store paths for later
baca_path = image_paths[1]
tajfun_path = image_paths[3]
# View dogos images:
baca_path = image_paths[1]
img = mpimg.imread(baca_path)
plt.imshow(img)
plt.show()
img = mpimg.imread(tajfun_path)
plt.imshow(img)
plt.show()
# utility for owczrek podhalański
tatra_sheep_dog_name = 'Polish_Tatra_Sheepdog'
dog_name_converter = {'kuvasz':tatra_sheep_dog_name,
'Old_English_sheepdog':tatra_sheep_dog_name,
'Great_Pyrenees': tatra_sheep_dog_name}
```
### Pre-process an image
Note that the `image.load_img()` function will re-size our image to 224x224 as desired for input into this VGG16 model, so the images themselves don't have to be 224x224 to start.
```
# Here, we'll load an image and pre-process it
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
img_paths = [baca_path, tajfun_path]
inputs = []
for img_path in img_paths:
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
inputs.append(x)
```
### Load VGG16 pre-trained model
We won't throw out the top fully-connected layer this time when we load the model, as we actually want the true ImageNet-related output. However, you'll learn how to do this in a later lab. The inference will be a little slower than you might expect here as we are not using GPU just yet.
Note also the use of `decode_predictions` which will map the prediction to the class name.
```
# Note - this will likely need to download a new version of VGG16
from keras.applications.vgg16 import VGG16, decode_predictions
# Load the pre-trained model
model = VGG16(weights='imagenet')
# Prepare plots
n_images = len(inputs)
fig, axes = plt.subplots(n_images, 3)
fig.set_size_inches((8.27, 11.69)[::-1])
# Perform inference on our pre-processed images
for i,x in enumerate(inputs):
predictions = model.predict(x)
# Check the top 3 predictions of the model
print('Predicted:', decode_predictions(predictions, top=3)[0])
axes[i, 0].imshow(mpimg.imread(img_paths[i]))
axes[i, 0].axis('off')
axes[i, 1].text(0, 0, 'is probably: ', fontweight='bold')
axes[i, 1].axis('off')
try:
axes[i, 2].text(0, 0, dog_name_converter[decode_predictions(predictions, top=3)[0][0][1]], fontweight='bold')
except:
axes[i, 2].text(0, 0, decode_predictions(predictions, top=3)[0][0][1], fontweight='bold')
axes[i, 2].axis('off')
plt.show()
```
You should mostly get the correct answers here. In our own run, it predicted a Tusker elephant with an African elephant in second place (the image is of an African elephant), correctly selected a labrador, and very confidently predicted a zebra. You can add some of your own images into the `images/` folder by clicking on the jupyter logo in the top left and see how it performs on your own examples!
| github_jupyter |
<img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# Qiskit Aer: Simulators
The latest version of this notebook is available on https://github.com/Qiskit/qiskit-tutorials.
## Introduction
This notebook shows how to import *Qiskit Aer* simulator backends and use them to execute ideal (noise free) Qiskit Terra circuits.
```
import numpy as np
# Import Qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import Aer, execute
from qiskit.tools.visualization import plot_histogram, plot_state_city
```
## Qiskit Aer simulator backends
Qiskit Aer currently includes three high performance simulator backends:
* `QasmSimulator`: Allows ideal and noisy multi-shot execution of qiskit circuits and returns counts or memory
* `StatevectorSimulator`: Allows ideal single-shot execution of qiskit circuits and returns the final statevector of the simulator after application
* `UnitarySimulator`: Allows ideal single-shot execution of qiskit circuits and returns the final unitary matrix of the circuit itself. Note that the circuit cannot contain measure or reset operations for this backend
These backends are found in the `Aer` provider with the names `qasm_simulstor`, `statevector_simulator` and `unitary_simulator` respectively
```
# List Aer backends
Aer.backends()
```
The simulator backends can also be directly and may be imported from `qiskit.providers.aer`
```
from qiskit.providers.aer import QasmSimulator, StatevectorSimulator, UnitarySimulator
```
## QasmSimulator
The `QasmSimulator` backend is designed to mimic an actual device. It executes a Qiskit `QuantumCircuit` and returns a count dictionary containing the final values of any classical registers in the circuit. The circuit may contain *gates*,
*measure*, *reset*, *conditionals*, and other advanced simulator options that will be discussed in another notebook.
### Simulating a quantum circuit
The basic operation executes a quantum circuit and returns a counts dictionary of measurement outcomes. Here we execute a simple circuit that prepares a 2-qubit Bell-state $|\psi\rangle = \frac{1}{\sqrt{2}}(|0,0\rangle + |1,1 \rangle)$ and measures both qubits.
```
# Construct quantum circuit
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.cx(qr[0], qr[1])
circ.measure(qr, cr)
# Select the QasmSimulator from the Aer provider
simulator = Aer.get_backend('qasm_simulator')
# Execute and get counts
result = execute(circ, simulator).result()
counts = result.get_counts(circ)
plot_histogram(counts, title='Bell-State counts')
```
### Returning measurements outcomes for each shot
The `QasmSimulator` also supports returning a list of measurement outcomes for each individual shot. This is enabled by setting the keyword argument `memory=True` in the `compile` or `execute` function.
```
# Construct quantum circuit
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.cx(qr[0], qr[1])
circ.measure(qr, cr)
# Select the QasmSimulator from the Aer provider
simulator = Aer.get_backend('qasm_simulator')
# Execute and get memory
result = execute(circ, simulator, shots=10, memory=True).result()
memory = result.get_memory(circ)
print(memory)
```
### Starting simulation with a custom initial state
The `QasmSimulator` allows setting a custom initial statevector for the simulation. This means that all experiments in a Qobj will be executed starting in a state $|\psi\rangle$ rather than the all zero state $|0,0,..0\rangle$. The custom state may be set using the `backend_options` keyword argument for `execute`, or the Aer backend `run` method.
**Note:**
* The initial statevector must be a valid quantum state $|\langle\psi|\psi\rangle|=1$. If not an exception will be raised.
* If a Qobj contains multiple circuits, the initial statevector must be the correct size for *all* experiments in the Qobj, otherwise an exception will be raised.
We now demonstrate this functionality by executing an empty circuit, but setting the simulator to be initialized in the final Bell-state of the previous example:
```
# Construct an empty quantum circuit
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circ = QuantumCircuit(qr, cr)
circ.measure(qr, cr)
# Set the initial state
opts = {"initial_statevector": np.array([1, 0, 0, 1] / np.sqrt(2))}
# Select the QasmSimulator from the Aer provider
simulator = Aer.get_backend('qasm_simulator')
# Execute and get counts
result = execute(circ, simulator, backend_options=opts).result()
counts = result.get_counts(circ)
plot_histogram(counts, title="Bell initial statevector")
```
## StatevectorSimulator
The `StatevectorSimulator` executes a single shot of a Qiskit `QuantumCircuit` and returns the final quantum statevector of the simulation. The circuit may contain *gates*, and also *measure*, *reset*, and *conditional* operations.
### Simulating a quantum circuit
The basic operation executes a quantum circuit and returns a counts dictionary of measurement outcomes. Here we execute a simple circuit that prepares a 2-qubit Bell-state $|\psi\rangle = \frac{1}{\sqrt{2}}(|0,0\rangle + |1,1 \rangle)$ and measures both qubits.
```
# Construct quantum circuit without measure
qr = QuantumRegister(2, 'qr')
circ = QuantumCircuit(qr)
circ.h(qr[0])
circ.cx(qr[0], qr[1])
# Select the StatevectorSimulator from the Aer provider
simulator = Aer.get_backend('statevector_simulator')
# Execute and get counts
result = execute(circ, simulator).result()
statevector = result.get_statevector(circ)
plot_state_city(statevector, title='Bell state')
```
### Simulating a quantum circuit with measurement
Note that if a circuit contains *measure* or *reset* the final statevector will be a conditional statevector *after* simulating wave-function collapse to the outcome of a measure or reset. For the Bell-state circuit this means the final statevector will be *either* $|0,0\rangle$ *or* $|1, 1\rangle$.
```
# Construct quantum circuit with measure
qr = QuantumRegister(2, 'qr')
cr = ClassicalRegister(2, 'cr')
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.cx(qr[0], qr[1])
circ.measure(qr, cr)
# Select the StatevectorSimulator from the Aer provider
simulator = Aer.get_backend('statevector_simulator')
# Execute and get counts
result = execute(circ, simulator).result()
statevector = result.get_statevector(circ)
plot_state_city(statevector, title='Bell state post-measurement')
```
### Starting simulation with a custom initial state
Like the `QasmSimulator`, the `StatevectorSimulator` also allows setting a custom initial statevector for the simulation. Here we run the previous initial statevector example on the `StatevectorSimulator` to initialize it to the Bell state and execute an empty (identity) circuit.
```
# Construct an empty quantum circuit
qr = QuantumRegister(2)
circ = QuantumCircuit(qr)
circ.iden(qr)
# Set the initial state
opts = {"initial_statevector": np.array([1, 0, 0, 1] / np.sqrt(2))}
# Select the StatevectorSimulator from the Aer provider
simulator = Aer.get_backend('statevector_simulator')
# Execute and get counts
result = execute(circ, simulator, backend_options=opts).result()
statevector = result.get_statevector(circ)
plot_state_city(statevector, title="Bell initial statevector")
```
## Unitary Simulator
The `UnitarySimulator` constructs the unitary matrix for a Qiskit `QuantumCircuit` by applying each gate matrix to an identity matrix. The circuit may only contain *gates*, if it contains *resets* or *measure* operations an exception will be raised.
### Simulating a quantum circuit unitary
For this example we will return the unitary matrix corresponding to the previous examples circuit which prepares a bell state.
```
# Construct an empty quantum circuit
qr = QuantumRegister(2)
circ = QuantumCircuit(qr)
circ.h(qr[0])
circ.cx(qr[0], qr[1])
# Select the UnitarySimulator from the Aer provider
simulator = Aer.get_backend('unitary_simulator')
# Execute and get counts
result = execute(circ, simulator).result()
unitary = result.get_unitary(circ)
print("Circuit unitary:\n", unitary)
```
### Setting a custom initial unitary
We may also set an initial state for the `UnitarySimulator`; however, this state is an initial *unitary matrix* $U_i$, not a statevector. In this case the returned unitary will be $U.U_i$, given by applying the circuit unitary to the initial unitary matrix.
**Note:**
* The initial unitary must be a valid unitary matrix $U^\dagger.U =\mathbb{1}$. If not an exception will be raised.
* If a Qobj contains multiple experiments, the initial unitary must be the correct size for *all* experiments in the Qobj, otherwise an exception will be raised.
Let us consider preparing the output unitary of the previous circuit as the initial state for the simulator:
```
# Construct an empty (identity) quantum circuit
qr = QuantumRegister(2)
circ = QuantumCircuit(qr)
circ.iden(qr)
# Set the initial unitary (the Bell-state preparation unitary from the previous cell)
opts = {"initial_unitary": np.array([[ 1, 1, 0, 0],
                                     [ 0, 0, 1, -1],
                                     [ 0, 0, 1, 1],
                                     [ 1, -1, 0, 0]] / np.sqrt(2))}
# Select the UnitarySimulator from the Aer provider
simulator = Aer.get_backend('unitary_simulator')
# Execute and get the resulting unitary U.U_i
result = execute(circ, simulator, backend_options=opts).result()
# NOTE(review): the original cell called result.get_unitary(circ) twice in a row;
# the duplicate call was redundant and has been removed.
unitary = result.get_unitary(circ)
print("Initial Unitary:\n", unitary)
```
| github_jupyter |
# Example 3: Normalize data to MNI template
This example covers the normalization of data. Some people prefer to normalize the data during the preprocessing, just before smoothing. I prefer to do the 1st-level analysis completely in subject space and only normalize the contrasts for the 2nd-level analysis. But both approaches are fine.
For the current example, we will take the computed 1st-level contrasts from the previous experiment (again once done with fwhm=4mm and fwhm=8mm) and normalize them into MNI-space. To show two different approaches, we will do the normalization once with ANTs and once with SPM.
## Preparation
Before we can start with the ANTs example, we first need to download the already computed deformation field. The data can be found in the `derivatives/fmriprep` folder of the dataset and can be downloaded with the following `datalad` command:
```
%%bash
datalad get -J 4 -d /data/ds000114 /data/ds000114/derivatives/fmriprep/sub-0[2345789]/anat/*h5
```
**Note:** This might take a while, as datalad needs to download ~710MB of data
### Alternatively: Prepare yourself
We're using the precomputed warp field from [fmriprep](http://fmriprep.readthedocs.io), as this step otherwise would take up to 10 hours or more for all subjects to complete. If you're nonetheless interested in computing the warp parameters with ANTs yourself, without using [fmriprep](http://fmriprep.readthedocs.io), either check out the script [ANTS_registration.py](https://github.com/miykael/nipype_tutorial/blob/master/notebooks/scripts/ANTS_registration.py) or even quicker, use [RegistrationSynQuick](http://nipype.readthedocs.io/en/latest/interfaces/generated/interfaces.ants/registration.html#registrationsynquick), Nipype's implementation of `antsRegistrationSynQuick.sh`.
## Normalization with ANTs
The normalization with ANTs requires that you first compute the transformation matrix that would bring the anatomical images of each subject into template space. Depending on your system this might take a few hours per subject. To facilitate this step, the transformation matrix is already computed for the T1 images.
The data for it can be found under:
```
!ls /data/ds000114/derivatives/fmriprep/sub-*/anat/*h5
```
**Now, let's start with the ANTs normalization workflow!**
## Imports (ANTs)
First, we need to import all the modules we later want to use.
```
from os.path import join as opj
from nipype import Workflow, Node, MapNode
from nipype.interfaces.ants import ApplyTransforms
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.interfaces.fsl import Info
```
## Experiment parameters (ANTs)
It's always a good idea to specify all parameters that might change between experiments at the beginning of your script. And remember that we decided to run the group analysis without subject ``sub-01``, ``sub-06`` and ``sub-10`` because they are left-handed (see [this section](https://miykael.github.io/nipype_tutorial/notebooks/example_1stlevel.html#Special-case)).
```
experiment_dir = '/output'
output_dir = 'datasink'
working_dir = 'workingdir'
# list of subject identifiers (remember we use only right handed subjects)
subject_list = ['02', '03', '04', '05', '07', '08', '09']
# task name
task_name = "fingerfootlips"
# Smoothing widths used during preprocessing
fwhm = [4, 8]
# Template to normalize to
template = '/data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c/1mm_T1.nii.gz'
```
**Note** if you're not using the corresponding docker image, than the **``template``** file might not be in your ``data`` directory. To get ``mni_icbm152_nlin_asym_09c``, either download it from this [website](https://files.osf.io/v1/resources/fvuh8/providers/osfstorage/580705089ad5a101f17944a9), unpack it and move it to ``/data/ds000114/derivatives/fmriprep/`` or run the following command in a cell:
```bash
%%bash
curl -L https://files.osf.io/v1/resources/fvuh8/providers/osfstorage/580705089ad5a101f17944a9 \
-o /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c.tar.gz
tar xf /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c.tar.gz \
-C /data/ds000114/derivatives/fmriprep/.
rm /data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c.tar.gz
```
## Specify Nodes (ANTs)
Initiate all the different interfaces (represented as nodes) that you want to use in your workflow.
```
# Apply Transformation - applies the normalization matrix to contrast images
apply2con = MapNode(ApplyTransforms(args='--float',
input_image_type=3,
interpolation='BSpline',
invert_transform_flags=[False],
num_threads=1,
reference_image=template,
terminal_output='file'),
name='apply2con', iterfield=['input_image'])
```
## Specify input & output stream (ANTs)
Specify where the input data can be found & where and how to save the output data.
```
# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id', 'fwhm_id']),
name="infosource")
infosource.iterables = [('subject_id', subject_list),
('fwhm_id', fwhm)]
# SelectFiles - to grab the data (alternativ to DataGrabber)
templates = {'con': opj(output_dir, '1stLevel',
'sub-{subject_id}/fwhm-{fwhm_id}', '???_00??.nii'),
'transform': opj('/data/ds000114/derivatives/fmriprep/', 'sub-{subject_id}', 'anat',
'sub-{subject_id}_t1w_space-mni152nlin2009casym_warp.h5')}
selectfiles = Node(SelectFiles(templates,
base_directory=experiment_dir,
sort_filelist=True),
name="selectfiles")
# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir,
container=output_dir),
name="datasink")
# Use the following DataSink output substitutions
substitutions = [('_subject_id_', 'sub-')]
subjFolders = [('_fwhm_id_%ssub-%s' % (f, sub), 'sub-%s_fwhm%s' % (sub, f))
for f in fwhm
for sub in subject_list]
subjFolders += [('_apply2con%s/' % (i), '') for i in range(9)] # number of contrast used in 1stlevel an.
substitutions.extend(subjFolders)
datasink.inputs.substitutions = substitutions
```
## Specify Workflow (ANTs)
Create a workflow and connect the interface nodes and the I/O stream to each other.
```
# Initiation of the ANTs normalization workflow
antsflow = Workflow(name='antsflow')
antsflow.base_dir = opj(experiment_dir, working_dir)
# Connect up the ANTs normalization components
antsflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
('fwhm_id', 'fwhm_id')]),
(selectfiles, apply2con, [('con', 'input_image'),
('transform', 'transforms')]),
(apply2con, datasink, [('output_image', 'norm_ants.@con')]),
])
```
## Visualize the workflow (ANTs)
It always helps to visualize your workflow.
```
# Create ANTs normalization graph
antsflow.write_graph(graph2use='colored', format='png', simple_form=True)
# Visualize the graph
from IPython.display import Image
Image(filename=opj(antsflow.base_dir, 'antsflow', 'graph.png'))
```
## Run the Workflow (ANTs)
Now that everything is ready, we can run the ANTs normalization workflow. Change ``n_procs`` to the number of jobs/cores you want to use.
```
antsflow.run('MultiProc', plugin_args={'n_procs': 4})
```
# Normalization with SPM12
The normalization with SPM12 is rather straightforward. The only thing we need to do is run the Normalize12 module. **So let's start!**
## Imports (SPM12)
First, we need to import all the modules we later want to use.
```
from os.path import join as opj
from nipype.interfaces.spm import Normalize12
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.algorithms.misc import Gunzip
from nipype import Workflow, Node
```
## Experiment parameters (SPM12)
It's always a good idea to specify all parameters that might change between experiments at the beginning of your script. And remember that we decided to run the group analysis without subject ``sub-01``, ``sub-06`` and ``sub-10`` because they are left-handed (see [this section](https://miykael.github.io/nipype_tutorial/notebooks/example_1stlevel.html#Special-case)).
```
experiment_dir = '/output'
output_dir = 'datasink'
working_dir = 'workingdir'
# list of subject identifiers
subject_list = ['02', '03', '04', '05', '07', '08', '09']
# task name
task_name = "fingerfootlips"
# Smoothing widths used during preprocessing
fwhm = [4, 8]
# SPM12 tissue probability map (TPM.nii) used as the normalization template
template = '/opt/spm12-r7219/spm12_mcr/spm12/tpm/TPM.nii'
```
## Specify Nodes (SPM12)
Initiate all the different interfaces (represented as nodes) that you want to use in your workflow.
```
# Gunzip - unzip the anatomical image
gunzip = Node(Gunzip(), name="gunzip")
# Normalize - normalizes functional and structural images to the MNI template
normalize = Node(Normalize12(jobtype='estwrite',
tpm=template,
write_voxel_sizes=[1, 1, 1]),
name="normalize")
```
## Specify input & output stream (SPM12)
Specify where the input data can be found & where and how to save the output data.
```
# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id', 'fwhm_id']),
name="infosource")
infosource.iterables = [('subject_id', subject_list),
('fwhm_id', fwhm)]
# SelectFiles - to grab the data (alternativ to DataGrabber)
templates = {'con': opj(output_dir, '1stLevel',
'sub-{subject_id}/fwhm-{fwhm_id}', '???_00??.nii'),
'anat': opj('/data/ds000114/derivatives', 'fmriprep', 'sub-{subject_id}',
'anat', 'sub-{subject_id}_t1w_preproc.nii.gz')}
selectfiles = Node(SelectFiles(templates,
base_directory=experiment_dir,
sort_filelist=True),
name="selectfiles")
# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir,
container=output_dir),
name="datasink")
# Use the following DataSink output substitutions
substitutions = [('_subject_id_', 'sub-')]
subjFolders = [('_fwhm_id_%ssub-%s' % (f, sub), 'sub-%s_fwhm%s' % (sub, f))
for f in fwhm
for sub in subject_list]
substitutions.extend(subjFolders)
datasink.inputs.substitutions = substitutions
```
## Specify Workflow (SPM12)
Create a workflow and connect the interface nodes and the I/O stream to each other.
```
# Specify Normalization-Workflow & Connect Nodes
spmflow = Workflow(name='spmflow')
spmflow.base_dir = opj(experiment_dir, working_dir)
# Connect up SPM normalization components
spmflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
('fwhm_id', 'fwhm_id')]),
(selectfiles, normalize, [('con', 'apply_to_files')]),
(selectfiles, gunzip, [('anat', 'in_file')]),
(gunzip, normalize, [('out_file', 'image_to_align')]),
(normalize, datasink, [('normalized_files', 'norm_spm.@files'),
('normalized_image', 'norm_spm.@image'),
]),
])
```
## Visualize the workflow (SPM12)
It always helps to visualize your workflow.
```
# Create SPM normalization graph
spmflow.write_graph(graph2use='colored', format='png', simple_form=True)
# Visualize the graph
from IPython.display import Image
Image(filename=opj(spmflow.base_dir, 'spmflow', 'graph.png'))
```
## Run the Workflow (SPM12)
Now that everything is ready, we can run the SPM normalization workflow. Change ``n_procs`` to the number of jobs/cores you want to use.
```
spmflow.run('MultiProc', plugin_args={'n_procs': 4})
```
# Comparison between ANTs and SPM normalization
Now that we ran the normalization with ANTs and SPM, let us compare their output.
```
from nilearn.plotting import plot_stat_map
%matplotlib inline
anatimg = '/data/ds000114/derivatives/fmriprep/mni_icbm152_nlin_asym_09c/1mm_T1.nii.gz'
```
First, let's compare the normalization of the **anatomical** images:
```
plot_stat_map(
'/data/ds000114/derivatives/fmriprep/sub-02/anat/sub-02_t1w_space-mni152nlin2009casym_preproc.nii.gz',
title='anatomy - ANTs (normalized to ICBM152)', bg_img=anatimg,
threshold=200, display_mode='ortho', cut_coords=(-50, 0, -10));
plot_stat_map(
'/output/datasink/norm_spm/sub-02_fwhm4/wsub-02_t1w_preproc.nii',
title='anatomy - SPM (normalized to SPM\'s TPM)', bg_img=anatimg,
threshold=200, display_mode='ortho', cut_coords=(-50, 0, -10));
```
And what about the **contrast** images for **Finger > others**?
```
plot_stat_map(
'/output/datasink/norm_ants/sub-02_fwhm8/con_0005_trans.nii', title='contrast5 - fwhm=8 - ANTs',
bg_img=anatimg, threshold=2, vmax=5, display_mode='ortho', cut_coords=(-39, -37, 56));
plot_stat_map(
'/output/datasink/norm_spm/sub-02_fwhm8/wcon_0005.nii', title='contrast5 - fwhm=8 - SPM',
bg_img=anatimg, threshold=2, vmax=5, display_mode='ortho', cut_coords=(-39, -37, 56));
from nilearn.plotting import plot_glass_brain
plot_glass_brain(
'/output/datasink/norm_ants/sub-02_fwhm8/con_0005_trans.nii', colorbar=True,
threshold=3, display_mode='lyrz', black_bg=True, vmax=6, title='contrast5 - fwhm=8 - ANTs')
plot_glass_brain(
'/output/datasink/norm_spm/sub-02_fwhm8/wcon_0005.nii', colorbar=True,
threshold=3, display_mode='lyrz', black_bg=True, vmax=6, title='contrast5 - fwhm=8 - SPM');
```
| github_jupyter |
## Practice: Mastering Kung-Fu and A2C
*This part is based on [Practical RL week08 practice](https://github.com/yandexdataschool/Practical_RL/tree/master/week08_pomdp). All rights belong to the original authors.*
```
import sys
if 'google.colab' in sys.modules:
!pip install scipy==1.0.1
!wget https://raw.githubusercontent.com/yandexdataschool/Practical_RL/0ccb0673965dd650d9b284e1ec90c2bfd82c8a94/week08_pomdp/atari_util.py
!wget https://raw.githubusercontent.com/yandexdataschool/Practical_RL/0ccb0673965dd650d9b284e1ec90c2bfd82c8a94/week08_pomdp/env_pool.py
# If you are running on a server, launch xvfb to record game videos
# Please make sure you have xvfb installed
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import torch
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
import numpy as np
from IPython.core import display
import matplotlib.pyplot as plt
%matplotlib inline
```
### Kung-Fu, recurrent style
In this notebook we'll once again train an RL agent for atari [KungFuMaster](https://gym.openai.com/envs/KungFuMaster-v0/), this time using recurrent neural networks.

```
import gym
from atari_util import PreprocessAtari
def make_env():
    """Build the KungFuMaster environment, preprocessed to single 42x42 grayscale frames."""
    raw_env = gym.make("KungFuMasterDeterministic-v0")
    # Crop away the score bar / floor, downscale, drop color, keep 1 frame of history.
    wrapped = PreprocessAtari(raw_env, height=42, width=42,
                              crop=lambda img: img[60:-30, 15:],
                              color=False, n_frames=1)
    return wrapped
env = make_env()
obs_shape = env.observation_space.shape
n_actions = env.action_space.n
print("Observation shape:", obs_shape)
print("Num actions:", n_actions)
print("Action names:", env.env.env.get_action_meanings())
s = env.reset()
for _ in range(100):
s, _, _, _ = env.step(env.action_space.sample())
plt.title('Game image')
plt.imshow(env.render('rgb_array'))
plt.show()
plt.title('Agent observation')
plt.imshow(s.reshape([42, 42]))
plt.show()
```
### POMDP setting
The atari game we're working with is actually a POMDP: your agent needs to know timing at which enemies spawn and move, but cannot do so unless it has some memory.
Let's design another agent that has a recurrent neural net memory to solve this. Here's a sketch.

```
import torch
import torch.nn as nn
import torch.nn.functional as F
# a special module that converts [batch, channel, w, h] to [batch, units]
class Flatten(nn.Module):
    """Reshape a [batch, channel, w, h] tensor into [batch, units] by collapsing all non-batch dims."""
    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class SimpleRecurrentAgent(nn.Module):
    # Recurrent actor-critic: conv encoder -> dense -> LSTM cell -> (policy logits, state value).
    # Contains <YOUR CODE> exercise placeholders that the student must fill in.
    def __init__(self, obs_shape, n_actions, reuse=False):
        """A simple actor-critic agent.

        obs_shape: shape of a single observation (unused here; the conv stack
            assumes 1-channel 42x42 input -- TODO confirm against make_env).
        n_actions: size of the discrete action space (policy-head output width).
        reuse: unused; kept for API compatibility with the original template.
        """
        super(self.__class__, self).__init__()
        # Three stride-2 convolutions progressively downsample the observation.
        self.conv0 = nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(2, 2))
        self.conv1 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2))
        self.flatten = Flatten()
        # Dense layer maps the flattened conv features (512) to the RNN input size (128).
        self.hid = nn.Linear(512, 128)
        # LSTM cell carries the agent's memory across time steps (POMDP setting).
        self.rnn = nn.LSTMCell(128, 128)
        # Actor head (action logits) and critic head (scalar state value).
        self.logits = nn.Linear(128, n_actions)
        self.state_value = nn.Linear(128, 1)
    def forward(self, prev_state, obs_t):
        """
        Takes agent's previous hidden state and a new observation,
        returns a new hidden state and whatever the agent needs to learn
        """
        # Apply the whole neural net for one step here.
        # See docs on self.rnn(...).
        # The recurrent cell should take the last feedforward dense layer as input.
        <YOUR CODE>
        new_state = <YOUR CODE>
        logits = <YOUR CODE>
        state_value = <YOUR CODE>
        return new_state, (logits, state_value)
    def get_initial_state(self, batch_size):
        """Return a list of agent memory states at game start. Each state is a np array of shape [batch_size, ...]"""
        # Zero-initialized (h, c) pair matching the LSTMCell's 128-unit hidden size.
        return torch.zeros((batch_size, 128)), torch.zeros((batch_size, 128))
    def sample_actions(self, agent_outputs):
        """pick actions given numeric agent outputs (np arrays)"""
        logits, state_values = agent_outputs
        probs = F.softmax(logits)
        # Sample one action per batch row from the categorical policy.
        return torch.multinomial(probs, 1)[:, 0].data.numpy()
    def step(self, prev_state, obs_t):
        """ like forward, but obs_t is a numpy array """
        obs_t = torch.tensor(np.asarray(obs_t), dtype=torch.float32)
        # Detach so that environment interaction never backpropagates into the net.
        (h, c), (l, s) = self.forward(prev_state, obs_t)
        return (h.detach(), c.detach()), (l.detach(), s.detach())
n_parallel_games = 5
gamma = 0.99
agent = SimpleRecurrentAgent(obs_shape, n_actions).to(device)
state = [env.reset()]
_, (logits, value) = agent.step(agent.get_initial_state(1), state)
print("action logits:\n", logits)
print("state values:\n", value)
```
### Let's play!
Let's build a function that measures agent's average reward.
```
def evaluate(agent, env, n_games=1):
    """Play n_games full episodes with the agent; return the list of total rewards."""
    session_rewards = []
    for _ in range(n_games):
        # Fresh observation and fresh recurrent memory at episode start.
        obs = env.reset()
        memories = agent.get_initial_state(1)
        episode_reward = 0
        done = False
        while not done:
            next_memories, readouts = agent.step(
                memories, obs[None, ...])
            chosen = agent.sample_actions(readouts)
            obs, reward, done, _ = env.step(chosen[0])
            episode_reward += reward
            memories = next_memories
        session_rewards.append(episode_reward)
    return session_rewards
import gym.wrappers
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
rewards = evaluate(agent, env_monitor, n_games=3)
print(rewards)
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from IPython.display import HTML
video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(video_names[-1])) # You can also try other indices
```
### Training on parallel games
We introduce a class called EnvPool - it's a tool that handles multiple environments for you. Here's how it works:

```
from env_pool import EnvPool
pool = EnvPool(agent, make_env, n_parallel_games)
```
We are going to train our agent on a thing called __rollouts:__

A rollout is just a sequence of T observations, actions and rewards that agent took consequently.
* First __s0__ is not necessarily initial state for the environment
* Final state is not necessarily terminal
* We sample several parallel rollouts for efficiency
```
# for each of n_parallel_games, take 10 steps
rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(10)
print("Actions shape:", rollout_actions.shape)
print("Rewards shape:", rollout_rewards.shape)
print("Mask shape:", rollout_mask.shape)
print("Observations shape: ", rollout_obs.shape)
```
# Actor-critic objective
Here we define a loss function that uses rollout above to train advantage actor-critic agent.
Our loss consists of three components:
* __The policy "loss"__
$$ \hat J = {1 \over T} \cdot \sum_t { \log \pi(a_t | s_t) } \cdot A_{const}(s,a) $$
* This function has no meaning in and of itself, but it was built such that
* $ \nabla \hat J = {1 \over N} \cdot \sum_t { \nabla \log \pi(a_t | s_t) } \cdot A(s,a) \approx \nabla E_{s, a \sim \pi} R(s,a) $
* Therefore if we __maximize__ J_hat with gradient descent we will maximize expected reward
* __The value "loss"__
$$ L_{td} = {1 \over T} \cdot \sum_t { [r + \gamma \cdot V_{const}(s_{t+1}) - V(s_t)] ^ 2 }$$
* Ye Olde TD_loss from q-learning and alike
* If we minimize this loss, V(s) will converge to $V_\pi(s) = E_{a \sim \pi(a | s)} R(s,a) $
* __Entropy Regularizer__
$$ H = - {1 \over T} \sum_t \sum_a {\pi(a|s_t) \cdot \log \pi (a|s_t)}$$
* If we __maximize__ entropy we discourage agent from predicting zero probability to actions
prematurely (a.k.a. exploration)
So we optimize a linear combination of $L_{td}$ $- \hat J$, $-H$
```
```
```
```
```
```
__One more thing:__ since we train on T-step rollouts, we can use N-step formula for advantage for free:
* At the last step, $A(s_t,a_t) = r(s_t, a_t) + \gamma \cdot V(s_{t+1}) - V(s) $
* One step earlier, $A(s_t,a_t) = r(s_t, a_t) + \gamma \cdot r(s_{t+1}, a_{t+1}) + \gamma ^ 2 \cdot V(s_{t+2}) - V(s) $
* Et cetera, et cetera. This way the agent starts training much faster, since its estimate of A(s,a) depends less on its (imperfect) value function and more on actual rewards. There's also a [nice generalization](https://arxiv.org/abs/1506.02438) of this.
__Note:__ it's also a good idea to scale rollout_len up to learn longer sequences. You may wish to set it to >=20, or to start at 10 and then scale up as time passes.
```
def to_one_hot(y, n_dims=None, device=device):
    """Convert an integer tensor `y` into a one-hot float matrix on `device`.

    If n_dims is None, it is inferred as max(y) + 1.
    """
    flat = y.to(device).reshape(-1, 1)
    if n_dims is None:
        n_dims = int(torch.max(flat)) + 1
    one_hot = torch.zeros(flat.size(0), n_dims, device=device)
    one_hot.scatter_(1, flat, 1)
    return one_hot
opt = torch.optim.Adam(agent.parameters(), lr=1e-5)
def train_on_rollout(states, actions, rewards, is_not_done, prev_memory_states, gamma=0.99):
    """
    Takes a sequence of states, actions and rewards produced by generate_session.
    Updates agent's weights by following the policy gradient above.
    Please use Adam optimizer with default parameters.

    states: [batch_size, time, c, h, w] rollout observations
    actions: [batch_size, time] integer actions taken
    rewards: [batch_size, time] rewards received
    is_not_done: [batch_size, time] mask, 0 where the episode had already ended
    prev_memory_states: agent memory at the start of the rollout (detached below)
    gamma: discount factor for the cumulative-return recursion

    NOTE: contains <YOUR CODE> exercise placeholders to be filled in by the student.
    """
    # shape: [batch_size, time, c, h, w]
    states = torch.tensor(np.asarray(states), dtype=torch.float32)
    actions = torch.tensor(np.array(actions), dtype=torch.int64) # shape: [batch_size, time]
    rewards = torch.tensor(np.array(rewards), dtype=torch.float32) # shape: [batch_size, time]
    is_not_done = torch.tensor(np.array(is_not_done), dtype=torch.float32) # shape: [batch_size, time]
    rollout_length = rewards.shape[1] - 1
    # predict logits, probas and log-probas using an agent.
    # Detach the incoming memory so gradients never flow into previous rollouts.
    memory = [m.detach() for m in prev_memory_states]
    logits = [] # append logit sequence here
    state_values = [] # append state values here
    for t in range(rewards.shape[1]):
        obs_t = states[:, t]
        # use agent to compute logits_t and state values_t.
        # append them to logits and state_values array
        memory, (logits_t, values_t) = <YOUR CODE>
        logits.append(logits_t)
        state_values.append(values_t)
    # Stack per-step outputs along the time axis: [batch, time, ...].
    logits = torch.stack(logits, dim=1)
    state_values = torch.stack(state_values, dim=1)
    probas = F.softmax(logits, dim=2)
    logprobas = F.log_softmax(logits, dim=2)
    # select log-probabilities for chosen actions, log pi(a_i|s_i)
    actions_one_hot = to_one_hot(actions, n_actions).view(
        actions.shape[0], actions.shape[1], n_actions)
    logprobas_for_actions = torch.sum(logprobas * actions_one_hot, dim=-1)
    # Now let's compute two loss components:
    # 1) Policy gradient objective.
    # Notes: Please don't forget to call .detach() on advantage term. Also please use mean, not sum.
    # it's okay to use loops if you want
    J_hat = 0 # policy objective as in the formula for J_hat
    # 2) Temporal difference MSE for state values
    # Notes: Please don't forget to call .detach() on the V(s') term. Also please use mean, not sum.
    # it's okay to use loops if you want
    value_loss = 0
    # Bootstrap the return recursion from the last predicted state value.
    cumulative_returns = state_values[:, -1].detach()
    # Walk the rollout backwards so G_t can be built from G_{t+1}.
    for t in reversed(range(rollout_length)):
        r_t = rewards[:, t] # current rewards
        # current state values
        V_t = state_values[:, t]
        V_next = state_values[:, t + 1].detach() # next state values
        # log-probability of a_t in s_t
        logpi_a_s_t = logprobas_for_actions[:, t]
        # update G_t = r_t + gamma * G_{t+1} as we did in week6 reinforce
        cumulative_returns = G_t = r_t + gamma * cumulative_returns
        # Compute temporal difference error (MSE for V(s))
        value_loss += <YOUR CODE>
        # compute advantage A(s_t, a_t) using cumulative returns and V(s_t) as baseline
        advantage = <YOUR CODE>
        advantage = advantage.detach()
        # compute policy pseudo-loss aka -J_hat.
        J_hat += <YOUR CODE>
    # regularize with entropy
    entropy_reg = <YOUR CODE: compute entropy regularizer>
    # add-up three loss components and average over time
    loss = -J_hat / rollout_length +\
        value_loss / rollout_length +\
        -0.01 * entropy_reg
    # Gradient descent step
    <YOUR CODE>
    return loss.data.numpy()
# let's test it
memory = list(pool.prev_memory_states)
rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(10)
train_on_rollout(rollout_obs, rollout_actions,
rollout_rewards, rollout_mask, memory)
```
# Train
just run train step and see if agent learns any better
```
from IPython.display import clear_output
from tqdm import trange
from pandas import DataFrame
def moving_average(x, **kw):
    """Exponentially-weighted moving average of *x*.

    Keyword arguments (e.g. ``span=10``) are forwarded to ``DataFrame.ewm``.
    Returns a numpy array of the same length as *x*.

    Note: converted from a lambda assignment (PEP 8 E731) to a proper def.
    """
    return DataFrame({'x': np.asarray(x)}).x.ewm(**kw).mean().values
# Main training loop: repeatedly roll out the environment pool, train on the
# rollout, and every 100 iterations evaluate and plot the reward curve.
rewards_history = []
for i in trange(15000):
    # Snapshot recurrent memory *before* interacting, so training starts
    # from the states that actually produced this rollout.
    memory = list(pool.prev_memory_states)
    rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(
        10)
    train_on_rollout(rollout_obs, rollout_actions,
                     rollout_rewards, rollout_mask, memory)
    if i % 100 == 0:
        # Periodic evaluation on a fresh game (n_games=1 keeps it cheap but noisy).
        rewards_history.append(np.mean(evaluate(agent, env, n_games=1)))
        clear_output(True)
        plt.plot(rewards_history, label='rewards')
        plt.plot(moving_average(np.array(rewards_history),
                                span=10), label='rewards ewma@10')
        plt.legend()
        plt.show()
        # Early stop once the homework threshold is reached.
        if rewards_history[-1] >= 10000:
            print("Your agent has just passed the minimum homework threshold")
            break
```
Relax and grab some refreshments while your agent is locked in an infinite loop of violence and death.
__How to interpret plots:__
The session reward is the easy thing: it should in general go up over time, but it's okay if it fluctuates ~~like crazy~~. It's also OK if the reward doesn't increase substantially before some 10k initial steps. However, if the reward reaches zero and doesn't seem to recover over 2-3 evaluations, something is wrong.
Since we use a policy-based method, we also keep track of __policy entropy__ - the same one you used as a regularizer. The only important thing about it is that your entropy shouldn't drop too low (`< 0.1`) before your agent gets the yellow belt. Or at least it can drop there, but _it shouldn't stay there for long_.
If it does, the culprit is likely:
* Some bug in entropy computation. Remember that it is $ -\sum_i p(a_i) \cdot \log p(a_i) $
* Your agent architecture converges too fast. Increase entropy coefficient in actor loss.
* Gradient explosion - just [clip gradients](https://stackoverflow.com/a/56069467) and maybe use a smaller network
* Us. Or PyTorch developers. Or aliens. Or lizardfolk. Contact us on forums before it's too late!
If you're debugging, just run `logits, values = agent.step(batch_states)` and manually look into logits and values. This will reveal the problem 9 times out of 10: you'll likely see some NaNs or insanely large numbers or zeros. Try to catch the moment when this happens for the first time and investigate from there.
### "Final" evaluation
```
import gym.wrappers
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
final_rewards = evaluate(agent, env_monitor, n_games=20)
print("Final mean reward", np.mean(final_rewards))
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from IPython.display import HTML
video_names = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(video_names[-1])) # You can also try other indices
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import datetime as dt
```
# **Task** **1**
```
#dummy data for task 1
df = pd.read_csv('data.csv')
df = df.drop([1693,1694],axis=0)
def date_difference(dataframe):
    """Add day-difference columns between adjacent date columns of *dataframe*.

    Cleans the frame (drops columns that are more than 30% empty, then rows
    with remaining missing values), auto-detects object columns that parse as
    dates, keeps only their date part, and appends ``days(a-b)`` columns with
    the whole-day difference between each adjacent pair of date columns (plus
    first-to-last when there are more than two).

    Parameters
    ----------
    dataframe : pandas.DataFrame

    Returns
    -------
    pandas.DataFrame
        The cleaned frame with added difference columns.
    """
    # Drop columns if they have empty values for more than 30% of the data,
    # otherwise drop the rows with missing values.
    for col in dataframe.columns:
        if dataframe[col].isnull().sum() > dataframe[col].size * 0.3:
            dataframe = dataframe.drop(col, axis=1)
    # Bug fix: dropna() returns a new frame; the original discarded the result.
    dataframe = dataframe.dropna()

    def _to_datetime_if_possible(col):
        # Convert object columns that parse as dates; leave other text alone.
        # (Replaces the deprecated errors='ignore' option of pd.to_datetime.)
        if col.dtype != 'object':
            return col
        try:
            return pd.to_datetime(col, utc=True)
        except (ValueError, TypeError):
            return col

    dataframe = dataframe.apply(_to_datetime_if_possible, axis=0)
    # Columns that ended up holding datetime values.
    date_cols = [c for c in dataframe.columns
                 if str(dataframe[c].dtypes)[:10] == 'datetime64']
    # Drop columns that hold the current timestamp (import artifacts).
    # Bug fix: iterate a copy -- the original popped from the list while
    # enumerating it, which skips the element after each removal.
    for c in list(date_cols):
        if dataframe[c].iloc[0] == dt.date.today():
            dataframe = dataframe.drop(c, axis=1)
            date_cols.remove(c)
    # Keep only the date part of any timestamp columns.
    for c in date_cols:
        dataframe[c] = dataframe[c].dt.date
    # Difference (in days) between each adjacent pair of date columns;
    # naming scheme prevents duplicate columns with the same values.
    for i in range(len(date_cols) - 1):
        name = "days({}-{})".format(date_cols[i], date_cols[i + 1])
        dataframe[name] = (dataframe[date_cols[i]]
                           - dataframe[date_cols[i + 1]]).apply(lambda x: x.days)
    # Also first-to-last when there are more than two date columns.
    if len(date_cols) > 2:
        name = "days({}-{})".format(date_cols[0], date_cols[-1])
        dataframe[name] = (dataframe[date_cols[0]]
                           - dataframe[date_cols[-1]]).apply(lambda x: x.days)
    return dataframe
date_difference(df)
```
# **Task 2**
```
# Dummy data for outlier detection: an evenly spaced salary column plus a
# few extreme values appended as outliers.
df = pd.DataFrame(np.linspace(500, 1500, 50))
df.rename(columns={0: 'Salary'}, inplace=True)
df1 = pd.DataFrame([5000.0, 25000.0, 1700.0, 500000.5])
df1.rename(columns={0: 'Salary'}, inplace=True)
# Bug fix: Series.append() was removed in pandas 2.0; pd.concat also keeps
# the result a DataFrame, which the downstream code needs (it reads .columns).
data = pd.concat([df, df1], ignore_index=True)
#method (IQR)
def remove_outliers(df1):
    """Remove or impute IQR outliers in the float columns of *df1*.

    Columns that are more than 40% empty are dropped, then rows with any
    remaining missing value. Values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
    are treated as outliers: if they amount to less than 30% of the cells
    in the affected columns, their rows are dropped; otherwise they are
    replaced by the column mean.

    Parameters
    ----------
    df1 : pandas.DataFrame

    Returns
    -------
    pandas.DataFrame
        The cleaned frame.
    """
    # Drop the mostly-empty columns first...
    for col in df1.columns:
        if df1[col].isnull().sum() > df1[col].size * 0.4:
            df1 = df1.drop(col, axis=1)
    # ...then rows that still contain missing values.
    df1 = df1.dropna()
    # Bug fix: the original detected outliers on the *global* `data` instead
    # of the `df1` argument, leaving the parameter unused past this point.
    outliers = []
    outlier_cols = []
    for col in df1.columns:
        if df1[col].dtype == 'float64':
            col_vals = df1[col]
            # IQR fences (np.percentile does not require pre-sorting).
            q1, q3 = np.percentile(col_vals, [25, 75])
            iqr = q3 - q1
            lower_limit = q1 - (1.5 * iqr)
            upper_limit = q3 + (1.5 * iqr)
            bad = [y for y in col_vals
                   if not (lower_limit <= y <= upper_limit)]
            if bad:
                outliers.extend(bad)
                outlier_cols.append(col)
                # Mark outliers as NaN so they can be dropped/imputed below.
                df1[col] = col_vals.where((col_vals >= lower_limit)
                                          & (col_vals <= upper_limit))
    # Few outliers (<30% of the affected columns' cells): drop their rows.
    if len(outliers) < df1[outlier_cols].size * 0.3:
        df1 = df1.dropna()
    # Otherwise replace the outlier values with the column mean.
    else:
        for col in outlier_cols:
            df1[col] = df1[col].fillna(float(df1[col].dropna().mean()))
    return df1
remove_outliers(data)
```
# **Task 3**
```
arr = np.linspace(500,1500,50).reshape(10,5)
df = pd.DataFrame(arr)
df
def remove_high_corr_columns(df1):
    """Drop columns whose absolute Pearson correlation with an earlier column
    exceeds 0.85.

    The frame is first cleaned: columns that are more than 40% empty are
    dropped (this keeps the number of dropped columns minimal), then rows
    with remaining missing values.

    Parameters
    ----------
    df1 : pandas.DataFrame

    Returns
    -------
    pandas.DataFrame
        Frame without the highly correlated columns.
    """
    # Remove the mostly-empty (>40% null) columns.
    for col in df1.columns:
        if df1[col].isnull().sum() > df1[col].size * 0.4:
            df1 = df1.drop(col, axis=1)
    # Drop rows with remaining null values.
    df1 = df1.dropna()
    # Absolute pairwise Pearson correlation matrix.
    corr_matrix = df1.corr().abs()
    # Keep only the upper triangle so each pair is considered once.
    # Bug fix: np.bool was removed in NumPy 1.24; use the builtin bool.
    upper = corr_matrix.where(
        np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    # Columns correlated above the threshold with some earlier column.
    to_drop = [column for column in upper.columns if any(upper[column] > 0.85)]
    # Drop them and return the reduced frame.
    df1 = df1.drop(to_drop, axis=1)
    return df1
remove_high_corr_columns(df)
```
| github_jupyter |
```
import digits
import tensorflow as tf
from time import time
import itertools as it
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, f1_score, \
recall_score, classification_report, confusion_matrix
DATA_PATH = 'digits.csv'
LEARNING_RATE = 0.1
BATCH_SIZE = 32
EPOCHS = 30
MODEL_DIR = 'cnn/model_{}'
labels, pixels = digits.read(DATA_PATH)
pixels_sq = pixels.reshape(-1, 28, 28)
train_label, test_label, train_pixel, test_pixel = train_test_split(labels, pixels_sq, test_size=0.1,
stratify=labels, shuffle=True, random_state=0)
train_label, dev_label, train_pixel, dev_pixel = train_test_split(train_label, train_pixel, test_size=0.11111111,
stratify=train_label, shuffle=True, random_state=0)
train_label.shape, dev_label.shape, test_label.shape
train_pixel.shape, dev_pixel.shape, test_pixel.shape
num_labels = len(set(labels))
num_labels
def model_fn(features, labels, mode):
    """tf.estimator model function: a small CNN digit classifier.

    Architecture (spatial sizes annotated for 28x28x1 input): two 3x3 convs,
    2x2 max-pool, three more 3x3 convs, 2x2 max-pool, flatten, dense logits.
    Handles all three estimator modes (PREDICT / EVAL / TRAIN).

    NOTE(review): written against the TF 1.x `tf.layers` / `tf.losses` API,
    which was removed in TF 2.x. `num_labels` and `LEARNING_RATE` come from
    the enclosing module.
    """
    c1 = tf.layers.conv2d(features, 4, 3, activation=tf.nn.relu)  # 26 x 26
    c2 = tf.layers.conv2d(c1, 4, 3, activation=tf.nn.relu)  # 24 x 24
    m1 = tf.layers.max_pooling2d(c2, 2, 2)  # 12 x 12
    c3 = tf.layers.conv2d(m1, 8, 3, activation=tf.nn.relu)  # 10 x 10
    c4 = tf.layers.conv2d(c3, 8, 3, activation=tf.nn.relu)  # 8 x 8
    c5 = tf.layers.conv2d(c4, 8, 3, activation=tf.nn.relu)  # 6 x 6
    m2 = tf.layers.max_pooling2d(c5, 2, 2)  # 3 x 3
    fc = tf.layers.flatten(m2)
    logits = tf.layers.dense(fc, num_labels)
    # PREDICT: return logits, softmax probabilities and the argmax label.
    if mode == tf.estimator.ModeKeys.PREDICT:
        proba = tf.nn.softmax(logits, axis=-1)
        predictions = {
            'logits': logits,
            'probabilities': proba,
            'labels': tf.argmax(proba, axis=-1),
        }
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # TRAIN/EVAL: labels are expected one-hot here (softmax_cross_entropy,
    # not the sparse variant) -- see the dataset preprocessing in this file.
    loss = tf.losses.softmax_cross_entropy(labels, logits)
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss)
    # TRAIN: plain SGD on the global step.
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    train_op = optimizer.minimize(loss, tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def td_input_fn(pixels, labels, batch_size, epochs, is_training, buffer=100):
    """Build a tf.data pipeline of (standardized image, one-hot label) batches.

    Used for both training (shuffled) and evaluation (not shuffled).
    `buffer` scales the shuffle buffer and sets the prefetch depth.
    NOTE(review): relies on the TF 1.x API (`tf.to_float`).
    """
    ds = tf.data.Dataset.from_tensor_slices((pixels, labels))
    def preprocessing(p, lbl):
        # Add a channel axis, cast to float and standardize per image;
        # labels become one-hot vectors (required by softmax_cross_entropy).
        p3d = tf.expand_dims(p, axis=-1)
        p3d = tf.to_float(p3d)
        p3d = tf.image.per_image_standardization(p3d)
        lbl_1h = tf.one_hot(lbl, num_labels, dtype=tf.float32)
        return p3d, lbl_1h
    ds = ds.map(preprocessing, num_parallel_calls=8)
    # Shuffle only during training so evaluation order is deterministic.
    if is_training:
        ds = ds.shuffle(buffer * batch_size)
    ds = ds.batch(batch_size)
    ds = ds.repeat(epochs)
    ds = ds.prefetch(buffer)
    return ds
def pred_input_fn(pixels, batch_size, buffer=100):
    """Build a tf.data pipeline of standardized image batches (no labels).

    Same image preprocessing as td_input_fn, for Estimator.predict().
    NOTE(review): relies on the TF 1.x API (`tf.to_float`).
    """
    ds = tf.data.Dataset.from_tensor_slices(pixels)
    def preprocessing(p):
        # Add a channel axis, cast to float and standardize per image.
        p3d = tf.expand_dims(p, axis=-1)
        p3d = tf.to_float(p3d)
        p3d = tf.image.per_image_standardization(p3d)
        return p3d
    ds = ds.map(preprocessing, num_parallel_calls=8)
    ds = ds.batch(batch_size)
    ds = ds.prefetch(buffer)
    return ds
model_dir = MODEL_DIR.format(int(time()))
config = tf.estimator.RunConfig(tf_random_seed=0,
save_summary_steps=50,
save_checkpoints_steps=250,
keep_checkpoint_max=None)
clf = tf.estimator.Estimator(model_fn, model_dir, config)
train_input_fn = lambda: td_input_fn(train_pixel, train_label, BATCH_SIZE, EPOCHS, True)
dev_input_fn = lambda: td_input_fn(dev_pixel, dev_label, BATCH_SIZE, 1, False)
train_spec = tf.estimator.TrainSpec(train_input_fn)
eval_spec = tf.estimator.EvalSpec(dev_input_fn, None, start_delay_secs=0.1, throttle_secs=0.1)
tf.estimator.train_and_evaluate(clf, train_spec, eval_spec)
checkpoint_path = r'cnn/model_1540258258\model.ckpt-4000'
test_input_fn = lambda: pred_input_fn(test_pixel, BATCH_SIZE)
test_pred = clf.predict(test_input_fn, predict_keys=['labels'], checkpoint_path=checkpoint_path)
test_pred = [p['labels'] for p in test_pred]
test_pred = np.asarray(test_pred)
eval_input_fn = lambda: td_input_fn(test_pixel, test_label, BATCH_SIZE, 1, False)
clf.evaluate(eval_input_fn, checkpoint_path=checkpoint_path)
def print_summary_metrics(y_true, y_pred):
    """Print weighted precision/recall/F1 plus plain accuracy for a prediction set."""
    weighted_metrics = (
        ('precision', precision_score),
        ('recall', recall_score),
        ('f1 score', f1_score),
    )
    for label, scorer in weighted_metrics:
        print(label + ':', scorer(y_true, y_pred, average='weighted'))
    print('accuracy:', accuracy_score(y_true, y_pred))
print_summary_metrics(test_label, test_pred)
print(classification_report(test_label, test_pred))
cm = confusion_matrix(test_label, test_pred)
cm
```
| github_jupyter |
# Creating structures in pyiron
This section gives a brief introduction about some of the tools available in pyiron to construct atomic structures.
For the sake of compatibility, our structure class is written to be compatible with the popular Atomistic Simulation Environment package ([ASE](https://wiki.fysik.dtu.dk/ase/)). This makes it possible to use routines from ASE to help set-up structures.
Furthermore, pyiron uses the [NGLview](http://nglviewer.org/nglview/latest/api.html) package to visualize the structures and trajectories interactively in 3D using NGLview-widgets.
As preparation for the following discussion we import a few python libraries
```
import numpy as np
%matplotlib inline
import matplotlib.pylab as plt
```
and create a pyiron project named 'structures':
```
from pyiron import Project
pr = Project(path='structures')
```
## Bulk crystals
In this section we discuss various possibilities to create bulk crystal structures.
### Using `create_structure()`
The simplest way to generate simple crystal structures is using the inbuilt `create_structure()` function specifying the element symbol, Bravais basis and the lattice constant(s)
Note: The output gives a cubic cell rather than the smallest non-orthogonal unit cell.
```
structure = pr.create_structure('Al',
bravais_basis='fcc',
lattice_constant=4.05)
```
To plot the structure interactively in 3D simply use:
```
structure.plot3d()
```
### Using `create_ase_bulk()`
Another convenient way to set up structures is using the `create_ase_bulk()` function which is built on top of the ASE build package for [bulk crystals](https://wiki.fysik.dtu.dk/ase/ase/build/build.html#ase.build.bulk). This function returns an object which is of the pyiron structure object type.
**Example:** fcc bulk aluminum in a cubic cell
```
structure = pr.create_ase_bulk('Al', cubic=True)
structure.plot3d()
```
**Example:** wurtzite GaN in a 3x3x3 repeated orthorhombic cell.
Note:
- In contrast to new_structure = structure.repeat() which creates a new object, set_repeat() modifies the existing structure object.
- Setting `spacefill=False` in the `plot3d()` method changes the atomic structure style to "ball and stick".
```
structure = pr.create_ase_bulk('AlN',
crystalstructure='wurtzite',
a=3.5, orthorhombic=True)
structure.set_repeat([3,3,3])
structure.plot3d(spacefill=False)
```
## Creating surfaces (using ASE)
Surfaces can be created using the `create_surface()` function which is also built on top of the ASE build package for [surfaces](https://wiki.fysik.dtu.dk/ase/_modules/ase/build/surface.html)
**Example:** Creating a 3x4 fcc Al(111) surface with 4 layers and a vacuum of 10 Ångström
```
Al_111 = pr.create_surface("Al", surface_type="fcc111",
size=(3, 4, 4), vacuum=10, orthogonal=True)
Al_111.plot3d()
```
## Creating structures without importing the project class
In all the examples shown above, the structures are created from the pyiron `Project` object. It is also possible to do this without importing/initializing this object. For this, the appropriate imports must be made.
```
from pyiron import create_ase_bulk, create_surface
structure = create_ase_bulk('AlN',
crystalstructure='wurtzite',
a=3.5, orthorhombic=True)
structure.set_repeat([3,3,3])
structure.plot3d(spacefill=False)
Al_111 = create_surface("Al", surface_type="fcc111",
size=(3, 4, 4), vacuum=10, orthogonal=True)
Al_111.plot3d()
```
### Using the ASE spacegroup class
```
from ase.spacegroup import crystal
from pyiron import ase_to_pyiron
a = 9.04
skutterudite = crystal(('Co', 'Sb'),
basis=[(0.25, 0.25, 0.25), (0.0, 0.335, 0.158)],
spacegroup=204,
cellpar=[a, a, a, 90, 90, 90])
skutterudite = ase_to_pyiron(skutterudite)
skutterudite.plot3d()
```
## Accessing the properties of the structure object
Using the bulk aluminum fcc example from before the structure object can be created by
```
structure = pr.create_ase_bulk('Al', cubic=True)
```
A summary of the information about the structure is given by using
```
print(structure)
```
The cell vectors of the structure object can be accessed and edited through
```
structure.cell
```
The positions of the atoms in the structure object can be accessed and edited through
```
structure.positions
```
## Point defects
### Creating a single vacancy
We start by setting up a 4x4x4 supercell
```
structure = pr.create_ase_bulk('Al', cubic=True)
structure.set_repeat([4,4,4])
```
To create the vacancy at position index "0" simply use:
```
del structure[0]
```
To plot the structure that now contains a vacancy run:
```
structure.plot3d()
```
### Creating multiple vacancies
```
# First create a 4x4x4 supercell
structure = pr.create_ase_bulk('Al', cubic=True)
structure.set_repeat([4,4,4])
print('Number of atoms in the repeat unit: ',structure.get_number_of_atoms())
```
The `del` command works for passing a list of indices to the structure object. For example, a random set of n$_{\text{vac}}$ vacancies can be created by using
```
# Generate a list of indices for the vacancies
n_vac = 24
vac_ind_lst = np.random.permutation(len(structure))[:n_vac]
# Remove atoms according to the "vac_ind_lst"
del structure[vac_ind_lst]
# Visualize the structure
print('Number of atoms in the repeat unit: ',structure.get_number_of_atoms())
structure.plot3d()
```
### Random substitutional alloys
```
# Create a 4x4x4 supercell
structure = pr.create_ase_bulk('Al', cubic=True)
structure.set_repeat([4,4,4])
```
Substitutional atoms can be defined by changing the atomic species accessed through its position index.
Here, we set $n_{\text{sub}}$ magnesium substitutional atoms at random positions
```
n_sub = 24
structure[np.random.permutation(len(structure))[:n_sub]] = 'Mg'
# Visualize the structure and print some additional information about the structure
print('Number of atoms in the repeat unit: ',structure.get_number_of_atoms())
print('Chemical formula: ',structure.get_chemical_formula())
structure.plot3d()
```
## Explicit definition of the structure
You can also set-up structures through the explicit input of the cell parameters and positions
```
cell = 10.0 * np.eye(3) # Specifying the cell dimensions
positions = [[0.25, 0.25, 0.25], [0.75, 0.75, 0.75]]
elements = ['O', 'O']
# Now use the Atoms class to create the instance.
O_dimer = pr.create_atoms(elements=elements, scaled_positions=positions, cell=cell)
O_dimer.plot3d()
```
## Importing from cif/other file formats
Parsers from ASE can be used to import structures from other formats. In this example, we will download and import a Nepheline structure from the [Crystallography Open Database (COD)](http://www.crystallography.net/cod/index.php)
```
# The COD structures can be accessed through their unique COD identifier
cod = 1008753
filename = '{}.cif'.format(cod)
url = 'http://www.crystallography.net/cod/{}'.format(filename)
cif_structure = """\
#------------------------------------------------------------------------------
#$Date: 2015-01-27 21:58:39 +0200 (Tue, 27 Jan 2015) $
#$Revision: 130149 $
#$URL: svn://www.crystallography.net/cod/cif/1/00/87/1008753.cif $
#------------------------------------------------------------------------------
#
# This file is available in the Crystallography Open Database (COD),
# http://www.crystallography.net/
#
# All data on this site have been placed in the public domain by the
# contributors.
#
data_1008753
loop_
_publ_author_name
'Buerger, M J'
'Klein, G E'
'Donnay, G'
_publ_section_title
;
Determination of the crystal structure of nepheline
;
_journal_coden_ASTM AMMIAY
_journal_name_full 'American Mineralogist'
_journal_page_first 805
_journal_page_last 818
_journal_volume 39
_journal_year 1954
_chemical_formula_structural 'K Na3 Al4 Si4 O16'
_chemical_formula_sum 'Al4 K Na3 O16 Si4'
_chemical_name_mineral Nepheline
_chemical_name_systematic 'Potassium trisodium tetraaluminium silicate'
_space_group_IT_number 173
_symmetry_cell_setting hexagonal
_symmetry_Int_Tables_number 173
_symmetry_space_group_name_Hall 'P 6c'
_symmetry_space_group_name_H-M 'P 63'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 120
_cell_formula_units_Z 2
_cell_length_a 10.01
_cell_length_b 10.01
_cell_length_c 8.405
_cell_volume 729.4
_cod_database_code 1008753
loop_
_symmetry_equiv_pos_as_xyz
x,y,z
-y,x-y,z
y-x,-x,z
-x,-y,1/2+z
y,y-x,1/2+z
x-y,x,1/2+z
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_attached_hydrogens
_atom_site_calc_flag
K1 K1+ 2 a 0. 0. 0. 1. 0 d
Al1 Al3+ 2 b 0.3333 0.6667 0.18 1. 0 d
Si1 Si4+ 2 b 0.3333 0.6667 0.82 1. 0 d
O1 O2- 2 b 0.3333 0.6667 0. 1. 0 d
Na1 Na1+ 6 c 0.008 0.432 0. 1. 0 d
Al2 Al3+ 6 c 0.092 0.33 0.67 1. 0 d
Si2 Si4+ 6 c 0.092 0.33 0.33 1. 0 d
O2 O2- 6 c 0.02 0.33 0.5 1. 0 d
O3 O2- 6 c 0.18 0.5 0.75 1. 0 d
O4 O2- 6 c 0.17 0.53 0.25 1. 0 d
O5 O2- 6 c 0.23 0.28 0.25 1. 0 d
O6 O2- 6 c 0.23 0.28 0.75 1. 0 d
loop_
_atom_type_symbol
_atom_type_oxidation_number
K1+ 1.000
Al3+ 3.000
Si4+ 4.000
O2- -2.000
Na1+ 1.000"""
# Download and save the structure file locally
# import urllib
# urllib.request.urlretrieve(url=url, filename='strucs.'+filename);
with open('strucs.'+filename, "w") as f:
f.writelines(cif_structure)
# Using ase parsers to read the structure and then convert to a pyiron instance
import ase
from pyiron import ase_to_pyiron
structure = ase_to_pyiron(ase.io.read(filename='strucs.'+filename,
format='cif'))
structure.info["cod"] = cod
structure.plot3d()
```
Structures can be stored independently from jobs in HDF5 by using the special `StructureContainer` job. To save to disk, call `run()`.
```
container = pr.create_job(pr.job_type.StructureContainer, "nepheline")
container.structure = structure
container.run()
```
It's also possible to store multiple structures in one container and to store directly from a job. Let's use this here to store the equilibrated structures at finite temperatures.
```
al_container = pr.create_job(pr.job_type.StructureContainer, "al_temp", delete_existing_job=True)
for T in (400, 600, 800):
j = pr.create_job(pr.job_type.Lammps, "T_{}".format(T))
j.structure = pr.create_ase_bulk("Al", cubic = True)
j.potential = j.list_potentials()[0]
j.calc_md(temperature=T, n_ionic_steps=1000, pressure=0)
j.run()
structure = j.get_structure(-1)
structure.info["T"] = T
structure.info["P"] = 0
al_container.append(structure)
al_container.run()
al_container.structure_lst[0].info
al_container.structure_lst
```
| github_jupyter |
# Problem Set 2
See “Check Your Understanding” from [collections](../python_fundamentals/collections.ipynb) and [control flow](../python_fundamentals/control_flow.ipynb)
Note: unless stated otherwise, the timing of streams of payoffs is immediately at
time `0` where appropriate. For example, dividends $ \{d_1, d_2, \ldots d_{\infty}\} $
should be valued as $ d_1 + \beta d_2 + \beta^2 d_3 \ldots = \sum_{j=0}^{\infty} \beta^j d_j $.
This timing is consistent with the lectures and most economics, but is different from the timing assumptions
of some finance models.
## Question 1-4
Consider a bond that pays a \$500 dividend once a quarter.
It pays in the months of March, June, September, and December.
It promises to do so for 10 years after you purchase it (start in January 2019).
You discount the future at rate $ r = 0.005 $ per _month_.
### Question 1
How much do you value the asset in January 2019?
```
# Your code goes here
```
### Question 2
Consider a different asset that pays a lump sum at its expiration date rather than a quarterly dividend of \$500 dollars, how much would this asset need to pay in December 2028 (the final payment date of the quarterly asset) for the two assets to be equally valued?
```
# Your code goes here
```
### Question 3
How much should you be willing to pay if your friend bought the quarterly asset (from the main text) in January
2019 but wanted to sell it to you in October 2019?
```
# Your code goes here
```
### Question 4
If you already knew that your discount rate would change annually according to the
table below, at what price would you value the quarterly asset (from the main text) in January 2019?
*Hint*: There are various ways to do this… One way might include a zipped loop for years and a
second loop for months.
*Bonus Points*: Can you create the list of interest rates without calculating each year individually?
|Year|Discount Rate|
|:----:|:-------------:|
|2019|0.005|
|2020|0.00475|
|2021|0.0045|
|2022|0.00425|
|2023|0.004|
|2024|0.00375|
|2025|0.0035|
|2026|0.00325|
|2027|0.003|
|2028|0.00275|
Hint: create appropriate collections typing from the data directly in the code. You cannot parse the
text table directly.
```
# Your code goes here
```
## Questions 5-6
Companies often invest in training their employees to raise their
productivity. Economists sometimes wonder why companies
spend money on training employees when this incentivizes other companies to poach
their employees with higher salaries since the employees gain human capital from training.
Imagine it costs a company 25,000 dollars to teach their employees Python, but
it also raises their output by 2,500 dollars per month. The company discounts the future
at rate of $ r = 0.01 $ per month.
### Question 5
For how many full months does an employee need to stay at a company for that company to make a profit for
paying for their employees’ Python training?
```
# Your code goes here
```
### Question 6
Imagine that 3/4 of the employees stay for 8 months and 1/4 of the employees stay
for 24 months. Is it worth it for the company to invest in employee Python training?
```
# Your code goes here
```
## Question 7 and 8
Take the following stock market data, including a stock ticker, price, and company name:
|Ticker|Price|Name|
|:------:|:-------:|:----------------:|
|AAPL|175.96|Apple Inc.|
|GOOGL|0.00475|Alphabet Inc.|
|TVIX|0.0045|Credit Suisse AG|
Hint: create appropriate collections typing from the data directly in the code. You cannot parse the table directly.
### Question 7
- Create a new dict which associates ticker with its price. i.e. the dict key should be a string, the dict value should be a number, and you can ignore the name.
- Display a list of the underlying stock tickers using the dictionary. Hint:
use `.<TAB>` on your dictionary to look for methods to get the list
```
# Your code goes here
```
### Question 8 (More Challenging)
Using the same data,
- Create a new dict, whose values are dictionaries that have a price and name key. These keys should associate the stock tickers with both its stock price and the company name.
- Display a list of the underlying stock names (i.e. not the ticker symbol) using the dictionary. Hint: use a comprehension.
```
# Your code goes here
```
## Question 9 (More Challenging)
Imagine that we’d like to invest in a small startup company. We have secret
information (given to us from a legal source, like statistics, so that we
aren’t insider trading or anything like that…) that the startup will have
4,000 dollars of profits for its first 5 years,
then 20,000 dollars of profits for the next 10 years, and
then 50,000 dollars of profits for the 10 years after that.
After year 25, the company will go under and pay 0 profits.
The company would like you to buy 50% of its shares, which means that you
will receive 50% of all of the future profits.
If you discount the future at $ r = 0.05 $, how much would you be willing to pay?
Hint: Think of this in terms of NPV; you should use some conditional
statements.
Bonus points: Can you think of a way to use the summation equations from the lectures
to check your work!?
```
profits_0_5 = 4000
profits_5_15 = 20_000
profits_15_25 = 50_000
willingness_to_pay = 0.0
for year in range(25):
print("replace with your code!")
```
## Question 10 (More Challenging)
For the tuple `foo` below, use a combination of `zip`, `range`, and `len` to mimic `enumerate(foo)`. Verify that your proposed solution is correct by converting each to a list and checking equality with == HINT: You can see what the answer should look like by starting with `list(enumerate(foo))`.
```
foo = ("good", "luck!")
# Your code goes here
```
## Question 11
In economics, when an individual has knowledge, skills, or education that provides them with a
source of future income, we call it [human capital](https://en.wikipedia.org/wiki/Human_capital).
When a student graduating from high school is considering whether to continue with post-secondary
education, they may consider that it gives them higher-paying jobs in the future, but requires that
they commence work only after graduation.
Consider the simplified example where a student has perfectly forecastable employment and is given two choices:
1. Begin working immediately and make 40,000 dollars a year until they retire 40 years later.
1. Pay 5,000 dollars a year for the next 4 years to attend university and then get a job paying
50,000 dollars a year until they retire 40 years after making the college attendance decision.
Should the student enroll in school if the discount rate is $ r = 0.05 $?
```
# Discount rate
r = 0.05
# High school wage
w_hs = 40_000
# College wage and cost of college
c_college = 5_000
w_college = 50_000
# Compute npv of being a hs worker
# Compute npv of attending college
# Compute npv of being a college worker
# Is npv_collegeworker - npv_collegecost > npv_hsworker
```
| github_jupyter |
# Collect all data
```
import struct
def get_byte_list(lbl_file_name, img_file_name):
    """Read an MNIST IDX label/image file pair into (label, image) byte tuples.

    Parameters
    ----------
    lbl_file_name, img_file_name : str or path-like
        Paths to the IDX1 label file and the IDX3 image file.

    Returns
    -------
    list[tuple[bytes, bytes]]
        One (1-byte label, rows*cols image bytes) tuple per sample.

    Raises
    ------
    ValueError
        If the two headers disagree on the number of samples.
    """
    tuples = []
    with open(lbl_file_name, 'rb') as lbl_file, open(img_file_name, 'rb') as img_file:
        # IDX headers are big-endian: labels carry (magic, count),
        # images carry (magic, count, rows, cols).
        magic_number, num = struct.unpack('>II', lbl_file.read(8))
        _magic, _num, rows, cols = struct.unpack('>IIII', img_file.read(16))
        # Bug fix: was a bare assert, which is stripped under `python -O`;
        # validate explicitly instead.
        if num != _num:
            raise ValueError(
                'label/image headers disagree: {} labels vs {} images'.format(
                    num, _num))
        for _ in range(num):
            label = lbl_file.read(1)
            img = img_file.read(rows * cols)
            tuples.append((label, img))
    return tuples
!ls -l ../src/data/mnist/t*
from pathlib import Path
DATA_DIR = Path('../src/data/mnist')
train_data = get_byte_list(DATA_DIR / 'train-labels.idx1-ubyte', DATA_DIR / 'train-images.idx3-ubyte')
test_data = get_byte_list(DATA_DIR / 't10k-labels.idx1-ubyte', DATA_DIR / 't10k-images.idx3-ubyte')
data = train_data + test_data
print('tot num data', len(data))
```
# Shuffle the data
```
SEED = 20180516
from random import shuffle
from random import seed
seed(SEED)
print('first 4 before:', list(zip(*data[:4]))[0])
shuffle(data)
print('first 4 after:', list(zip(*data[:4]))[0])
```
# Split into folds
```
def print_folds(folds):
    """Print one tab-separated row per sample index: the index followed by
    that sample's (big-endian byte) label in each fold."""
    for idx in range(len(folds[0])):
        labels = [int.from_bytes(fold[idx][0], byteorder='big')
                  for fold in folds]
        cells = [str(idx)] + [str(value) for value in labels]
        print("\t".join(cells) + "\t")
NUM_FOLDS = 5
n_data = len(data)
n_data_per_fold = n_data // NUM_FOLDS
print(n_data_per_fold, 'per fold; rest:', n_data%NUM_FOLDS)
tmp_data = data
folds = []
for i in range(NUM_FOLDS):
heads = tmp_data[:n_data_per_fold]
tail = tmp_data[n_data_per_fold:]
folds.append(heads)
tmp_data = tail
```
# Write data to files
```
import os
directory = DATA_DIR.parent / 'mnist_iid_cv'
try:
os.mkdir(directory)
except FileExistsError:
print("A folder already exists:", directory)
for i in range(NUM_FOLDS):
folder_name = directory / ('fold' + str(1+i))
try:
os.mkdir(folder_name)
except FileExistsError:
print("A folder already exists:", folder_name)
os.listdir(directory)
def partition_mnist_list(pairs, nr_data_per_car, output_dir):
    """Split (label, image-bytes) pairs into per-car IDX files.

    Car i receives the next nr_data_per_car[i-1] consecutive pairs and gets
    two files in *output_dir*: car{i}-labels.byte (IDX1 header + labels) and
    car{i}-images.byte (IDX3 header + 28x28 ubyte images).
    """
    offset = 0
    for car_idx, count in enumerate(nr_data_per_car, start=1):
        chunk = pairs[offset:offset + count]
        offset += count
        lbls_path = output_dir / f"car{car_idx}-labels.byte"
        imgs_path = output_dir / f"car{car_idx}-images.byte"
        with open(lbls_path, 'wb') as lbl_file, \
                open(imgs_path, 'wb') as img_file:
            # IDX headers: magic number (ubyte dtype, ndim), then one
            # big-endian 4-byte size per dimension.
            lbl_file.write(b'\x00\x00\x08\x01'  # ubyte, 1-dim
                           + count.to_bytes(4, byteorder='big'))
            img_file.write(b'\x00\x00\x08\x03'  # ubyte, 3-dim
                           + count.to_bytes(4, byteorder='big')
                           + (28).to_bytes(4, byteorder='big')
                           + (28).to_bytes(4, byteorder='big'))
            for lbl, img in chunk:
                lbl_file.write(lbl)
                img_file.write(img)
folders = os.listdir(directory)
folders.sort()
for fold_name, data_pairs in zip(folders, folds):
output_dir = directory / fold_name
print(output_dir)
tot_data = len(data_pairs)
partition_mnist_list(data_pairs, [tot_data // 100 for _ in range(100)], output_dir)
```
# Read how many digits each client has
```
import numpy
import struct
def read_mnist_data(fname_img, fname_lbl):
    '''
    Read MNIST data from a byte file.
    Return: tuple of inputs and labels (numpy)
    '''
    # Labels: 8-byte header (magic, count), then one byte per label.
    with open(fname_lbl, 'rb') as label_stream:
        _magic, declared = struct.unpack(">II", label_stream.read(8))
        labels = numpy.fromfile(label_stream, dtype=numpy.int8)
    if declared != len(labels):
        print('Header mismatch. #labels != header number')
    # Images: 16-byte header (magic, count, rows, cols), then raw pixels.
    with open(fname_img, 'rb') as image_stream:
        _magic, count, height, width = struct.unpack(">IIII", image_stream.read(16))
        images = numpy.fromfile(image_stream, dtype=numpy.uint8).reshape(count, height, width)
    return (images, labels)
import os
# Root of the cross-validation folders written earlier.
directory = DATA_DIR.parent / 'mnist_iid_cv'
def read_car_data(folder_names, input_dir, car_i):
    '''
    Read data for one car
    '''
    images = []
    labels = []
    # Collect this car's files from every requested fold folder, then
    # concatenate them along the sample axis.
    for fold_name in folder_names:
        fold_dir = input_dir / fold_name
        imgs, lbls = read_mnist_data(
            fold_dir / f"car{car_i}-images.byte",
            fold_dir / f"car{car_i}-labels.byte",
        )
        images.append(imgs)
        labels.append(lbls)
    return (numpy.concatenate(images), numpy.concatenate(labels))
# Count, per digit, how many cars hold at least one sample of that digit.
folders = os.listdir(directory)
folders.remove('fold1')  # hold out fold1
list_of_pairs = [read_car_data(folders, directory, car_i) for car_i in range(1, 101)]
from itertools import groupby
import operator, functools
# BUG FIX: was `freq = dir()` -- a list of names currently in scope, which
# only worked by accident as an indexable container. Use a real digit->count
# mapping instead.
freq = {}
for i in range(10):
    freq[i] = 0
for i, (_, car_lbls) in enumerate(list_of_pairs):
    # groupby on the sorted labels yields one group per distinct digit.
    groups = groupby(numpy.sort(car_lbls))
    keys, data_length = zip(*[(key, len(list(data))) for (key, data) in groups])
    print(i+1, keys)
    # Total number of samples for this car.
    print(i+1, functools.reduce(operator.add, data_length))
    for num in keys:
        freq[num] += 1
print("Frequency")
for i in range(10):
    print(i, ":", freq[i])
```
| github_jupyter |
```
# Open the TFRecord file of per-example memorization results
# (TFRecordLoader is a project-local helper).
from result_records import TFRecordLoader
ds = TFRecordLoader('memorization_results.tfrecords')
```
# Loading Data
> consists of 4063300 records
```
data = []
indicies = []
import numpy as np
from tqdm import tqdm
# Walk every record, keeping only finite metric values (NaN/inf are dropped)
# together with their example indices.
for record, index in tqdm(ds):
    record, index = record.numpy(), index.numpy()
    if np.isfinite(record):
        data.append(record)
        indicies.append(index)
data = np.array(data)
indicies = np.array(indicies)
```
# Memorization Metric plots
> Plotting average values of memorization metric over a bucketed range of values
```
from IPython.display import display
import matplotlib.pyplot as plt
import ipywidgets as widgets
%matplotlib inline
import numpy as np
class Plotter:
    """Interactive widget that plots bucket-averaged values of a series.

    The series ``y`` is split into ``2**scale`` buckets; each bucket's mean
    is plotted against the x value at the bucket's right edge. An IntSlider
    selects the (log2) number of buckets and a button triggers a re-plot.
    """

    def __init__(self, title, xlabel, ylabel, y, x=None, size=25, default_slider_value=None):
        self.title = title
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.default_slider_value = default_slider_value
        self.y = y
        self.x = x
        if x is None:
            # BUG FIX: was len(data) -- a notebook global -- which broke the
            # plotter for any series other than `data`. The x axis must match
            # the series actually being plotted.
            self.x = [i for i in range(len(y))]
        # BUG FIX: was hard-coded `self.size = 25`, silently ignoring the
        # `size` argument.
        self.size = size
        self.params = {'legend.fontsize': 'large',
                       'figure.figsize': (15, 5),
                       'axes.labelsize': size,
                       'axes.titlesize': size,
                       'xtick.labelsize': size*0.75,
                       'ytick.labelsize': size*0.75,
                       'axes.titlepad': 25,
                       'font.family': 'sans-serif',
                       'font.weight': 'bold',
                       'text.color': 'aqua'
                       }

    def plot_data(self, scale):
        """Plot bucket means of y for 2**scale buckets."""
        scale = 2**scale  # converting log scale to bucket count
        buckets = []
        length = len(self.y)
        # Guard against a zero range-step when scale exceeds the length.
        bucket_size = max(1, length//scale)
        index = []
        for i in range(0, length, bucket_size):
            buckets.append(self.y[i:i+bucket_size].mean())
            # BUG FIX: clamp with len(self.x) rather than the notebook-global
            # `indicies`, so the plotter works for any x series.
            index.append(self.x[min(i+bucket_size-1, len(self.x)-1)])
        plt.plot(index, buckets)
        plt.rcParams.update(self.params)
        plt.title(self.title)
        plt.xlabel(self.xlabel)
        plt.ylabel(self.ylabel)
        plt.show()

    def clicked(self, b):
        """Button callback: clear the output area and re-plot at the slider scale."""
        self.out.clear_output()
        scale = self.slider.value
        with self.out:
            self.plot_data(scale)

    def run(self):
        """Build and display the output area, scale slider and plot button."""
        self.out = widgets.Output()
        button = widgets.Button(description="Plot Value")
        slider_max = int(np.log2(len(self.y)))
        if self.default_slider_value is not None:
            default_slider_value = self.default_slider_value
        else:
            # No default supplied: pick a random valid scale.
            default_slider_value = np.random.choice([i for i in range(1, slider_max)])
        self.slider = widgets.IntSlider(min=1, max=slider_max,
                                        value=default_slider_value,
                                        description="Scale",
                                        layout=widgets.Layout(width='50%'))
        box_layout = widgets.Layout(
            display='flex',
            flex_flow='column',
            align_items='center',
            width='80%'
        )
        box = widgets.VBox(
            [
                self.out,
                self.slider,
                button
            ],
            layout=box_layout
        )
        with self.out:
            self.plot_data(default_slider_value)
        button.on_click(self.clicked)
        display(box)
# Interactive plot of the memorization metric against example index.
plotter = Plotter(title="Memorization Metric",
                  xlabel='Index',ylabel='NLL Loss',
                  x=indicies,y=data)
plotter.run()
```
# Correlation
```
from scipy import signal
# Full discrete cross-correlation of the index sequence with the metric values.
# NOTE(review): mode="full" yields a series of length 2N-1, longer than the
# x series passed below; confirm this cross-correlation is the intended analysis.
correlation = signal.correlate(indicies, data, mode="full")
plotter = Plotter(xlabel='indicies',ylabel='correlation',
                  title='Correlation',x=indicies,y=correlation,default_slider_value=11)
plotter.run()
```
# Statistics
```
import matplotlib.pyplot as plt
# First quartile of the records: used to compare early vs late statistics.
SAMPLE_VALUE = len(data)*25//100
from sklearn.metrics import r2_score
# NOTE(review): r2_score(y_true, y_pred) is called as (indicies, data);
# using raw indices as the "true" series is unusual -- confirm intent.
r2 = r2_score(indicies,data)
print(f"R2 Score between indicies and data: {r2:.5f}")
avg_start = data[:SAMPLE_VALUE].mean()
avg_end = data[SAMPLE_VALUE:].mean()
var_start = data[:SAMPLE_VALUE].var()
var_end = data[SAMPLE_VALUE:].var()
print(f"Average NLL Loss changed from {avg_start:.5f} to {avg_end:.5f}")
# BUG FIX: corrected "Varience" -> "Variance" in the printed message.
print(f"Variance of NLL Loss changed from {var_start:.5f} to {var_end:.5f}")
print("Trend of very slight improvement continues")
```
| github_jupyter |
```
## Import Libraies
import pandas as pd
%pylab inline
import numpy as np
import re
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from math import sqrt
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing
import sklearn.model_selection as ms
import sklearn.metrics as sklm
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import roc_curve,auc
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn import datasets, linear_model
from scipy.stats import pearsonr
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
from sklearn import linear_model, tree, ensemble
import pickle
import warnings
warnings.filterwarnings('ignore')
```
# Data Cleaning and Preparation
```
## Load housing data ##
yaba = pd.read_csv('Data/House_prices_yaba.csv')
surulere = pd.read_csv('Data/House_prices_surulere.csv')
ikeja = pd.read_csv('Data/House_prices_ikeja.csv')
gbagada = pd.read_csv('Data/House_prices_gbagada.csv')
ajah = pd.read_csv('Data/House_prices_ajah.csv')
lekki = pd.read_csv('Data/House_prices_lekki_phase_1.csv')
ikorodu = pd.read_csv('Data/House_prices_ikorodu.csv')
## Combine the per-area frames into one data set ##
# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
# pd.concat reproduces the exact stacking order of the old append chain
# (append's default ignore_index=False is also concat's default).
lagos_house_data = pd.concat(
    [yaba, surulere, gbagada, lekki, ikorodu, ikeja, ajah]
)
## Data Shape ##
lagos_house_data.shape
```
## Checking and Removing Duplicate Records from the Dataset
```
# Rows sharing a property_code (listed more than once on the portal), shown
# for inspection.
lagos_house_data[lagos_house_data.duplicated(subset=['property_code'])].sort_values(by='property_code')
## Drop Duplicates From DataSet
# drop_duplicates() (no subset) removes only fully identical rows.
lagos_house_data = lagos_house_data.drop_duplicates()
## No property in the dataset that have all its columns duplicated by any other
lagos_house_data.shape
```
## Checking, Removing and Replacing Null Values in the Dataset
```
## Check Sum Of Null Values ##
# Per-column count of missing entries.
lagos_house_data.isnull().sum()
```
###### <ul>
<li>Null values in the 'price' column have to be removed so they do not affect the predictive model</li>
<li>Null values in 'detail_description' & 'key_features' have to be replaced </li>
</ul>
```
## Remove all Null Values in Price Column ##
# Rows without a price are useless for training the price model.
lagos_house_data.dropna(subset =['price'], inplace=True)
## Replace Null Value ##
# The placeholder text keeps the later string operations (lower-casing,
# str.contains) from failing on NaN floats.
lagos_house_data['detail_description'].replace(np.nan,'samples must be there', inplace=True)
## Replace Null Value ##
lagos_house_data['key_features'].replace(np.nan,'samples must be there', inplace=True)
```
### Removing Special Characters from the 'price' Column
```
## Splitter function: separate a raw price cell into numeric value and currency ##
def splitter(x, retrieve_dollar):
    """Parse a raw price entry.

    Entries are either plain numbers (naira) or strings like "$ 1500".
    Returns the currency symbol ('$' or 'N') when ``retrieve_dollar`` is
    truthy, otherwise the numeric price as a float.
    """
    if '$' in str(x):
        currency_type = '$'
        # BUG FIX: the original line ended with a stray "++", which is a
        # syntax error in Python.
        price = float(str(x).split(' ')[1])
    else:
        price = float(x)
        currency_type = 'N'
    if retrieve_dollar:
        return currency_type
    else:
        return price
# Apply the splitter to every raw price entry.
price_values = lagos_house_data.price.apply(lambda x :splitter(x,False))
currency_type = lagos_house_data.price.apply(lambda x :splitter(x,True))
price_values
# Replace the raw price column with the numeric values.
# NOTE(review): dollar amounts are kept as-is (no FX conversion) -- confirm.
lagos_house_data['price'] = price_values
lagos_house_data.isna().sum()
```
### Getting New Features From Property Details
### Actions
###### <ul>
Create new features that would have an effect on the price of properties in Lagos like:
<li>The type of property (house) and the number of bedrooms will have an effect on the price </li>
<li></li>
</ul>
```
## Converting strings to lower case ##
# Lower-casing makes the substring searches below case-insensitive;
# the float check skips NaN entries (NaN is a float).
lagos_house_data['detail_description'] = [x.lower() for x in lagos_house_data['detail_description']]
lagos_house_data['specific_location'] = [x.lower() for x in lagos_house_data['specific_location']]
lagos_house_data['description'] = [x.lower() if type(x) != float else x for x in lagos_house_data['description']]
lagos_house_data['service_level'] = [x.lower() if type(x) != float else x for x in lagos_house_data['service_level']]
## Locator function: first matching phrase, capitalised ##
def locator(column, iterator):
    """Return the first phrase from ``iterator`` found inside ``column``.

    The match is returned stripped and capitalised; None if nothing matches.
    """
    match = next((phrase for phrase in iterator if phrase in column), None)
    return match.strip().capitalize() if match is not None else None
## List of property type we wish to locate ##
property_type = ['semi detached bungalow', 'semi detached duplex', 'detached bungalow', 'self contain',
                 'mini flat', 'detached duplex', 'terraced bungalow', 'terraced duplex', 'penthouse flat',
                 'massionette house', 'blocks of flats', 'flat / apartment']
# Finding Property Type and dropping none values in the column
lagos_house_data['Property_Type'] = lagos_house_data['detail_description'].apply(lambda x: locator(x, property_type))
lagos_house_data.dropna(subset=['Property_Type'], inplace=True)
lagos_house_data.head(3)
## Dropping lands, office and commercial properties ##
# NOTE(review): str.contains('land') is a plain substring test and will also
# match words such as "landlord" or "island" -- confirm that is acceptable.
lagos_house_data.drop(lagos_house_data[(lagos_house_data['detail_description'].str.contains('land')) | (lagos_house_data['detail_description'].str.contains('office')) | (lagos_house_data['detail_description'].str.contains('commercial'))].index, inplace=True)
## Finding Key Features from 'description' and 'detail_description' ##
# Each flag is 1 if the keyword appears in either text column, else 0.
lagos_house_data['Parking_Space'] = np.where(
    ((lagos_house_data.detail_description.str.contains('parking space')) | (lagos_house_data.description.str.contains('parking space'))), 1, 0)
lagos_house_data['Security'] = np.where(
    ((lagos_house_data.detail_description.str.contains('security')) | (lagos_house_data.description.str.contains('security'))), 1, 0)
lagos_house_data['Electricity'] = np.where(
    ((lagos_house_data.detail_description.str.contains('electricity')) | (lagos_house_data.description.str.contains('electricity'))), 1, 0)
lagos_house_data['Furnished'] = np.where(
    ((lagos_house_data.detail_description.str.contains('furnished')) | (lagos_house_data.description.str.contains('furnished'))), 1, 0)
lagos_house_data['Security_Doors'] = np.where(
    ((lagos_house_data.detail_description.str.contains('security doors')) | (lagos_house_data.description.str.contains('security doors'))), 1, 0)
lagos_house_data['CCTV'] = np.where(((lagos_house_data.detail_description.str.contains('cctv')) | (lagos_house_data.description.str.contains('cctv'))), 1, 0)
lagos_house_data['Pool'] = np.where(
    ((lagos_house_data.detail_description.str.contains('pool')) | (lagos_house_data.description.str.contains('pool'))), 1, 0)
lagos_house_data['Gym'] = np.where(
    ((lagos_house_data.detail_description.str.contains('gym')) | (lagos_house_data.description.str.contains('gym'))), 1, 0)
lagos_house_data['BQ'] = np.where(
    ((lagos_house_data.detail_description.str.contains('bq')) | (lagos_house_data.detail_description.str.contains('serviced quarters'))), 1, 0)
pd.set_option('display.max_columns',None)
lagos_house_data.head(3)
```
## Outliers Check and Treatment
```
## Checking the relationship between price and property type in different locations ##
# One box plot per location, prices capped at 5,000,000 to keep the axes readable.
sns.catplot(
    data=lagos_house_data[(lagos_house_data['price'] <= 5000000)] ,x='Property_Type', y='price',
    col='location', kind='box', col_wrap=2, aspect=2.5, legend= 'True'
)
```
### Observations
###### <ul>
<li>Detached duplex, Semi detached duplex and Terraced duplex have the highest prices in all locations </li>
<li>Houses in Lekki phase 1 have the highest rent prices, while houses in Ikorodu have the lowest </li>
</ul>
```
## Checking the relationship between price and number of bedrooms in different locations ##
# One box plot per location, prices capped at 4,000,000 to keep the axes readable.
sns.catplot(
    data=lagos_house_data[(lagos_house_data['price'] <= 4000000)] ,x='bed', y='price',
    col='location', kind='box', col_wrap=3,
)
```
### Actions
###### <ul>
<li>Detect and remove outliers from the data </li>
<li>Reduce the maximum number of rooms from 7 to 5 </li>
</ul>
```
## Removing properties with zero value, no bedrooms, no bath, no toilet, and those with more that 5 beds
lagos_house_data =lagos_house_data[((lagos_house_data['bed'] > 0) & (lagos_house_data['bed'] <= 5))]
lagos_house_data =lagos_house_data[((lagos_house_data['bath'] > 0) & (lagos_house_data['bath'] <= 5))]
lagos_house_data =lagos_house_data[((lagos_house_data['toilet'] > 0) & (lagos_house_data['toilet'] <= 5))]
lagos_house_data =lagos_house_data[lagos_house_data['price'] > 0]
# NOTE(review): this bath/toilet > 0 filter is already implied by the two
# range filters above and is therefore redundant.
lagos_house_data =lagos_house_data[(lagos_house_data['bath'] > 0) & (lagos_house_data['toilet'] > 0)]
## Checking Data Shape
lagos_house_data.shape
# Function to detect outliers via Tukey fences
def outlier(df, column):
    """Return the index labels of rows whose ``column`` value lies outside
    the Tukey fences (1.5 * IQR beyond the 25th/75th percentiles)."""
    lower_q = np.percentile(df[column], 25)
    upper_q = np.percentile(df[column], 75)
    fence = (upper_q - lower_q) * 1.5
    low, high = lower_q - fence, upper_q + fence
    flagged = [value for value in df[column] if value < low or value > high]
    return df.loc[df[column].isin(flagged)].index
# Dropping price outliers
odd_price = outlier(lagos_house_data, 'price')
lagos_house_data.drop(odd_price, inplace=True)
## Checking the relationship between price and bedrooms in different locations ##
sns.catplot(
    data=lagos_house_data[(lagos_house_data['price'] <= 8000000)] ,x='bed', y='price',
    col='location', kind='box', col_wrap=3,
)
lagos_house_data.head(2)
# Drop free-text and bookkeeping columns that the model will not use.
cleaned_data= lagos_house_data.drop(['service_level','specific_location','features','description','category','property_code','date_posted/updated','key_features','detail_description','Unnamed: 0'],axis=1)
cleaned_data.head(6)
cleaned_data.to_csv('Data/cleaned_data.csv')
```
| github_jupyter |
# Analysis of the loads acting on the building
## *Matteo Franzoi* - Academic Year 2019/2020
### matricola 166788 (triennale)
---
```
from engineering_notation import EngNumber
import math
import numpy as np
from decimal import Decimal
```
---
#### Snow Load
```
# Characteristic ground snow load as a function of site altitude.
qsk = 1.39*(1+(788/728)**2);
print(qsk, 'kN/m^2\n~=')
qsk = EngNumber(qsk, precision=2)
print(qsk, 'kN/m^2')
# Flat roof (copertura piana)
mu_1 = .8;
# Not otherwise specified (non diversamente indicato)
C_E = 1;
# Conservative choice, no site information (cautelativamente)
C_t = 1;
def qs(qsk, mu, CE, Ct):
    """Roof snow load: characteristic ground load scaled by the shape (mu),
    exposure (CE) and thermal (Ct) coefficients."""
    load = qsk * mu
    load = load * CE
    return load * Ct
# Load case 1: uniform snow on the flat roof.
qs1 = qs(qsk, mu_1, C_E, C_t);
qs1 = EngNumber(qs1, precision=2)
print(qs1, 'kN/m^2')
# Load case 2: snow drifted against the taller adjacent construction.
mu_w = (18 + 6)/12.4
mu_w < 4.11
mu_w = EngNumber(mu_w, precision=2)
mu_w
if 6 < 2*6.2:
    mu_2 = (mu_1 + mu_w)/2
else:
    mu_2 = mu_1 + mu_w/2
mu_2 = EngNumber(mu_2)
mu_2
qs2 = qs(qsk, mu_2, C_E, C_t);
qs2 = EngNumber(qs2, precision=2)
print(qs2, 'kN/m^2')
# Governing snow load. NOTE: this rebinding shadows the qs() function above.
qs = max(qs1, qs2)
qs
```
---
#### Wind Load
```
# Kinetic wind pressure q = 1/2 * rho * v^2 with rho = 1.25 and v = 25,
# i.e. in N/m^2 for air density in kg/m^3 and speed in m/s.
q_cin = 1/2 * 1.25 * 25**2
q_cin
def ce(z, kr, ct, zmin):
    """Wind exposure coefficient c_e(z):
    kr^2 * ct*ln(z/zmin) * (7 + ct*ln(z/zmin))."""
    log_z = math.log(z / zmin)
    return kr ** 2 * ct * log_z * (7 + ct * log_z)
# Exposure coefficient at the building's reference height.
c_e = EngNumber(ce(9.7, .22, 1, .3), precision=2);
c_e
c_p=.2;
c_d=1;
# Wind pressure p = q * ce * cp * cd, then converted from N/m^2 to kN/m^2.
p = q_cin * c_e*c_p*c_d;
p = float(p/1000)
p = round(p,3)
print(p, 'kN/m^2')
```
---
## SLU
```
# ULS (SLU) load combinations for the three beam spans. Each combination
# sums factored permanent loads and factored variable loads (snow, category
# B floors, wind), with a different leading variable action per combo.
#-------------------
# TRAVE P13 - P16
# COMBO 1: NEVE PRINCIPALE
Q11max_slu = 1.3*(8+11.52 + 3.75) + 1.5*(11.55+7.98 + 9.2) + 1.5*(15.01) + 1.5*.7*(7.5+14.40) + 1.5*.6*.5
Q11min_slu = 1*(8+11.52 + 3.75) + .8*(11.55+7.98 + 9.2)
# COMBO 2: CAT. B (TERRAZZO) PRINCIPALE
Q12max_slu = 1.3*(8+11.52 + 3.75) + 1.5*(11.55+7.98 + 9.2) + 1.5*0.5*15.01 + 1.5*.7*(7.5) + 1.5*(14.40) + 1.5*.6*.3
Q12min_slu = 1*(8+11.52 + 3.75) + .8*(11.55+7.98 + 9.2)
# COMBO 3: CAT. B2 (INTERNO) PRINCIPALE
Q13max_slu = 1.3*(8+11.52 + 3.75) + 1.5*(11.55+7.98 + 9.2) + 1.5*0.5*15.01 + 1.5*.7*14.40 + 1.5*7.5 + 1.5*.6*.3
Q13min_slu = 1*(8+11.52 + 3.75) + .8*(11.55+7.98 + 9.2)
#-------------------
# TRAVE P16 - P17
# COMBO 1: NEVE PRINCIPALE
Q21max_slu = 1.3*(8+6.72 + 3.75) + 1.5*(11.55+4.65 + 9.2) + 1.5*(10.30) + 1.5*.7*(7.5+8.4) + 1.5*.6*.3
Q21min_slu = 1*(8+6.72 + 3.75) + .8*(11.55+4.65 + 9.2)
# COMBO 2: CAT. B (TERRAZZO) PRINCIPALE
Q22max_slu = 1.3*(8+6.72 + 3.75) + 1.5*(11.55+4.65 + 9.2) + 1.5*.5*(10.30) + 1.5*.7*(7.5) + 1.5*(8.4) + 1.5*.6*.3
Q22min_slu = 1*(8+6.72 + 3.75) + .8*(11.55+4.65 + 9.2)
# COMBO 3: CAT. B2 (INTERNO) PRINCIPALE
Q23max_slu = 1.3*(8+6.72 + 3.75) + 1.5*(11.55+4.65 + 9.2) + 1.5*.5*(10.30) + 1.5*(7.5) + 1.5*.7*(8.4) + 1.5*.6*.3
Q23min_slu = 1*(8+6.72 + 3.75) + .8*(11.55+4.65 + 9.2)
#-------------------
# TRAVE P17 - VANO SCALA
# COMBO 1: NEVE PRINCIPALE
Q31max_slu = 1.3*(6+3.2 + 3.75) + 1.5*(8.66+2.215 + 9.2) + 1.5*(4.17) + 1.5*.7*(5.63+4) + 1.5*.6*.138
Q31min_slu = 1*(6+3.2 + 3.75) + .8*(8.66+2.215 + 9.2)
# COMBO 2: CAT. B (TERRAZZO) PRINCIPALE
# NOTE(review): the snow value here is 2.41 while combo 1 uses 4.17 --
# confirm which figure is correct for this span.
Q32max_slu = 1.3*(6+3.2 + 3.75) + 1.5*(8.66+2.215 + 9.2) + 1.5*.5*(2.41) + 1.5*.7*(5.63)+ 1.5*(4) + 1.5*.6*.138
Q32min_slu = 1*(6+3.2 + 3.75) + .8*(8.66+2.215 + 9.2)
# COMBO 3: CAT. B2 (INTERNO) PRINCIPALE
Q33max_slu = 1.3*(6+3.2 + 3.75) + 1.5*(8.66+2.215 + 9.2) + 1.5*.5*(2.41) + 1.5*(5.63)+ 1.5*.7*(4) + 1.5*.6*.138
Q33min_slu = 1*(6+3.2 + 3.75) + .8*(8.66+2.215 + 9.2)
Q1max_slu = EngNumber(Q11max_slu, precision=2)
Q1min_slu = EngNumber(Q11min_slu, precision=2)
Q2max_slu = EngNumber(Q21max_slu, precision=2)
# NOTE(review): precision=1 here differs from the precision=2 used on every
# other EngNumber in this cell -- confirm intentional.
Q2min_slu = EngNumber(Q21min_slu, precision=1)
Q3max_slu = EngNumber(Q31max_slu, precision=2)
Q3min_slu = EngNumber(Q31min_slu, precision=2)
Q32max_slu
```
---
## SLE RARA
### P13 - P16
```
# SLE rara (characteristic) combinations for beam P13 - P16.
# NEVE PRINCIPALE
Q1max_sle_rara = 8+11.52+3.75 + 11.55+7.98+9.2 + 15.01 + .7*(7.5+14.40) + .6*.5
# CAT. B2 SOLAIO INTERNO
# NOTE(review): the snow value 8.68 here (and 8.6 below) differs from the
# 15.01 used above -- confirm which figure applies.
Q1min_sle_rara = 8+11.52+3.75 + 11.55+7.98+9.2 + .5*8.68 + .7*(14.40) + .6*.5 + 7.5
# CAT. B TERRAZZO
8+11.52+3.75 + 11.55+7.98+9.2 + .5*8.6 + .7*(7.5) + .6*.5 +14.40
```
### P16 - P17
```
# SLE rara (characteristic) combinations for beam P16 - P17.
# NEVE PRINCIPALE
Q2max_sle_rara = 8+6.72+3.75 + 11.55+4.65+9.20 + 10.30 + .7*(7.5+8.4) + .6*.3
Q2max_sle_rara
# CAT. B2 SOLAIO INTERNO
Q2min_sle_rara = 8+6.72+3.75 + 11.55+4.65+9.20 + .5*5.06 + .7*(8.4) + .6*.3+7.5
Q2min_sle_rara
# CAT. B2 TERRAZZA
8+6.72+3.75 + 11.55+4.65+9.20 + .5*5.06 + .7*(7.5) + .6*.3+8.4
```
### P17 - VANO SCALA
```
# SLE rara (characteristic) combinations for beam P17 - stairwell.
# NEVE PRINCIPALE
Q3max_sle_rara = 6+3.2+3.75 + 8.66+2.215+9.20 + 4.17 + .7*(5.63+4) + .6*.138
Q3max_sle_rara
# CAT. B2 SOLAIO INTERNO
# NOTE(review): here the wind term appears as bare .138 (elsewhere .6*.138)
# and the leading category load is factored by .7 rather than 1 -- confirm.
Q3min_sle_rara = 6+3.2+3.75 + 8.66+2.215+9.20 + .5*2.41 + .7*(4) + .138 + .7*5.63
Q3min_sle_rara
```
---
## SLE FREQUENTE
### P13 - P16
```
# SLE frequente combinations for beam P13 - P16.
# COMBO 1: NEVE PRINCIPALE
Q11_sle_freq = 8+11.52+3.75 + 11.55+7.98+9.2 + .2*15.01 + .3*(7.5+14.40) + 0*.5
# COMBO 2: CAT B (TERRAZZO) PRINCIPALE
Q12_sle_freq = 8+11.52+3.75 + 11.55+7.98+9.2 + 0*15.01 + .3*(7.5)+ .5*(14.40) + .0*.5
# COMBO 3: CAT B2 (SOLAIO INTERNO) PRINCIPALE
Q13_sle_freq = 8+11.52+3.75 + 11.55+7.98+9.2 + 0*15.01 + .5*(7.5)+ .3*(14.40) + .0*.5
print(Q11_sle_freq, Q12_sle_freq, Q13_sle_freq)
# Governing max/min picked from the combos above.
Q1max_sle_freq = 8+11.52+3.75 + 11.55+7.98+9.2 + .2*15.01 + .3*(7.5+14.40) + .0*.5
Q1min_sle_freq = 8+11.52+3.75 + 11.55+7.98+9.2 + 0*15.01 + .5*(7.5)+ .3*(14.40) + .0*.5
```
### P16 - P17
```
# SLE frequente combinations for beam P16 - P17.
# COMBO 1: NEVE PRINCIPALE
Q21_sle_freq = 8+6.72+3.75 + 11.55+4.65+9.20 + .2*10.30 + .3*(7.5+8.4) + .0*.3
# COMBO 2: CAT B (TERRAZZO) PRINCIPALE
Q22_sle_freq = 8+6.72+3.75 + 11.55+4.65+9.20 + .0*10.30 + .3*(7.5)+.5*(8.4) + .0*.3
# COMBO 3: CAT B2 (SOLAIO INTERNO) PRINCIPALE
Q23_sle_freq = 8+6.72+3.75 + 11.55+4.65+9.20 + .0*10.30 + .5*(7.5)+.3*(8.4) + .0*.3
print(Q21_sle_freq, Q22_sle_freq, Q23_sle_freq)
# Governing max/min picked from the combos above.
Q2max_sle_freq = 8+6.72+3.75 + 11.55+4.65+9.20 + .2*10.30 + .3*(7.5+8.4) + .0*.3
Q2min_sle_freq = 8+6.72+3.75 + 11.55+4.65+9.20 + .0*10.30 + .5*(7.5)+.3*(8.4) + .0*.3
```
### P17 - VANO SCALA
```
# SLE frequente combinations for beam P17 - stairwell.
# COMBO 1: NEVE PRINCIPALE
Q31_sle_freq = 6+3.2+3.75 + 8.66+2.215+9.20 + .2*4.17 + .3*(5.63+4) + .0*.138
# COMBO 2: CAT B (TERRAZZO) PRINCIPALE
Q32_sle_freq = 6+3.2+3.75 + 8.66+2.215+9.20 + 0*4.17 + .3*(5.63)+.5*(4) + .0*.138
# COMBO 3: CAT B2 (SOLAIO INTERNO) PRINCIPALE
Q33_sle_freq = 6+3.2+3.75 + 8.66+2.215+9.20 + 0*4.17 + .5*(5.63)+.3*(4) + .0*.138
print(Q31_sle_freq, Q32_sle_freq, Q33_sle_freq)
# Governing max/min: combo 3 governs for max on this span, combo 2 for min.
Q3max_sle_freq = 6+3.2+3.75 + 8.66+2.215+9.20 + 0*4.17 + .5*(5.63)+.3*(4) + .0*.138
Q3min_sle_freq = 6+3.2+3.75 + 8.66+2.215+9.20 + 0*4.17 + .3*(5.63)+.5*(4) + .0*.138
```
### SLE QUASI-PERMANENTE
```
# SLE quasi-permanente combination for each of the three beam spans.
Q1_sle_qp = 8+11.52+3.75 + 11.55+7.98+9.2 + 0*15.01 + .3*(7.5+14.40) + .0*.5
Q2_sle_qp = 8+6.72+3.75 + 11.55+4.65+9.20 + 0*10.30 + .3*(7.5+8.4) + .0*.3
Q3_sle_qp = 6+3.2+3.75 + 8.66+2.215+9.20 + 0*4.17 + .3*(5.63+4) + .0*.138
print(Q1_sle_qp, Q2_sle_qp, Q3_sle_qp)
```
---
## P27
### SLU
```
# ULS (SLU) axial-load combinations for column P27.
# Combo 1: Neve principale
N11max = 1.3*(4*21.15+27.45+101.52+90.24+101.536+99.50)+1.5*(80.09+130.284+146.59+127.68)+1.5*(67.96+0*14.1+56.40+95.17+110.54+.6*3.9)
# Combo 2: CAT. principale
N12max = 1.3*(4*21.15+27.45+101.52+90.24+101.536+99.50)+1.5*(80.09+130.284+146.59+127.68)+1.5*(.5*67.96+0*14.1+56.40+95.17+110.54+.6*3.9)
# Combo 6: Vento principale
N1min = 1.0*(4*21.15+27.45+101.52+90.24+101.536+99.50)+.8*(80.09+130.284+146.59+127.68)+0*(67.96+0*14.1+.7*56.40+.7*95.17+.7*110.54)-1.5*3.9
print(N11max, N12max, N1min)
# Governing (largest) axial load.
max(N11max, N12max, N1min)
```
### SLE RARA
```
# SLE rara combinations for column P27.
# Combo 1: Neve principale
N11 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(67.96+0*14.1+.7*56.40+.7*95.17+.7*110.54+.6*3.9)
# Combo 2: CAT. principale
N12 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(.6*67.96+0*14.1+56.40+95.17+110.54+.6*3.9)
# Combo 3: Vento principale
N13 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(.6*67.96+0*14.1+.7*56.40+.7*95.17+.7*110.54+3.9)
print(N11, N12, N13)
```
### SLE FREQUENTE
```
# SLE frequente combinations for column P27 (names N11..N13 are reused
# from the previous cell).
# Combo 1: Neve principale
N11 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(.2*67.96+0*14.1+.3*56.40+.3*95.17+.6*110.54+0*3.9)
# Combo 2: CAT. principale
N12 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(0*67.96+0*14.1+.5*56.40+.5*95.17+.7*110.54+0*3.9)
# Combo 3: Vento principale
N13 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(0*67.96+0*14.1+.3*56.40+.3*95.17+.6*110.54+0.2*3.9)
print(N11, N12, N13)
```
### SLE QUASI PERMANENTE
```
# SLE quasi-permanente combination for column P27.
N1 = (4*21.15+27.45+101.52+90.24+101.536+99.50)+(80.09+130.284+146.59+127.68)+(0*67.96+0*14.1+.3*56.40+.3*95.17+.6*110.54+0*3.9)
N1
```
---
## P36
### SLU
```
# ULS (SLU) axial-load combinations for column P36.
# Combo 1: Neve principale
N21max = 1.3*(27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80 +64.8+26.25)+1.5*(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+1.5*(7.23+0*1.5+.6*.414+.7*6+.7*9+.7*66+116.50+.6*2.28+.7*72)
# Combo 2: CAT. principale
N22max = 1.3*(27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80+64.8+26.25)+1.5*(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+1.5*(.5*7.23+0*1.5+.6*.414+6+9+66+.5*116.50+.6*2.28+72)
# Combo 3: Vento principale
N2min = 1*(27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80+64.8+26.25)+.8*(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+0*(.5*7.23+0*1.5+.7*6+.7*9+.7*66+.5*116.50+.6*2.28+72+.6*.414)-1.5*.414
print(N21max, N2min, N22max)
```
### SLE RARA
```
# SLE rara combinations for column P36.
# Combo 1: Neve principale
N21max = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80 +64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(7.23+0*1.5+.6*.414+.7*6+.7*9+.7*66+116.50+.6*2.28+.7*72)
# Combo 2: CAT. principale
N22max = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80+64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(.5*7.23+0*1.5+.6*.414+6+9+66+.5*116.50+.6*2.28+72)
# Combo 3: Vento principale
N23max = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80+64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(.5*7.23+0*1.5+.414+.7*6+.7*9+.7*66+.5*116.50+2.28+.7*72)
# BUG FIX: the original printed N2min (a stale value from the SLU cell above)
# and never printed N23max, which this cell computes.
print(N21max, N22max, N23max)
```
### SLE FREQUENTE
```
# SLE frequente combinations for column P36 (names reused from above).
# Combo 1: Neve principale
N21max = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80 +64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(.2*7.23+0*1.5+0*.414+.3*6+.3*9+.3*66+.2*116.50+0*2.28+.6*72)
# Combo 2: CAT. principale
N22max = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80 +64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(0*7.23+0*1.5+0*.414+.5*6+.5*9+.5*66+0*116.50+0*2.28+.7*72)
# Combo 3: Vento principale
N23max = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80 +64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(0*7.23+0*1.5+.2*.414+.3*6+.3*9+.3*66+0*116.50+.2*2.28+.6*72)
print(N21max, N22max, N23max)
```
### SLE QUASI PERMANENTE
```
# SLE quasi-permanente combination for column P36.
N2 = (27.45 + 15+10.8 + 15+9.60 + 33.375+9.60+52.80 +64.8+26.25)+(8.52+13.86+41.92+13.86+36.55+40.48+83.16)+(0*7.23+0*1.5+0*.414+.3*6+.3*9+.3*66+0*116.50+0*2.28+.6*72)
N2
```
| github_jupyter |
# Notes (IFE - template)
### `{{cookiecutter.project_name}}::{{cookiecutter.session_id}}`
## 1. Usage
### 1.1. Jupyter
*You can fill in the MarkDown cells (the cells without "numbering") by double-clicking them. Also remember, press `shift + enter` to execute a cell.*
A couple of useful links:
- [How to write MarkDown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet#hr)
- [Jupyter notebooks](https://jupyter.org/)
- [cellpy](https://cellpy.readthedocs.io/en/latest/)
### 1.2. The `cellpy` notebooks
This set of notebooks is intended to give you a structure for performing a proper `cellpy` analysis of your data utilising the `cellpy.utils.batch` tool.
An example of a project structure can be this:
```bash
cellpy_project/
└── experiment_001
├── batch_file.json
├── data
│ ├── external
│ ├── interim
│ ├── processed
│ └── raw
├── out
│ └── note.md
├── notebook: 00_experiment_001_notes.ipynb
├── notebook: 01_experiment_001_loader.ipynb
├── notebook: 02_experiment_001_life.ipynb
├── notebook: 03_experiment_001_cycles.ipynb
├── notebook: 04_experiment_001_ica.ipynb
└── notebook: 05_experiment_001_plots.ipynb
```
The `00_experiment_001_notes.ipynb` notebook can be used for writing a log, as well as giving you some information on how to do stuff.
The `01_experiment_001_loader.ipynb` notebook is the one you use to load the data from the tester(s) and create cellpy-files. You should run this first. There are several options on how to store the cellpy-files and the journal, so chose the option that suits you best. You only have to re-run this notebook if your data needs to be updated.
The next three notebooks use the cellpy-files produced by `01_experiment_001_loader.ipynb`. Try to keep them "understandable" and well structured so that the future you doesn't get totally lost trying to re-run them.
The last notebook is meant to contain important plots that you would like to be able to use later on. They should preferably be "stand-alone" and not rely on running the other notebooks (as a helpful gesture to the future you).
## 2. Key information about the current experiment
**Experimental-id:** `{{cookiecutter.notebook_name}}`
**Short-name:** `{{cookiecutter.session_id}}`
**Project:** `{{cookiecutter.project_name}}`
**By:** `{{cookiecutter.author_name}}`
**Date:** `{{cookiecutter.date}}`
### Notebooks
- notes and information [link](00_{{cookiecutter.notebook_name}}_notes.ipynb)
- processing raw data [link](01_{{cookiecutter.notebook_name}}_loader.ipynb)
- life [link](02_{{cookiecutter.notebook_name}}_life.ipynb)
- cycles [link](03_{{cookiecutter.notebook_name}}_cycles.ipynb)
- ica [link](04_{{cookiecutter.notebook_name}}_ica.ipynb)
- plots [link](05_{{cookiecutter.notebook_name}}_plots.ipynb)
## 3. Short summary of the experiment before processing
It is often helpful to formulate what you wanted to achieve with your experiment before actually going into depth of the data. I believe that it does not make you "biased" when processing your data, but instead sharpens your mind and motivates you to look more closely at your results. I might be wrong, of course. Then just skip filling in this part.
### Main purpose
(*State the main hypothesis for the current set of experiment*)
### Expected outcome
(*What do you expect to find out? What kind of tests did you perform?*)
### Special considerations
(*State if there are any special considerations for this experiment*)
## 4. Log
(*Here you should fill in what you have done during your analysis*)
## 5. Summary
It is always helpful to formulate what you learned from your experiment. It is very unlikely that I am wrong here. So, please, don't skip filling in this part.
(*What did you discover?*)
(*What should you do further?*)
## Appendix A - Tips and tricks
### Trick: Use magic to automatically reload modules
This allows you to modify modules (and scripts) in another editor and see the changes within Jupyter without shutting down the kernel.
```python
# in the first notebook cell
%load_ext autoreload
%autoreload 2
```
### Example: Find files if automatic search fails
```python
def add_files_to_pages(p):
    """Attach raw-file paths to each journal page by globbing the raw-data dir.

    p: the batch journal pages table, indexed by cell name. For every cell
    name, all .res files whose filename starts with that name are collected
    and stored in the raw_file_names column. Returns the mutated table.
    """
    raw_folder = pathlib.Path(prms.Paths.rawdatadir)
    rf = []
    for f in p.index:
        print(f, end=": ")
        # every .res file whose name starts with the cell name
        new_rf = list(raw_folder.glob(f"{f}*.res"))
        print(new_rf)
        rf.append(new_rf)
    p.raw_file_names = rf
    return p
b.pages = add_files_to_pages(b.pages)
```
### Example: Average nominal capacity and mass calculations
```python
csf = helpers.concatenate_summaries(b) # multi-index dataframe
filter_charge_mah = csf.columns.get_level_values(1) == "charge_capacity_u_mAh_g"
scsf = csf.loc[:, filter_charge_mah]
scsf.columns = scsf.columns.droplevel(1)
df = pd.DataFrame(scsf.mean(), columns=["nom_cap_mAh_g"])
df["mass_old"] = b.pages.masses
nom_cap = 350.0
df["adjusted_mass"] = df.capacity_old * df.mass_old / nom_cap
```
### Example: Filtering concatenated summaries
```python
filter_40 = csf.columns.get_level_values(0).str.contains("TFSi_40")
filter_60 = csf.columns.get_level_values(0).str.contains("TFSi_60")
filter_80 = csf.columns.get_level_values(0).str.contains("TFSi_80")
filter_bad = csf.columns.get_level_values(0).isin(["20200406_TFSi_40_2_02_cc", "20200406_TFSi_60_5_01_cc"])
filter_charge = csf.columns.get_level_values(1) == "Charge_Capacity"
filter_charge_mah = csf.columns.get_level_values(1) == "Charge_Capacity(mAh/g)"
# filter out the charge capacity for the three first cycles exluding the bad cells:
scsf = csf.loc[1:3, filter_charge_mah & ~filter_bad]
# plotting
scsf.columns = scsf.columns.droplevel(1)
scsf.hvplot.scatter(ylim=(0, 5000))
```
### Example: Plot all cycles
```python
voltage_curves = dict()
for label in b.experiment.cell_names:
d = b.experiment.data[label]
curves = d.get_cap(label_cycle_number=True, interpolated=True, number_of_points=100)
if not curves.empty:
curve = hv.Curve(curves, kdims=["capacity", "cycle"],
vdims="voltage").groupby("cycle").overlay()#.opts(show_legend=False)
voltage_curves[label] = curve
else:
print(f"[{label} empty]")
NdLayout_voltage_curves = hv.NdLayout(voltage_curves, kdims='label').cols(3)
palette = 'Spectral'
NdLayout_voltage_curves.opts(
hv.opts.Curve(color=hv.Palette(palette), tools=['hover']),
hv.opts.NdOverlay(shared_axes=False),
hv.opts.NdLayout(),
)
```
### Example: Select first cycle from all cells and save them as a csv-file with headers
```python
import pathlib
import csv
from itertools import zip_longest
# Creating "csv-save-able" structure
header1 = []
header2 = []
out = []
cycle = 1
for label in cell_labels:
print(label, end="")
try:
c = b.experiment.data[label]
sss = c.get_cap(cycle=cycle, interpolated=True, dx=0.01)
cap = sss.capacity
vlt = sss.voltage
out.append(cap)
out.append(vlt)
header1.append(label)
header1.append(label)
header2.append("capacity")
header2.append("voltage")
print(" - OK")
except:
print(" - could not process")
# Saving to csv (list of lists)
filename = "data/processed/first_voltage_curve_for_all_cells.csv"
with open(filename,"w+") as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(header1)
writer.writerow(header2)
for values in zip_longest(*out):
writer.writerow(values)
```
### Example: Film-plot of the evolution of dQ/dV peaks
```python
import holoviews as hv
import cellpy
from cellpy.utils import ica
# Load the cell and choose the cycle window to visualize.
c = cellpy.get("a/cellpy/file.h5")
first_cycle = 2
last_cycle = 30
cycle = list(range(first_cycle, last_cycle + 1))
# Incremental-capacity (dQ/dV) frames; tidy=False keeps the wide
# (voltage x cycle) layout needed for an image plot.
ica_c, ica_dc = ica.dqdv_frames(c, cycle=cycle, voltage_resolution=0.005, normalizing_factor=1, split=True, tidy=False)
voltages_c = ica_c.voltage.values.ravel()
dqs_c = ica_c.drop("voltage", axis=1)
cycles_c = dqs_c.columns.get_level_values(0).values.ravel().astype('int32')
dqs_c = dqs_c.values.T
voltages_dc = ica_dc.voltage.values.ravel()
dqs_dc = ica_dc.drop("voltage", axis=1)
cycles_dc = dqs_dc.columns.get_level_values(0).values.ravel().astype('int32')
# negate the discharge branch so both images share the same sign convention
dqs_dc = -dqs_dc.values.T
options = {
    "xlabel": "voltage",
    "ylabel": "cycle number",
    "xlim": (0.2, 0.65),
}
# plain string titles (the f-prefixes were redundant - no placeholders)
ica_im_c = hv.Image((voltages_c, cycles_c, dqs_c)).opts(title="charge", **options)
ica_im_dc = hv.Image((voltages_dc, cycles_dc, dqs_dc)).opts(title="discharge", **options)
ica_im_c + ica_im_dc
```
| github_jupyter |
#Crime Data Collection Methodology
-Eric Ramon- Labs 21
Crime Data is available from several official sources. UCR (uniform crime report), NIBRS (National Incident-Based Reporting System) and SRS (Summary Reporting System) are reported on the FBI website.
The NIBRS is a newer standard that will be THE new standard by January 2021. (https://www.fbi.gov/services/cjis/ucr/nibrs)
The UCR is the older standard but is still quite useful (https://www.fbi.gov/services/cjis/ucr/) as it has data from 1979 - 2018. Some data is estimated, and some data is missing.
```
import pandas as pd
import numpy as np
# Estimated national/state crime counts (estimates fill in non-reporting years).
df = pd.read_csv("http://s3-us-gov-west-1.amazonaws.com/cg-d4b776d0-d898-4153-90c8-8336f86bdfec/estimated_crimes_1979_2018.csv")
# Rows with a missing 'state_abbr' are the national totals.
df.isnull().sum()
df['state_abbr'] = df['state_abbr'].fillna('US')
# Derive per-capita rates: violent crime rate (vcr) and property crime rate (pcr).
df['vcr'] = df['violent_crime'] / df['population']
df['pcr'] = df['property_crime'] / df['population']
df.head()
# Narrow export frame with only the columns the database needs.
sand = df[['state_abbr', 'year', 'vcr', 'pcr']].rename(columns={'state_abbr': 'state'})
sand
# export to csv
sand.to_csv('/content/state_crime.csv', index=False)
```
further research shows the FBI:UCR data lists crime for 9,251 cities with the same features as df above but is 20,000+ cities short compared to our db. (also, city information is provided per year and is given in xls format)
SUGGESTED SOLUTION:
add 10 years of crime data 2009-2018 by city, for cities not listed in crime data, give state data and notify users that data is not by city but by state
###SOURCE DATA (by city):
2018 - https://ucr.fbi.gov/crime-in-the-u.s/2018/crime-in-the-u.s.-2018/tables/table-8/table-8.xls
2017 - https://ucr.fbi.gov/crime-in-the-u.s/2017/crime-in-the-u.s.-2017/tables/table-8/table-8.xls
2016 - https://ucr.fbi.gov/crime-in-the-u.s/2016/crime-in-the-u.s.-2016/tables/table-6/table-6.xls
2015 - https://ucr.fbi.gov/crime-in-the-u.s/2015/crime-in-the-u.s.-2015/tables/table-8/table_8_offenses_known_to_law_enforcement_by_state_by_city_2015.xls
2014 - https://ucr.fbi.gov/crime-in-the-u.s/2014/crime-in-the-u.s.-2014/tables/table-8/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2014.xls
2013 - https://ucr.fbi.gov/crime-in-the-u.s/2013/crime-in-the-u.s.-2013/tables/table-8/table_8_offenses_known_to_law_enforcement_by_state_by_city_2013.xls
2012 - https://ucr.fbi.gov/crime-in-the-u.s/2012/crime-in-the-u.s.-2012/tables/8tabledatadecpdf/table_8_offenses_known_to_law_enforcement_by_state_by_city_2012.xls
2011 - https://ucr.fbi.gov/crime-in-the-u.s/2011/crime-in-the-u.s.-2011/tables/table_8_offenses_known_to_law_enforcement_by_state_by_city_2011.xls
2010 - https://ucr.fbi.gov/crime-in-the-u.s/2010/crime-in-the-u.s.-2010/tables/10tbl08.xls
2009 - https://www2.fbi.gov/ucr/cius2009/data/documents/09tbl08.xls
(may leave out 2008 to have an even 10 years of data, but jic)
2008 - https://www2.fbi.gov/ucr/cius2008/data/documents/08tbl08.xls
```
#read in xls files, skipping the headers and footers
# (skiprows/skipfooter values differ per year because the FBI changed the
# sheet layout; file names match the download links listed above)
xl2018 = pd.read_excel('/content/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2018.xls', skiprows=3, skipfooter=10)
xl2017 = pd.read_excel('/content/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2017.xls', skiprows=3, skipfooter=10)
xl2016 = pd.read_excel('/content/Table_6_Offenses_Known_to_Law_Enforcement_by_State_by_City_2016.xls', skiprows=3, skipfooter=11)
xl2015 = pd.read_excel('/content/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2015.xls', skiprows=3, skipfooter=10)
xl2014 = pd.read_excel('/content/table-8.xls', skiprows=3, skipfooter=17)
xl2013 = pd.read_excel('/content/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2013.xls', skiprows=3, skipfooter=10)
xl2012 = pd.read_excel('/content/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2012.xls', skiprows=3, skipfooter=7)
xl2011 = pd.read_excel('/content/table_8_offenses_known_to_law_enforcement_by_state_by_city_2011.xls', skiprows=3, skipfooter=7)
xl2010 = pd.read_excel('/content/10tbl08.xls', skiprows=3, skipfooter=7)
xl2009 = pd.read_excel('/content/09tbl08.xls', skiprows=3, skipfooter=7)
#build a function to automatically clean the results and add to a new DF for
#import to database
def cleaner(x, year):
    """
    Clean one year of FBI "Table 8" city crime data.

    Strips footnote digits from state/city names, maps full state names
    to USPS abbreviations, forward-fills the state (it is only listed on
    its first row), and computes per-capita crime rates.

    Parameters
    ----------
    x : pd.DataFrame
        Raw sheet with 'State', 'City', 'Population', 'Violent crime'
        and 'Property crime' columns (headers may be line-wrapped).
    year : str
        Four-digit year string used to suffix the rate columns.

    Returns
    -------
    pd.DataFrame
        Columns ['city', 'vcr_<year>', 'pcr_<year>'] where 'city' is
        formatted as "City, ST".
    """
    # strip footnote digits from the state names; regex=True is required
    # because pandas >= 2.0 treats the pattern literally by default
    states = x['State'].str.replace(r'\d+', '', regex=True)
    # strip footnote digits from the city names
    x['City'] = x['City'].str.replace(r'\d+', '', regex=True)
    # some yearly sheets wrap the crime column headers - normalize them
    if 'Violent\ncrime' in x.columns:
        x = x.rename(columns={'Violent\ncrime': 'Violent crime',
                              'Property\ncrime': 'Property crime'})
    # remove null values from the city column
    if x['City'].isnull().sum() >= 1:
        print('Replacing null with None...')
        x['City'] = x['City'].replace(np.nan, 'None')
    # replace full state names with USPS abbreviations
    # (fixed: ARKANSAS -> AR and MISSOURI -> MO; the old mapping reused
    # Alaska's AK and Michigan's MI, silently merging those states)
    states = states.replace({"ALABAMA": "AL", "ALASKA": "AK", "ARIZONA": "AZ",
                             "ARKANSAS": "AR", "CALIFORNIA": "CA",
                             "COLORADO": "CO", "CONNECTICUT": "CT",
                             "DELAWARE": "DE", "DISTRICT OF COLUMBIA": "DC",
                             "FLORIDA": "FL", "GEORGIA": "GA", "HAWAII": "HI",
                             "IDAHO": "ID", "ILLINOIS": "IL", "INDIANA": "IN",
                             "IOWA": "IA", "KANSAS": "KS", "KENTUCKY": "KY",
                             "LOUISIANA": "LA", "MAINE": "ME", "MARYLAND": "MD",
                             "MASSACHUSETTS": "MA", "MICHIGAN": "MI",
                             "MINNESOTA": "MN", "MISSISSIPPI": "MS",
                             "MISSOURI": "MO", "MONTANA": "MT", "NEBRASKA": "NE",
                             "NEVADA": "NV", "NEW HAMPSHIRE": "NH",
                             "NEW JERSEY": "NJ", "NEW MEXICO": "NM",
                             "NEW YORK": "NY", "NORTH CAROLINA": "NC",
                             "NORTH DAKOTA": "ND", "OHIO": "OH",
                             "OKLAHOMA": "OK", "OREGON": "OR",
                             "PENNSYLVANIA": "PA", "RHODE ISLAND": "RI",
                             "SOUTH CAROLINA": "SC", "SOUTH DAKOTA": "SD",
                             "TENNESSEE": "TN", "TEXAS": "TX", "UTAH": "UT",
                             "VERMONT": "VT", "VIRGINIA": "VA",
                             "WASHINGTON": "WA", "WEST VIRGINIA": "WV",
                             "WISCONSIN": "WI", "WYOMING": "WY"})
    # the state appears only on its first row - forward-fill it downwards
    # (replaces the previous manual loop; rows before the first state stay
    # NaN instead of "", which does not occur in these sheets)
    states = states.ffill()
    # build the output in one vectorized pass (the previous per-row
    # chained-indexing assignments break under pandas copy-on-write)
    df = pd.DataFrame({
        'city': x['City'] + ", " + states,
        'vcr_' + year: x['Violent crime'] / x['Population'],
        'pcr_' + year: x['Property crime'] / x['Population'],
    })
    # NOTE: keep the default RangeIndex; the downstream
    # pd.merge(..., on='city') calls need 'city' as a column
    # (the old df.set_index('city') discarded its result anyway)
    return df
#run the 10 xls files through the cleaner function
cl18 = cleaner(xl2018, '2018')
cl17 = cleaner(xl2017, '2017')
cl16 = cleaner(xl2016, '2016')
cl15 = cleaner(xl2015, '2015')
cl14 = cleaner(xl2014, '2014')
cl13 = cleaner(xl2013, '2013')
cl12 = cleaner(xl2012, '2012')
cl11 = cleaner(xl2011, '2011')
cl10 = cleaner(xl2010, '2010')
cl09 = cleaner(xl2009, '2009')
cl09
# the following are the steps I made to merge the dataframes
# (side comments are the shape during each step)
cl18.shape # 9252, 3
cl17.shape# 9579,3
# pairwise outer merges on 'city', then fold the pairs together
masta = pd.merge(cl18,cl17, how='outer', on='city')
print(masta.shape) # 10188, 5
masta['pcr_2018'].isnull().sum() #939  (fixed column-name typo 'pcr_201 8')
masta['pcr_2017'].isnull().sum() # 591
masta2 = pd.merge(cl16, cl15, how='outer', on='city')
print(masta2.shape) # 10199, 5
masta2['pcr_2016'].isnull().sum() # 607
masta2['pcr_2015'].isnull().sum() # 777
masta3 = pd.merge(cl14, cl13, how='outer', on='city')
print(masta3.shape) # 10113, 5
masta3['pcr_2014'].isnull().sum() # 750
masta3['pcr_2013'].isnull().sum() # 791
masta4 = pd.merge(cl12, cl11, how='outer', on='city')
print(masta4.shape) # 10275, 5
masta4['vcr_2012'].isnull().sum() # 969
masta4['vcr_2011'].isnull().sum() # 1136
masta5 = pd.merge(cl10, cl09, how='outer', on='city')
print(masta5.shape) # 10110, 5
masta5['pcr_2010'].isnull().sum() # 787
masta5['pcr_2009'].isnull().sum() # 952
# fold the five pairwise frames into one master frame covering 2009-2018
master = pd.merge(masta, masta2, how='outer', on='city')
print(master.shape) # 10975, 9
master = pd.merge(master, masta3, how='outer', on='city')
print(master.shape) #12075
master = pd.merge(master, masta4, how='outer', on='city')
print(master.shape) # 14924
master = pd.merge(master, masta5, how='outer', on='city')
print(master.shape) # 24693
master
#export data
master.to_csv('/content/crime.csv', index=False)
```
#Summary and thoughts:
Crime data reporting by city is spotty and after collecting it all together, about 10% is missing. Hopefully reporting will improve in the future, but there may be no way to collect this data for many years.
While the data is spotty, it is still useful and would be quite interesting to a person investigating a new town or city.
Further improvements on the data would be to more closely check the city names. I have not been able to find duplicates, but it is technically possible. Another improvement would be to flatten out the national and state crime dataframe for uniform use with the city data. This can still be effectively utilized though and is not a high priority at the moment.
| github_jupyter |
```
import matplotlib.pyplot as plt
import scipy.sparse as sp
import _pickle as pk
from helpers import load_data
from collaborativeFiltering import *
from cross_validation import k_fold_split, split_matrix
%matplotlib inline
%load_ext autoreload
%autoreload 2
def save(obj, path):
    """Pickle *obj* to *path*, logging progress to stdout."""
    print('Saving at path : {}'.format(path))
    # context manager flushes and closes the handle
    # (the original passed open(...) directly and leaked it)
    with open(path, 'wb') as f:
        pk.dump(obj, f)
    print('Done')
def load(path):
    """Unpickle and return the object stored at *path*."""
    # context manager closes the handle (the original leaked it)
    with open(path, 'rb') as f:
        return pk.load(f)
```
# Load data
```
# Training ratings matrix plus previously computed CF predictions.
path_dataset = "../data/data_train.csv"
ratings = load_data(path_dataset)
pred = load_data("../data/films_CF_train_inf.csv")
print(RMSE(pred, ratings))
# Blend user-based and film-based predictions with equal weight and save.
user_pred = load_data('../data/users_CF_prediction_inf.csv')
film_pred = load_data('../data/films_CF_prediction_inf.csv')
mean = 0.5 * (user_pred + film_pred)
sortTrainData('../data/mean_prediction_inf.csv', mean)
```
# Test with film standard deviation
```
# Build a user-based CF model where the similarity graph is weighted by
# per-film standard deviations (transpose to normalize along films first).
denseRatings = sparseToDense(ratings)
boolViewings = booleanViewings(ratings)
_, film_means, _filmstdDevs = normalizeRatings(np.transpose(denseRatings), np.transpose(boolViewings))
normRatings, userMeans, userStdDevs = normalizeRatings(denseRatings, boolViewings)
similarities, user_commonViewings = buildGraph(normRatings, boolViewings, _filmstdDevs)
neighbors = buildNeighbors(user_commonViewings)
sorted_neighbors = sortNeighbors(neighbors, similarities)
nF, nU = ratings.shape
# bundle everything needed for prediction into one dict and persist it
model = {'nF': nF, 'nU': nU, 'boolViewings': boolViewings, 'denseRatings': denseRatings, 'normRatings': normRatings, 'userMeans': userMeans, 'userStdDevs': userStdDevs, 'similarities': similarities, 'user_commonViewings': user_commonViewings, 'sorted_neighbors': sorted_neighbors}
save(model, 'model_users_stdDev.pkl')
# quick error check for a single neighborhood size
k_list = [10]
usersModel_predictionErrorsOverk(k_list, model)
# NOTE(review): testSet/trainSet are defined in a later cell - run it first
create_prediction_file_usersModel('../data/users_CF_prediction_inf_stdDev.csv', testSet, model, k=1000)
create_prediction_file_usersModel('../data/users_CF_train_inf_stdDev.csv', trainSet, model, k=1000)
pred_stdDev = load_data('../data/users_CF_train_inf_stdDev.csv')
print(RMSE(pred_stdDev, ratings))
```
# Build model
```
# Rebuild the sorted-neighbor structure from the full user model and
# persist a slimmer prediction-only model.
model = load('model_users_full.pkl')
print('Model loaded')
neighbors = buildNeighbors(model['user_commonViewings'])
print('Neighbors built')
model['sorted_neighbors'] = sortNeighbors(neighbors, model['similarities'])
print('Neighbors sorted')
# drop the large intermediate matrices before saving
del model['similarities']
del model['user_commonViewings']
save(model, 'prediction_model_users_log.pkl')
print('Done')
k_list = [10]
usersModel_predictionErrorsOverk(k_list, model)
# compare against the model without the log weighting
k_list = [10]
model_noLog = load('model_users_full.pkl')
usersModel_predictionErrorsOverk(k_list, model_noLog)
test_path = '../data/sample_submission.csv'
testSet = load_data(test_path)
create_prediction_file_usersModel('../data/users_CF_prediction_inf_log.csv', testSet, model, k=1000)
# RMSE of log vs plain predictions on the training ratings
pred_log = load_data('../data/users_CF_train_inf_log.csv')
pred = load_data('../data/users_CF_train_inf.csv')
print(RMSE(pred, ratings))
print(RMSE(pred_log, ratings))
print(RMSE(pred_log, pred))
train_path = '../data/data_train.csv'
trainSet = load_data(train_path)
create_prediction_file_usersModel('../data/users_CF_train_inf_log.csv', trainSet, model, k=1000)
# build the full user model from scratch and persist it
model = buildModel(ratings)
save(model, 'model_users_full.pkl')
# Prediction
```
# Training error as a function of neighborhood size k.
model = load('model_users_full.pkl')
k_list = [10]
trainingErrorOverk_(k_list, model)
```
# Cross validation
```
# 4-fold split of the ratings matrix; build one user model per fold.
folds = k_fold_split(ratings, min_num_ratings=0, k=4)
models = []
trainSets = []
testSets = []
for k in range(4):
    print('Building model for fold #{}/{}'.format(k+1, 4))
    train, test = split_matrix(ratings, folds[k])
    print('Train non zero :')
    print(train.nnz)
    print('Test non zero :')
    print(test.nnz)
    trainSets.append(train)
    testSets.append(test)
    m = buildModel(train)
    models.append(m)
# NOTE(review): 'model' here comes from an earlier cell, not this loop
np.sum(model['boolViewings'], axis=0)
# train/test error per fold over a range of neighborhood sizes k
k_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
train_errors = []
test_errors = []
for fold in range(4):
    print('Fold #{}/{}'.format(fold + 1, 4))
    tr_error, te_error = trainingErrorOverk(k_list, models[fold], testSets[fold])
    train_errors.append(tr_error)
    test_errors.append(te_error)
tr_e = np.array(train_errors)
te_e = np.array(test_errors)
# mean learning curves across the four folds
plt.plot(np.mean(tr_e, axis=0))
plt.plot(np.mean(te_e, axis=0))
# quick numpy sanity check
a = np.array([[1, 2, 3,], [4, 5, 6]])
print(a.transpose())
# film-based model: build, checkpoint intermediates to disk because the
# neighbor computations are expensive
model_films_full = buildFilmsModel(ratings)
print('Model built')
save(model_films_full, 'model_films_full.pkl')
neighbors = buildNeighbors(model_films_full['film_commonViewings'])
# NOTE(review): this call's return value is discarded; the sorted result
# is recomputed below after reloading from disk
sortNeighbors(neighbors, model_films_full['similarities'])
save(neighbors, 'neighbors_temp.pkl')
model_films_full = load('model_films_full.pkl')
neighbors = load('neighbors_temp.pkl')
sortedNeighbors = sortNeighbors(neighbors, model_films_full['similarities'])
save(sortedNeighbors, 'sortedNeighbors.pkl')
# per-fold film models, saved to disk instead of kept in memory
trainSets = []
testSets = []
for k in range(4):
    print('Building model for fold #{}/{}'.format(k+1, 4))
    train, test = split_matrix(ratings, folds[k])
    print('Train non zero :')
    print(train.nnz)
    print('Test non zero :')
    print(test.nnz)
    trainSets.append(train)
    testSets.append(test)
    m = buildFilmsModel(train)
    save(m, 'model_films_{}.pkl'.format(k+1))
    del m
# slim the film model down to what prediction needs, then sort only the
# first 10000 neighbors
model_films = load('model_films_full.pkl')
del model_films['denseRatings']
del model_films['boolViewings']
del model_films['normRatings']
del model_films['userMeans']
del model_films['userStdDevs']
del model_films['film_commonViewings']
neighbors = load('neighbors_temp.pkl')
sortedNeighbors = sortNeighbors(neighbors, model_films['similarities'], 0, 10000)
save(sortedNeighbors, 'sortedNeighbors.pkl')
del sortedNeighbors
```
| github_jupyter |
# Project 2: Transistors and Amplifiers
This project will introduce two basic techniques for using currents and voltages to control currents and voltages. Why would you want to do that? It turns out that many times the physical system we're working with is not *directly* compatible with the tools we have to control or measure that system. For example, suppose we want to control the temperature of an experiment. It turns out there are materials with properties that depend on temperature (e.g., resistance, band gap voltage, and so on). If we can somehow turn those properties into electrical signals, we can use those signals to infer the temperature of the system. The bad news is that those signals are usually *not* a voltage from 0-5V that can simply be connected to the Arduino for measurement. We often (most of the time!) need to process those electrical signals in order to get something that an Arduino can measure. That's where amplifiers can be extremely handy. A transistor is, sort of, the world's simplest example of a crude, but effective, amplifier. *So* first we'll learn how a transistor works, and then we'll jump straight to an operational amplifier (which is really just a complex circuit, containing many transistors, so that it does the job much more effectively).
## The NPN Biploar Junction Transistor (BJT)
The schematic symbol for an NPN [Bipolar Junction Transistor (BJT)](https://en.wikipedia.org/wiki/Bipolar_junction_transistor) looks like this:

The pins are called 1) Emitter, 2) Base, 3) Collector. You can generally think of the Base-Emitter (BE) as a diode (hence the little diode symbol embedded in the picture). The BE diode obeys the Shockley equation over the range of currents and voltages we're going to be using. The magic comes when we look at the behavior of the CE connection. It turns out the current through the CE junction is *proportional* to the current through the BE junction. The proportionality constant is called the *current* *gain* $\beta$.
$$ I_{ce} = \beta I_{be} $$
What's the use? Because $\beta$ can be a large number (think 20 or 50) a small change in $I_{be}$ can produce a large change in $I_{ce}$ which means signals can be *amplified*.
The first thing we need to do is to set up a transistor to measure $\beta$ and see how that works. Set up the following circuit. For this one, you can re-use the same RC circuit on the PWM output of the Arduino that you used last time. Suppose the max current through the BE junction should be $0.1\ {\rm mA}$ and the max current through the CE junction should be $\approx 3 {\rm mA}$. Roughly, how big should R2 and R3 be in this case?

```
#
# Load up some packages
#
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
```
# Estimate $\beta$
Now run an experiment similar to last week's project but this time collect data to estimate $\beta$. There should be a range of $I_{be}$ values in which $I_{ce}$ is increasing proportionally. Identify this region, and use the data you collect to estimate $\beta$. You'll be given a [2N3904](https://ace.uindy.edu/access/content/group/202020-PHYS-230-02/Data%20Sheets/2N3904.pdf) transistor. Look up the *pinout* of the transistor you're given and make sure you connect it as shown in the schematic diagram. If you have questions, please *ask*!
# Operational Amplifiers
Next we're going to learn about the behavior of *operational* *amplifiers* (or "opamp"). An operational amplifier is simply a collection of transistors designed to multiply the difference between two input voltages by a large constant factor (called the "open loop gain") and set the output voltage to the result of this "operation":
$$V_{\rm out} = G(V_+ - V_-)$$
The schematic diagram below shows the internal plumbing of the TI version of one unit of the [LM358N](https://ace.uindy.edu/access/content/group/202020-PHYS-230-02/Data%20Sheets/lm358-n.pdf) opamp:

$$\text{One unit of the popular TI LM358N}$$
You can see that there are *lots* of transistors involved. We're going to construct a simple "2x" multiplier using the opamp as a tool in our toolkit to make it work. To begin, use the bench function generator to get an input signal that looks something like this:
```
# Load the captured triangle waveform and plot everything after t = 30 ms
# (skips the initial transient). Columns: 'time' (s), 'V(C)' (volts).
df=pd.read_csv('triangle.csv', sep='\t')
t = df[df['time']>0.03]['time'].values
V = df[df['time']>0.03]['V(C)'].values
pl.plot(t,V,label="Vin")
pl.grid()
pl.legend(loc=2)
pl.ylabel("Vin (Volts)")
pl.title("Input Voltage from Function Generator")
```
# Amplify this signal
Now, we'll use this signal as an input to an amplifier with the idea of multiplying the voltage by a factor of 2. How? We'll use the LM358N as an *amplifier*. The schematic symbol for an amplifier is a sideways isosceles triangle like so (This example is an [LM358N](https://ace.uindy.edu/access/content/group/202020-PHYS-230-02/Data%20Sheets/lm358-n.pdf), but there are many brands and types of amplifiers):

The amplifier is designed to do whatever it can to make its "+" and "-" inputs equal to one another. If the "+" is greater than the "-" the amplifier *increases* its output. It's up to the designer to set up the circuit so that this has the effect of lowering the difference of the "+" and "-" inputs. Similarly, if the "+" input is less than the "-" input the amplifier *decreases* its output. The output keeps increasing or decreasing until either the output reaches a limit (positive or negative supply limits usually) or it is successful in making the two inputs nearly equal (within fractions of a millivolt). Consider the following circuit:

As an example, suppose R3 is 10k and R2 is 1k. Suppose further that the voltage on the "+" input is 0.3V. What will the output of the amplifier have to be to make the two inputs equal? Will the recipe described in the preceding paragraph accomplish this? If you have trouble with this question, please ask! It's not hard, but it may take some practice to remember enough about your basic circuit physics to figure it out. Be patient, but don't give up! Ask if you have questions.
## Experiment
Now, design an amplifier circuit that will produce a gain of 2 using the circuit given above. Use the sawtooth waveform as an input, and use an oscilloscope to monitor the input and output voltages to measure the effect. We could, of course, write a program and use the Artemis Nano to measure these results, but we'll keep it simple and give you some experience with the bench tools.
| github_jupyter |
<img width="200" src="https://mmlspark.blob.core.windows.net/graphics/Readme/cog_services_on_spark_2.svg" />
# Cognitive Services
[Azure Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/) are a suite of APIs, SDKs, and services available to help developers build intelligent applications without having direct AI or data science skills or knowledge by enabling developers to easily add cognitive features into their applications. The goal of Azure Cognitive Services is to help developers create applications that can see, hear, speak, understand, and even begin to reason. The catalog of services within Azure Cognitive Services can be categorized into five main pillars - Vision, Speech, Language, Web Search, and Decision.
## Usage
### Vision
[**Computer Vision**](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/)
- Describe: provides description of an image in human readable language ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DescribeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DescribeImage))
- Analyze (color, image type, face, adult/racy content): analyzes visual features of an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeImage))
- OCR: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/OCR.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.OCR))
- Recognize Text: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/RecognizeText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.RecognizeText))
- Thumbnail: generates a thumbnail of user-specified size from the image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/GenerateThumbnails.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.GenerateThumbnails))
- Recognize domain-specific content: recognizes domain-specific content (celebrity, landmark) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/RecognizeDomainSpecificContent.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.RecognizeDomainSpecificContent))
- Tag: identifies list of words that are relevant to the input image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/TagImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.TagImage))
[**Face**](https://azure.microsoft.com/en-us/services/cognitive-services/face/)
- Detect: detects human faces in an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DetectFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DetectFace))
- Verify: verifies whether two faces belong to a same person, or a face belongs to a person ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/VerifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.VerifyFaces))
- Identify: finds the closest matches of the specific query person face from a person group ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/IdentifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.IdentifyFaces))
- Find similar: finds similar faces to the query face in a face list ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/FindSimilarFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.FindSimilarFace))
- Group: divides a group of faces into disjoint groups based on similarity ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/GroupFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.GroupFaces))
### Speech
[**Speech Services**](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/)
- Speech-to-text: transcribes audio streams ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/SpeechToText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.SpeechToText))
### Language
[**Text Analytics**](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/)
- Language detection: detects language of the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/LanguageDetector.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.LanguageDetector))
- Key phrase extraction: identifies the key talking points in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/KeyPhraseExtractor.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.KeyPhraseExtractor))
- Named entity recognition: identifies known entities and general named entities in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/NER.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.NER))
- Sentiment analysis: returns a score between 0 and 1 indicating the sentiment in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/TextSentiment.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.TextSentiment))
[**Translator**](https://azure.microsoft.com/en-us/services/cognitive-services/translator/)
- Translate: Translates text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/Translate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.Translate))
- Transliterate: Converts text in one language from one script to another script. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/Transliterate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.Transliterate))
- Detect: Identifies the language of a piece of text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/Detect.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.Detect))
- BreakSentence: Identifies the positioning of sentence boundaries in a piece of text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/BreakSentence.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.BreakSentence))
- Dictionary Lookup: Provides alternative translations for a word and a small number of idiomatic phrases. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DictionaryLookup.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DictionaryLookup))
- Dictionary Examples: Provides examples that show how terms in the dictionary are used in context. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DictionaryExamples.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DictionaryExamples))
- Document Translation: Translates documents across all supported languages and dialects while preserving document structure and data format. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DocumentTranslator.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DocumentTranslator))
### Azure Form Recognizer
[**Form Recognizer**](https://azure.microsoft.com/en-us/services/form-recognizer/)
- Analyze Layout: Extract text and layout information from a given document. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeLayout.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeLayout))
- Analyze Receipts: Detects and extracts data from receipts using optical character recognition (OCR) and our receipt model, enabling you to easily extract structured data from receipts such as merchant name, merchant phone number, transaction date, transaction total, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeReceipts.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeReceipts))
- Analyze Business Cards: Detects and extracts data from business cards using optical character recognition (OCR) and our business card model, enabling you to easily extract structured data from business cards such as contact names, company names, phone numbers, emails, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeBusinessCards.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeBusinessCards))
- Analyze Invoices: Detects and extracts data from invoices using optical character recognition (OCR) and our invoice understanding deep learning models, enabling you to easily extract structured data from invoices such as customer, vendor, invoice ID, invoice due date, total, invoice amount due, tax amount, ship to, bill to, line items and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeInvoices.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeInvoices))
- Analyze ID Documents: Detects and extracts data from identification documents using optical character recognition (OCR) and our ID document model, enabling you to easily extract structured data from ID documents such as first name, last name, date of birth, document number, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeIDDocuments.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeIDDocuments))
- Analyze Custom Form: Extracts information from forms (PDFs and images) into structured data based on a model created from a set of representative training forms. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeCustomModel))
- Get Custom Model: Get detailed information about a custom model. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/GetCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.GetCustomModel))
- List Custom Models: Get information about all custom models. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/ListCustomModels.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.ListCustomModels))
### Decision
[**Anomaly Detector**](https://azure.microsoft.com/en-us/services/cognitive-services/anomaly-detector/)
- Anomaly status of latest point: generates a model using preceding points and determines whether the latest point is anomalous ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DetectLastAnomaly.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DetectLastAnomaly))
- Find anomalies: generates a model using an entire series and finds anomalies in the series ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/DetectAnomalies.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DetectAnomalies))
### Search
- [Bing Image search](https://azure.microsoft.com/en-us/services/cognitive-services/bing-image-search-api/) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/com/microsoft/azure/synapse/ml/cognitive/BingImageSearch.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.BingImageSearch))
- [Azure Cognitive search](https://docs.microsoft.com/en-us/azure/search/search-what-is-azure-search) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.9.4/scala/index.html#com.microsoft.azure.synapse.ml.cognitive.AzureSearchWriter$), [Python](https://mmlspark.blob.core.windows.net/docs/0.9.4/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AzureSearchWriter))
## Prerequisites
1. Follow the steps in [Getting started](https://docs.microsoft.com/en-us/azure/cognitive-services/big-data/getting-started) to set up your Azure Databricks and Cognitive Services environment. This tutorial shows you how to install SynapseML and how to create your Spark cluster in Databricks.
1. After you create a new notebook in Azure Databricks, copy the **Shared code** below and paste into a new cell in your notebook.
1. Choose a service sample, below, and copy paste it into a second new cell in your notebook.
1. Replace any of the service subscription key placeholders with your own key.
1. Choose the run button (triangle icon) in the upper right corner of the cell, then select **Run Cell**.
1. View results in a table below the cell.
## Shared code
To get started, we'll need to add this code to the project:
```
# Shared setup cell: imports, Synapse-specific secret loading, and the
# service keys used by every sample cell below.
from pyspark.sql.functions import udf, col
from synapse.ml.io.http import HTTPTransformer, http_udf
from requests import Request
from pyspark.sql.functions import lit
from pyspark.ml import PipelineModel
from pyspark.sql.functions import col
import os

# When running inside Azure Synapse ("Project Arcadia"), pull the service
# keys from the linked key vault and expose them as environment variables
# so the key-reading lines below work in both environments.
if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    from notebookutils.mssparkutils.credentials import getSecret
    os.environ['ANOMALY_API_KEY'] = getSecret(
        "mmlspark-keys", "anomaly-api-key")
    os.environ['TEXT_API_KEY'] = getSecret("mmlspark-keys", "mmlspark-cs-key")
    os.environ['BING_IMAGE_SEARCH_KEY'] = getSecret(
        "mmlspark-keys", "mmlspark-bing-search-key")
    os.environ['VISION_API_KEY'] = getSecret(
        "mmlspark-keys", "mmlspark-cs-key")
    os.environ['AZURE_SEARCH_KEY'] = getSecret(
        "mmlspark-keys", "azure-search-key")

from synapse.ml.cognitive import *

# A general Cognitive Services key for Text Analytics, Computer Vision and Form Recognizer (or use separate keys that belong to each service)
service_key = os.environ["COGNITIVE_SERVICE_KEY"]
# A Bing Search v7 subscription key
bing_search_key = os.environ["BING_IMAGE_SEARCH_KEY"]
# An Anomaly Detector subscription key
anomaly_key = os.environ["ANOMALY_API_KEY"]
# A Translator subscription key
translator_key = os.environ["TRANSLATOR_KEY"]
```
## Text Analytics sample
The [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) service provides several algorithms for extracting intelligent insights from text. For example, we can find the sentiment of given input text. The service will return a score between 0.0 and 1.0 where low scores indicate negative sentiment and high score indicates positive sentiment. This sample uses three simple sentences and returns the sentiment for each.
```
# Text Analytics sample: score the sentiment of three sentences.
# Create a dataframe that's tied to its column names
df = spark.createDataFrame([
    ("I am so happy today, its sunny!", "en-US"),
    ("I am frustrated by this rush hour traffic", "en-US"),
    ("The cognitive services on spark aint bad", "en-US"),
], ["text", "language"])

# Run the Text Analytics service with options
sentiment = (TextSentiment()
    .setTextCol("text")
    .setLocation("eastus")
    .setSubscriptionKey(service_key)
    .setOutputCol("sentiment")
    .setErrorCol("error")
    .setLanguageCol("language"))

# Show the results of your text query in a table format.
# The output column is an array of documents; [0] takes the first one.
display(sentiment.transform(df).select("text", col(
    "sentiment")[0].getItem("sentiment").alias("sentiment")))
```
## Translator sample
[Translator](https://azure.microsoft.com/en-us/services/cognitive-services/translator/) is a cloud-based machine translation service and is part of the Azure Cognitive Services family of cognitive APIs used to build intelligent apps. Translator is easy to integrate in your applications, websites, tools, and solutions. It allows you to add multi-language user experiences in 90 languages and dialects and can be used for text translation with any operating system. In this sample, we do a simple text translation by providing the sentences you want to translate and target languages you want to translate to.
```
from pyspark.sql.functions import col, flatten

# Translator sample: translate an array of sentences to Simplified Chinese.
# Create a dataframe including sentences you want to translate
df = spark.createDataFrame([
    (["Hello, what is your name?", "Bye"],)
], ["text",])

# Run the Translator service with options
translate = (Translate()
    .setSubscriptionKey(translator_key)
    .setLocation("eastus")
    .setTextCol("text")
    .setToLanguage(["zh-Hans"])
    .setOutputCol("translation"))

# Show the results of the translation: flatten the nested translations
# arrays, then project out just the translated text.
display(translate
    .transform(df)
    .withColumn("translation", flatten(col("translation.translations")))
    .withColumn("translation", col("translation.text"))
    .select("translation"))
```
## Form Recognizer sample
[Form Recognizer](https://azure.microsoft.com/en-us/services/form-recognizer/) is a part of Azure Applied AI Services that lets you build automated data processing software using machine learning technology. Identify and extract text, key/value pairs, selection marks, tables, and structure from your documents—the service outputs structured data that includes the relationships in the original file, bounding boxes, confidence and more. In this sample, we analyze a business card image and extract its information into structured data.
```
from pyspark.sql.functions import col, explode

# Form Recognizer sample: extract structured fields from a business card.
# Create a dataframe containing the source files
imageDf = spark.createDataFrame([
    ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/business_card.jpg",)
], ["source",])

# Run the Form Recognizer service
analyzeBusinessCards = (AnalyzeBusinessCards()
    .setSubscriptionKey(service_key)
    .setLocation("eastus")
    .setImageUrlCol("source")
    .setOutputCol("businessCards"))

# Show the results of recognition: explode the per-document field maps
# into one row per recognized document.
display(analyzeBusinessCards
    .transform(imageDf)
    .withColumn("documents", explode(col("businessCards.analyzeResult.documentResults.fields")))
    .select("source", "documents"))
```
## Computer Vision sample
[Computer Vision](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/) analyzes images to identify structure such as faces, objects, and natural-language descriptions. In this sample, we tag a list of images. Tags are one-word descriptions of things in the image like recognizable objects, people, scenery, and actions.
```
# Computer Vision sample: tag a small list of images.
# Create a dataframe with the image URLs
df = spark.createDataFrame([
    ("https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/objects.jpg", ),
    ("https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/dog.jpg", ),
    ("https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/house.jpg", )
], ["image", ])

# Run the Computer Vision service. Analyze Image extracts information from/about the images.
analysis = (AnalyzeImage()
    .setLocation("eastus")
    .setSubscriptionKey(service_key)
    .setVisualFeatures(["Categories", "Color", "Description", "Faces", "Objects", "Tags"])
    .setOutputCol("analysis_results")
    .setImageUrlCol("image")
    .setErrorCol("error"))

# Show the results of what you wanted to pull out of the images
# (here: the one-word description tags).
display(analysis.transform(df).select(
    "image", "analysis_results.description.tags"))
```
## Bing Image Search sample
[Bing Image Search](https://azure.microsoft.com/en-us/services/cognitive-services/bing-image-search-api/) searches the web to retrieve images related to a user's natural language query. In this sample, we use a text query that looks for images with quotes. It returns a list of image URLs that contain photos related to our query.
```
# Bing Image Search sample: page through search results for a text query.
# Number of images Bing will return per query
imgsPerBatch = 10
# A list of offsets, used to page into the search results
offsets = [(i*imgsPerBatch,) for i in range(100)]
# Since web content is our data, we create a dataframe with options on that data: offsets
bingParameters = spark.createDataFrame(offsets, ["offset"])

# Run the Bing Image Search service with our text query
bingSearch = (BingImageSearch()
    .setSubscriptionKey(bing_search_key)
    .setOffsetCol("offset")
    .setQuery("Martin Luther King Jr. quotes")
    .setCount(imgsPerBatch)
    .setOutputCol("images"))

# Transformer that extracts and flattens the richly structured output of Bing Image Search into a simple URL column
getUrls = BingImageSearch.getUrlTransformer("images", "url")

# This displays the full results returned, uncomment to use
# display(bingSearch.transform(bingParameters))

# Since we have two services, they are put into a pipeline
pipeline = PipelineModel(stages=[bingSearch, getUrls])

# Show the results of your search: image URLs
display(pipeline.transform(bingParameters))
```
## Speech-to-Text sample
The [Speech-to-text](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/) service converts streams or files of spoken audio to text. In this sample, we transcribe one audio file.
```
# Speech-to-text sample: transcribe one hosted .wav file.
# Create a dataframe with our audio URLs, tied to the column called "url"
df = spark.createDataFrame([("https://mmlspark.blob.core.windows.net/datasets/Speech/audio2.wav",)
], ["url"])

# Run the Speech-to-text service to transcribe the audio into text.
# "Masked" replaces profanity with asterisks in the transcript.
speech_to_text = (SpeechToTextSDK()
    .setSubscriptionKey(service_key)
    .setLocation("eastus")
    .setOutputCol("text")
    .setAudioDataCol("url")
    .setLanguage("en-US")
    .setProfanity("Masked"))

# Show the results of the transcription
display(speech_to_text.transform(df).select("url", "text.DisplayText"))
```
## Anomaly Detector sample
[Anomaly Detector](https://azure.microsoft.com/en-us/services/cognitive-services/anomaly-detector/) is great for detecting irregularities in your time series data. In this sample, we use the service to find anomalies in the entire time series.
```
# Anomaly Detector sample: find irregular points in a monthly time series.
# Create a dataframe with the point data that Anomaly Detector requires:
# one (timestamp, value) row per month, plus a constant "group" column so
# SimpleDetectAnomalies treats all rows as a single series.
df = spark.createDataFrame([
    ("1972-01-01T00:00:00Z", 826.0),
    ("1972-02-01T00:00:00Z", 799.0),
    ("1972-03-01T00:00:00Z", 890.0),
    ("1972-04-01T00:00:00Z", 900.0),
    ("1972-05-01T00:00:00Z", 766.0),
    ("1972-06-01T00:00:00Z", 805.0),
    ("1972-07-01T00:00:00Z", 821.0),
    ("1972-08-01T00:00:00Z", 20000.0),
    ("1972-09-01T00:00:00Z", 883.0),
    ("1972-10-01T00:00:00Z", 898.0),
    ("1972-11-01T00:00:00Z", 957.0),
    ("1972-12-01T00:00:00Z", 924.0),
    ("1973-01-01T00:00:00Z", 881.0),
    ("1973-02-01T00:00:00Z", 837.0),
    ("1973-03-01T00:00:00Z", 9000.0)
], ["timestamp", "value"]).withColumn("group", lit("series1"))

# Run the Anomaly Detector service to look for irregular data.
# (Renamed from the misspelled `anamoly_detector`; the variable is local
# to this cell, so nothing else depends on the old name.)
anomaly_detector = (SimpleDetectAnomalies()
    .setSubscriptionKey(anomaly_key)
    .setLocation("eastus")
    .setTimestampCol("timestamp")
    .setValueCol("value")
    .setOutputCol("anomalies")
    .setGroupbyCol("group")
    .setGranularity("monthly"))

# Show the full results of the analysis with the anomalies marked as "True"
display(anomaly_detector.transform(df).select(
    "timestamp", "value", "anomalies.isAnomaly"))
```
## Arbitrary web APIs
With HTTP on Spark, any web service can be used in your big data pipeline. In this example, we use the [World Bank API](http://api.worldbank.org/v2/country/) to get information about various countries around the world.
```
# HTTP-on-Spark sample: call an arbitrary web API (World Bank) from Spark.
# Use any requests from the python requests library
def world_bank_request(country):
    """Build the GET request for one country's World Bank record."""
    return Request("GET", "http://api.worldbank.org/v2/country/{}?format=json".format(country))

# Create a dataframe which specifies which countries we want data on
df = (spark.createDataFrame([("br",), ("usa",)], ["country"])
      .withColumn("request", http_udf(world_bank_request)(col("country"))))

# Much faster for big data because of the concurrency :)
client = (HTTPTransformer()
    .setConcurrency(3)
    .setInputCol("request")
    .setOutputCol("response"))

# Get the body of the response
def get_response_body(resp):
    """Decode the HTTP response entity to a string."""
    return resp.entity.content.decode()

# Show the details of the country data returned
display(client.transform(df)
    .select("country", udf(get_response_body)(col("response"))
    .alias("response")))
```
## Azure Cognitive search sample
In this example, we show how you can enrich data using Cognitive Skills and write to an Azure Search Index using SynapseML.
```
# Azure Cognitive Search sample: enrich images with Computer Vision and
# write the enriched rows to a search index.
# Read the service keys that the Shared code cell stored in the environment.
VISION_API_KEY = os.environ['VISION_API_KEY']
AZURE_SEARCH_KEY = os.environ['AZURE_SEARCH_KEY']
search_service = "mmlspark-azure-search"
search_index = "test-33467690"

# One row per document to index: search action, a document key, and the image URL.
df = spark.createDataFrame([("upload", "0", "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg"),
                            ("upload", "1", "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg")],
                           ["searchAction", "id", "url"])

# Analyze each image, flatten the "analyzed" struct into top-level columns,
# then drop the intermediate and error columns.
tdf = AnalyzeImage()\
    .setSubscriptionKey(VISION_API_KEY)\
    .setLocation("eastus")\
    .setImageUrlCol("url")\
    .setOutputCol("analyzed")\
    .setErrorCol("errors")\
    .setVisualFeatures(["Categories", "Tags", "Description", "Faces", "ImageType", "Color", "Adult"])\
    .transform(df).select("*", "analyzed.*")\
    .drop("errors", "analyzed")

# Push the enriched rows to the Azure Search index ("id" is the key column).
tdf.writeToAzureSearch(subscriptionKey=AZURE_SEARCH_KEY,
                       actionCol="searchAction",
                       serviceName=search_service,
                       indexName=search_index,
                       keyCol="id")
```
| github_jupyter |
# Fitting the distribution of heights data
## Instructions
In this assessment you will write code to perform a steepest descent to fit a Gaussian model to the distribution of heights data that was first introduced in *Mathematics for Machine Learning: Linear Algebra*.
The algorithm is the same as you encountered in *Gradient descent in a sandpit* but this time instead of descending a pre-defined function, we shall descend the $\chi^2$ (chi squared) function which is both a function of the parameters that we are to optimise, but also the data that the model is to fit to.
## How to submit
Complete all the tasks you are asked for in the worksheet. When you have finished and are happy with your code, press the **Submit Assignment** button at the top of this notebook.
## Get started
Run the cell below to load dependencies and generate the first figure in this worksheet.
```
# Run this cell first to load the dependencies for this assessment,
# and generate the first figure.
from readonly.HeightsModule import *
```
## Background
If we have data for the heights of people in a population, it can be plotted as a histogram, i.e., a bar chart where each bar has a width representing a range of heights, and an area which is the probability of finding a person with a height in that range.
We can look to model that data with a function, such as a Gaussian, which we can specify with two parameters, rather than holding all the data in the histogram.
The Gaussian function is given as,
$$f(\mathbf{x};\mu, \sigma) = \frac{1}{\sigma\sqrt{2\pi}}\exp\left(-\frac{(\mathbf{x} - \mu)^2}{2\sigma^2}\right)$$
The figure above shows the data in orange, the model in magenta, and where they overlap in green.
This particular model has not been fit well - there is not a strong overlap.
Recall from the videos the definition of $\chi^2$ as the squared difference of the data and the model, i.e $\chi^2 = |\mathbf{y} - f(\mathbf{x};\mu, \sigma)|^2$. This is represented in the figure as the sum of the squares of the pink and orange bars.
Don't forget that $\mathbf{x}$ and $\mathbf{y}$ are represented as vectors here, as these are lists of all of the data points; the |*abs-squared*|${}^2$ encodes squaring and summing of the residuals on each bar.
To improve the fit, we will want to alter the parameters $\mu$ and $\sigma$, and ask how that changes the $\chi^2$.
That is, we will need to calculate the Jacobian,
$$ \mathbf{J} = \left[ \frac{\partial ( \chi^2 ) }{\partial \mu} , \frac{\partial ( \chi^2 ) }{\partial \sigma} \right]\;. $$
Let's look at the first term, $\frac{\partial ( \chi^2 ) }{\partial \mu}$, using the multi-variate chain rule, this can be written as,
$$ \frac{\partial ( \chi^2 ) }{\partial \mu} = -2 (\mathbf{y} - f(\mathbf{x};\mu, \sigma)) \cdot \frac{\partial f}{\partial \mu}(\mathbf{x};\mu, \sigma)$$
With a similar expression for $\frac{\partial ( \chi^2 ) }{\partial \sigma}$; try and work out this expression for yourself.
The Jacobians rely on the derivatives $\frac{\partial f}{\partial \mu}$ and $\frac{\partial f}{\partial \sigma}$.
Write functions below for these.
```
# PACKAGE
import matplotlib.pyplot as plt
import numpy as np
# GRADED FUNCTION
def f(x, mu, sig):
    """Gaussian density with mean `mu` and standard deviation `sig`."""
    norm_const = 1.0 / (sig * np.sqrt(2 * np.pi))
    return norm_const * np.exp(-((x - mu) ** 2) / (2 * sig ** 2))

# Derivative of f with respect to the mean μ, written as f multiplied by
# the chain-rule factor from differentiating the exponent.
def dfdmu(x, mu, sig):
    return ((x - mu) / sig ** 2) * f(x, mu, sig)

# Derivative of f with respect to σ: one term from the 1/σ normalisation
# and one from differentiating the exponent.
def dfdsig(x, mu, sig):
    return ((x - mu) ** 2 / sig ** 3 - 1.0 / sig) * f(x, mu, sig)
```
Next recall that steepest descent shall move around in parameter space proportional to the negative of the Jacobian,
i.e., $\begin{bmatrix} \delta\mu \\ \delta\sigma \end{bmatrix} \propto -\mathbf{J} $, with the constant of proportionality being the *aggression* of the algorithm.
Modify the function below to include the $\frac{\partial ( \chi^2 ) }{\partial \sigma}$ term of the Jacobian, the $\frac{\partial ( \chi^2 ) }{\partial \mu}$ term has been included for you.
```
# GRADED FUNCTION
def steepest_step(x, y, mu, sig, aggression):
    """One steepest-descent update for (mu, sig) on the chi^2 surface.

    Returns the parameter step [d_mu, d_sigma], proportional to the
    negative Jacobian scaled by the aggression factor.
    """
    # Residual between the data and the current model prediction.
    residual = y - f(x, mu, sig)
    # Jacobian of chi^2 = |residual|^2 w.r.t. mu and sigma.
    J = -2.0 * np.array([
        residual @ dfdmu(x, mu, sig),
        residual @ dfdsig(x, mu, sig),
    ])
    # Move against the gradient.
    return -J * aggression
```
## Test your code before submission
To test the code you've written above, run all previous cells (select each cell, then press the play button [ ▶| ] or press shift-enter).
You can then use the code below to test out your function.
You don't need to submit these cells; you can edit and run them as much as you like.
```
# First get the heights data, ranges and frequencies
x,y = heights_data()
# Next we'll assign trial values for these.
mu = 155 ; sig = 6
# We'll keep a track of these so we can plot their evolution.
p = np.array([[mu, sig]])
# Plot the histogram for our parameter guess
histogram(f, [mu, sig])
# Do a few rounds of steepest descent.
for i in range(50) :
    # 2000 is the aggression (step-size) factor passed to steepest_step.
    dmu, dsig = steepest_step(x, y, mu, sig, 2000)
    mu += dmu
    sig += dsig
    p = np.append(p, [[mu,sig]], axis=0)
# Plot the path through parameter space.
contour(f, p)
# Plot the final histogram.
histogram(f, [mu, sig])
```
Note that the path taken through parameter space is not necessarily the most direct path, as with steepest descent we always move perpendicular to the contours.
| github_jupyter |
# Mean Shift using Robust Scaler
This Code template is for the Cluster analysis using a simple Mean Shift(Centroid-Based Clustering using a flat kernel) Clustering algorithm along with feature scaling using Robust Scaler and includes 2D and 3D cluster visualization of the Clusters.
### Required Packages
```
!pip install plotly
import operator
import warnings
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import RobustScaler
import plotly.express as px
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly.graph_objects as go
from sklearn.cluster import MeanShift, estimate_bandwidth
warnings.filterwarnings("ignore")
```
### Initialization
Filepath of CSV file
```
# Filesystem/storage path of the CSV dataset to analyse.
file_path = ""
```
List of features which are required for model training
```
# Names of the dataframe columns to cluster on (must exist in the CSV).
features=[]
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
```
# Load the dataset and preview the first rows.
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X.
```
# Keep only the selected feature columns for model training.
X = df[features]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library don't handle string category data or null values, we have to explicitly remove or replace the null values. The snippet below has functions which remove null values, if any exist, and convert string class data in the dataset by encoding it into integer classes.
```
def NullClearner(df):
    """Impute missing values in a Series in place and return it.

    Numeric columns are filled with their mean, everything else with the
    most frequent value (mode).  Non-Series inputs pass through unchanged.
    """
    if isinstance(df, pd.Series):
        # is_numeric_dtype generalizes the original ["float64", "int64"]
        # string check to every numeric dtype (int32, float32, ...),
        # which the old check silently treated as categorical.
        if pd.api.types.is_numeric_dtype(df):
            df.fillna(df.mean(), inplace=True)
        else:
            df.fillna(df.mode()[0], inplace=True)
    return df

def EncodeX(df):
    """One-hot encode string/categorical columns via pandas.get_dummies."""
    return pd.get_dummies(df)
```
Calling preprocessing functions on the feature and target set.
```
# Impute missing values column-by-column, then one-hot encode any
# string/categorical columns so the feature matrix is fully numeric.
x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
X.head()
```
#### Feature Scaling
Robust Scaler - Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile).<br>
[For more information click here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html)
```
# Median-centre and IQR-scale the features (robust to outliers).
scaler = RobustScaler()
X_scaled = scaler.fit_transform(X)
```
### Model
Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover “blobs” in a smooth density of samples. It is a centroid-based algorithm, which works by updating candidates for centroids to be the mean of the points within a given region. These candidates are then filtered in a post-processing stage to eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
[More information](https://analyticsindiamag.com/hands-on-tutorial-on-mean-shift-clustering-algorithm/)
#### Tuning Parameters
1. bandwidthfloat, default=None
> Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using sklearn.cluster.estimate_bandwidth
2. seedsarray-like of shape (n_samples, n_features), default=None
> Seeds used to initialize kernels. If not set, the seeds are calculated by clustering.get_bin_seeds with bandwidth as the grid size and default values for other parameters.
3. bin_seedingbool, default=False
> If true, initial kernel locations are not locations of all points, but rather the location of the discretized version of points, where points are binned onto a grid whose coarseness corresponds to the bandwidth.
4. min_bin_freqint, default=1
> To speed up the algorithm, accept only those bins with at least min_bin_freq points as seeds.
5. cluster_allbool, default=True
> If true, then all points are clustered, even those orphans that are not within any kernel. Orphans are assigned to the nearest kernel. If false, then orphans are given cluster label -1
6. n_jobsint, default=None
> The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors.
7. max_iterint, default=300
> Maximum number of iterations, per seed point before the clustering operation terminates
[For more detail on API](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html)
<br>
<br>
#### Estimate Bandwidth
Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large datasets, it's wise to set that parameter to a small value.
```
# Estimate a kernel bandwidth from the scaled data, then fit MeanShift.
# bin_seeding=True seeds kernels from a coarse grid of the points (faster).
bandwidth = estimate_bandwidth(X_scaled, quantile=0.15)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X_scaled)
y_pred = ms.predict(X_scaled)
```
### Cluster Analysis
First, we add the cluster labels from the trained model into the copy of the data frame for cluster analysis/visualization.
```
# Attach the predicted cluster label to a copy of the (unscaled) features.
ClusterDF = X.copy()
ClusterDF['ClusterID'] = y_pred
ClusterDF.head()
```
#### Cluster Records
The below bar graphs show the number of data points in each available cluster.
```
# Bar chart of the number of records assigned to each cluster.
ClusterDF['ClusterID'].value_counts().plot(kind='bar')
```
#### Cluster Plots
Below written functions get utilized to plot 2-Dimensional and 3-Dimensional cluster plots on the available set of features in the dataset. Plots include different available clusters along with cluster centroid.
```
def Plot2DCluster(X_Cols,df):
    """Scatter-plot every 2-feature combination, coloured by cluster.

    Overlays the fitted MeanShift centroids (global `ms`) as black
    triangles.  NOTE(review): assumes df's column order matches the
    feature order used to fit `ms` -- confirm if columns are reordered.
    """
    for i in list(itertools.combinations(X_Cols, 2)):
        plt.rcParams["figure.figsize"] = (8,6)
        # Column positions of the chosen pair, used to index centroids.
        xi,yi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1])
        for j in df['ClusterID'].unique():
            DFC=df[df.ClusterID==j]
            plt.scatter(DFC[i[0]],DFC[i[1]],cmap=plt.cm.Accent,label=j)
        plt.scatter(ms.cluster_centers_[:,xi],ms.cluster_centers_[:,yi],marker="^",color="black",label="centroid")
        plt.xlabel(i[0])
        plt.ylabel(i[1])
        plt.legend()
        plt.show()
def Plot3DCluster(X_Cols, df):
    """Draw a 3-D scatter plot for every 3-feature combination in X_Cols.

    Each cluster (df['ClusterID']) gets its own series, and the fitted
    MeanShift centroids (global `ms`) are overlaid as black triangles.
    """
    for i in list(itertools.combinations(X_Cols, 3)):
        # Column positions of the three chosen features (also index the
        # matching coordinates of the cluster centres).
        xi, yi, zi = df.columns.get_loc(i[0]), df.columns.get_loc(i[1]), df.columns.get_loc(i[2])
        fig, ax = plt.figure(figsize=(16, 10)), plt.axes(projection="3d")
        # Pass the grid flag positionally: the keyword was `b=` before
        # matplotlib 3.5 and is `visible=` afterwards, so the positional
        # form works on both (the old `b=True` raises on modern matplotlib).
        ax.grid(True, color='grey', linestyle='-.', linewidth=0.3, alpha=0.2)
        for j in df['ClusterID'].unique():
            DFC = df[df.ClusterID == j]
            ax.scatter3D(DFC[i[0]], DFC[i[1]], DFC[i[2]], alpha=0.8, cmap=plt.cm.Accent, label=j)
        ax.scatter3D(ms.cluster_centers_[:, xi], ms.cluster_centers_[:, yi], ms.cluster_centers_[:, zi],
                     marker="^", color="black", label="centroid")
        ax.set_xlabel(i[0])
        ax.set_ylabel(i[1])
        ax.set_zlabel(i[2])
        plt.legend()
        plt.show()
def Plotly3D(X_Cols,df):
    """Interactive plotly 3-D scatter for every 3-feature combination.

    Combines a centroid trace (from global `ms`) with the per-cluster
    point trace into one titled figure per combination.
    """
    for i in list(itertools.combinations(X_Cols,3)):
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        # Trace 1: cluster centres for the chosen feature triple.
        fig1 = px.scatter_3d(ms.cluster_centers_,x=ms.cluster_centers_[:,xi],y=ms.cluster_centers_[:,yi],
        z=ms.cluster_centers_[:,zi])
        # Trace 2: the data points coloured by cluster label.
        fig2=px.scatter_3d(df, x=i[0], y=i[1],z=i[2],color=df['ClusterID'])
        # Merge both traces into a single titled figure.
        fig3 = go.Figure(data=fig1.data + fig2.data,
        layout=go.Layout(title=go.layout.Title(text="x:{}, y:{}, z:{}".format(i[0],i[1],i[2])))
        )
        fig3.show()
# Global seaborn/matplotlib styling for the cluster plots below.
sns.set_style("whitegrid")
sns.set_context("talk")
plt.rcParams["lines.markeredgewidth"] = 1
# Pairwise scatter matrix coloured by cluster, then the 2-D/3-D views.
sns.pairplot(data=ClusterDF, hue='ClusterID', palette='Dark2', height=5)
Plot2DCluster(X.columns,ClusterDF)
Plot3DCluster(X.columns,ClusterDF)
Plotly3D(X.columns,ClusterDF)
```
#### [Created by Anu Rithiga](https://github.com/iamgrootsh7)
| github_jupyter |
# This notebook explores the Energy Preserving Neural Network Idea!
-------------------------------------------------------------------------------------------------------------------
# Dataset used => MNIST
-------------------------------------------------------------------------------------------------------------------
# Technology used => TensorFlow
This model is deeper than the earlier.
```
# import all the required packages:
# packages used for processing:
from __future__ import print_function # making backward compatible
import matplotlib.pyplot as plt # for visualization
import numpy as np
# THE TensorFlow framework
import tensorflow as tf
# use the tensorflow's archived version of the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
# for operating system related stuff
import os
import sys # for memory usage of objects
from subprocess import check_output
# to plot the images inline
%matplotlib inline
# Input data files are available in the "../Data/" directory.
def exec_command(cmd):
    """Run a shell command and print its standard output in the notebook.

    @params
    cmd = the command with its arguments as a list, e.g. ['ls', '../input']
    """
    output = check_output(cmd)
    print(output.decode("utf8"))
# check the structure of the project directory
exec_command(['ls', '../..'])
# set a seed value for the script
seed_value = 3
np.random.seed(seed_value) # set this seed for a device independant consistent behaviour
''' Set the constants for the script '''
# various paths of the files
base_data_path = "../../Data" # the data path
base_model_path = "../../Models/IDEA_3"
# constant values for the script
num_digits = 10 # This is defined. There are 10 labels for 10 digits
img_dim = 28 # images are 28 x 28 sized
num_channels = 1 # images are grayscale
# Hyper parameters for tweaking.
# =================================================================================================================
training_batch_size = 64 # 64 images in each batch
no_of_epochs = 50
# network architecture related parameters:
''' Note that the number of layers will be fixed. you can tweak the number of hidden neurons in these layers: '''
num_hidden_lay_1 = 512
num_hidden_lay_2 = 512
num_hidden_lay_3 = 512
num_hidden_lay_4 = num_digits
# learning rate required for other optimizers:
learning_rate = 3e-4 # lolz! the karpathy constant
# =================================================================================================================
mnist = input_data.read_data_sets(os.path.join(base_data_path, "MNIST_data"), seed=seed_value, one_hot=True)
train_X = mnist.train.images; train_Y = mnist.train.labels
dev_X = mnist.validation.images; dev_Y = mnist.validation.labels
test_X = mnist.test.images; test_Y = mnist.test.labels
# print all the shapes:
print("Training Data shapes: ", train_X.shape, train_Y.shape)
print("Development Data shapes: ", dev_X.shape, dev_Y.shape)
print("Test Data shapes: ", test_X.shape, test_Y.shape)
# define the total_train_examples, total_dev_examples and the total_test_examples using the above arrays
total_train_examples = train_X.shape[0]
total_dev_examples = dev_X.shape[0]
total_test_examples = test_X.shape[0]
input_dimension = train_X.shape[1]
# just double checking if all the values are correct:
print("Training_data_size =", total_train_examples)
print("Development_data_size =", total_dev_examples)
print("Test_data_size =", total_test_examples)
print("Input data dimensions =", input_dimension)
''' Randomized cell: Behaviour changes upon running multiple times '''
random_index = np.random.randint(total_train_examples)
# bring the random image from the training data
random_image = train_X[random_index].reshape((img_dim, img_dim))
label_for_random_image = np.argmax(train_Y[random_index])
# display this random image:
plt.figure().suptitle("Image for digit: " + str(label_for_random_image))
plt.imshow(random_image);
```
## Experimentation:
In this notebook, I'll compare three different versions of neural networks. First is the usual neural network that uses batch_normalization and ReLU's everywhere. Second is the scattering network that fully preserves the energy (norm) of the input vector. Third is the hybrid of the two.
```
tf.reset_default_graph()
# define the placeholders for the model:
with tf.name_scope("Input_Placeholders"):
tf_input_images = tf.placeholder(tf.float32, shape=(None, input_dimension), name="input_images")
tf_input_labels = tf.placeholder(tf.float32, shape=(None, num_digits), name="input_labels")
# add input images summary:
input_image_summary = tf.summary.image("Input_images",
tf.reshape(tf_input_images, shape=(-1, img_dim, img_dim, num_channels)))
```
define the three layers for the modified neural network:
```
# define the first layer:
with tf.variable_scope("layer_1"):
# create the matrix variable
lay_1_connections = tf.get_variable("lay1_connections", shape=(num_hidden_lay_1, input_dimension),
dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value))
with tf.name_scope("transformed_weights"):
transformed_lay_1_connections = tf.nn.softmax(lay_1_connections, dim=0, name="softmax")
# define the outputs of the layer1:
lay_1_out = tf.matmul(transformed_lay_1_connections,
tf.transpose(tf_input_images)) # This is a simple matmul! no biases req.
# add histogram summary over the lay_1_connections:
lay_1_connections_summary = tf.summary.histogram("lay_1_connections", lay_1_connections)
# print the tensor shape of the lay_1_out
print("Layer_1 output:", lay_1_out)
# define the second layer:
with tf.variable_scope("layer_2"):
# create the matrix variable
lay_2_connections = tf.get_variable("lay2_connections", shape=(num_hidden_lay_2, num_hidden_lay_1),
dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value))
with tf.name_scope("transformed_weights"):
transformed_lay_2_connections = tf.nn.softmax(lay_2_connections, dim=0, name="softmax")
# define the outputs of the layer2:
lay_2_out = tf.matmul(transformed_lay_2_connections,
lay_1_out) # This is a simple matmul! no biases req.
lay_2_connections_summary = tf.summary.histogram("lay_2_connections", lay_2_connections)
# print the tensor shape of the lay_2_out
print("Layer_2 output:", lay_2_out)
# define the third layer:
with tf.variable_scope("layer_3"):
# create the matrix variable
lay_3_connections = tf.get_variable("lay3_connections", shape=(num_hidden_lay_3, num_hidden_lay_2),
dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value))
with tf.name_scope("transformed_weights"):
transformed_lay_3_connections = tf.nn.softmax(lay_3_connections, dim=0, name="softmax")
# define the outputs of the layer3:
lay_3_out = tf.matmul(transformed_lay_3_connections,
lay_2_out) # This is a simple matmul! no biases req.
lay_3_connections_summary = tf.summary.histogram("lay_3_connections", lay_3_connections)
# print the tensor shape of the lay_3_out
print("Layer_3 output:", lay_3_out)
# define the final layer:
with tf.variable_scope("layer_4"):
# create the matrix variable
lay_4_connections = tf.get_variable("lay4_connections", shape=(num_hidden_lay_4, num_hidden_lay_3),
dtype=tf.float32, initializer=tf.truncated_normal_initializer(seed=seed_value))
with tf.name_scope("transformed_weights"):
transformed_lay_4_connections = tf.nn.softmax(lay_4_connections, dim=0, name="softmax")
# define the outputs of the layer3:
lay_4_out = tf.matmul(transformed_lay_4_connections,
lay_3_out) # This is a simple matmul! no biases req.
lay_4_connections_summary = tf.summary.histogram("lay_4_connections", lay_4_connections)
# print the layer_4 output tensor
print("Layer_4 output:", lay_4_out)
# define the predictions obtained from these computations:
with tf.name_scope("Predictions"):
predictions = tf.nn.softmax(tf.transpose(lay_4_out))
# print the shape of the predictions:
print("Predictions:", predictions)
```
define the loss for optimization:
```
# loss definition:
with tf.name_scope("Loss"):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf.transpose(lay_4_out),
labels=tf_input_labels))
# attach a scalar summary over this:
loss_summary = tf.summary.scalar("Loss", loss)
# define the accuracy calculation module:
with tf.name_scope("Accuracy"):
correct = tf.equal(tf.argmax(predictions, axis=-1), tf.argmax(tf_input_labels, axis=-1))
accuracy = tf.reduce_mean(tf.cast(correct, dtype=tf.float32))
# attach a scalar summary for the accuracy calculates
accuracy_summary = tf.summary.scalar("Accuracy", accuracy)
# define the trainer of the operation
with tf.name_scope("Trainer"):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# define the tensorflow errands
with tf.name_scope("Errands"):
init = tf.global_variables_initializer()
all_summaries = tf.summary.merge_all()
```
### Start the session to train the model
```
# define the model to train:
model_name = "Model_fully_energy_preserving_4_deep"
# generate the model saving path (checkpoints + TensorBoard logs live here):
model_save_path = os.path.join(base_model_path, model_name)
with tf.Session() as sess:
    # create a tensorboard writer
    tensorboard_writer = tf.summary.FileWriter(logdir=model_save_path, graph=sess.graph, filename_suffix=".bot")
    # create a saver (keeps only the 3 most recent checkpoints)
    saver = tf.train.Saver(max_to_keep=3)
    # restore the session if the checkpoint exists:
    if(os.path.isfile(os.path.join(model_save_path, "checkpoint"))):
        saver.restore(sess, tf.train.latest_checkpoint(model_save_path))
    else: # initialize all the variables:
        sess.run(init)
    # NOTE(review): global_step restarts at 0 even after a checkpoint restore,
    # so TensorBoard summaries from a resumed run overlap earlier steps --
    # confirm this is intended.
    global_step = 0
    print("Starting the training process . . .")
    for epoch in range(no_of_epochs):
        # run through the batches of the data:
        accuracies = [] # per-batch training accuracies for this epoch
        for batch in range(int(np.ceil(float(total_train_examples) / training_batch_size))):
            # slice the current mini-batch (the last one may be short):
            start = batch * training_batch_size; end = start + training_batch_size
            # extract the relevant data:
            batch_data_X = train_X[start: end]
            batch_data_Y = train_Y[start: end]
            # This is batch gradient descent:
            _, cost, acc, sums = sess.run([train_step, loss, accuracy, all_summaries],
                                          feed_dict={tf_input_images: batch_data_X,
                                                     tf_input_labels: batch_data_Y})
            # append the acc to the accuracies list
            accuracies.append(acc)
            # save the summarys
            tensorboard_writer.add_summary(sums, global_step)
            # increment the global step
            global_step += 1
        # `cost` here is the loss of the LAST batch of the epoch, not an average.
        print("epoch =", epoch, "\tcost =", cost)
        # evaluate the (mean) accuracy of the training epoch:
        print("accuracy =", sum(accuracies) / len(accuracies))
        # evaluate the dev-set accuracy (full validation split in one run):
        dev_acc = sess.run(accuracy, feed_dict={tf_input_images: dev_X, tf_input_labels: dev_Y})
        print("Obtained Dev accuracy = ", dev_acc)
        # save the model after every epoch
        # NOTE(review): checkpoints are numbered epoch + 10 -- presumably to
        # continue numbering after an earlier 10-epoch run; verify.
        saver.save(sess, os.path.join(model_save_path, model_name), global_step=(epoch + 10))
    print("Training complete . . .")
    # Once, the training is complete:
    # print the test accuracy (must run inside the session):
    test_acc = sess.run(accuracy, feed_dict={tf_input_images: test_X, tf_input_labels: test_Y})
    print("Obtained Test accuracy = ", test_acc)
```
| github_jupyter |
### Load preprocessed data
Run the script that downloads and processes the MovieLens data.
Uncomment it to run the download & processing script.
```
# !python ../src/download.py
import numpy as np
from sklearn.model_selection import train_test_split
from torch import from_numpy
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from torch.utils.data import BatchSampler
from torch.utils.data import RandomSampler
fh = np.load('data/dataset.npz')
# We have a bunch of feature columns and last column is the y-target
# Note pytorch is finicky about need int64 types
train_x = fh['train_x'].astype(np.int64)
train_y = fh['train_y']
# We've already split into train & test
X_test = fh['test_x'].astype(np.int64)
Y_test = fh['test_y']
X_train, X_val, Y_train, Y_val = train_test_split(train_x, train_y)
n_user = int(fh['n_user'])
n_item = int(fh['n_item'])
# columns are user_id, item_id and other features
# we won't use the 3rd and 4th columns
print(X_train)
print(' ')
print(Y_train)
def dataloader(*arrs, batch_size=32):
    """Wrap tensors in a shuffling DataLoader.

    Each tensor in *arrs* becomes one field of a TensorDataset; batches of
    ``batch_size`` rows are drawn in random order and the final partial
    batch is kept (``drop_last=False``).
    """
    ds = TensorDataset(*arrs)
    sampler = BatchSampler(RandomSampler(ds),
                           batch_size=batch_size, drop_last=False)
    return DataLoader(ds, batch_sampler=sampler, num_workers=8)
train = dataloader(from_numpy(X_train), from_numpy(Y_train))
test = dataloader(from_numpy(X_test), from_numpy(Y_test))
val = dataloader(from_numpy(X_val), from_numpy(Y_val))
from abstract_model import AbstractModel
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
def l2_regularize(array):
    """Return the sum of squared entries (squared L2 norm) of *array*."""
    squared = array ** 2.0
    return squared.sum()
class MF(AbstractModel):
    """Matrix-factorization recommender with user/item bias terms.

    Predicts a rating for a (user, item) pair as ``p_u . q_i + b + b_u + b_i``
    where ``p_u``/``q_i`` are learned k-dimensional embeddings and
    ``b``, ``b_u``, ``b_i`` are a global, per-user and per-item scalar bias.
    """

    def __init__(self, n_user, n_item, k=18, c_vector=1.0, c_bias=1.0, batch_size=128):
        """Build embeddings and biases; c_vector / c_bias are L2 strengths."""
        super().__init__()
        # These are simple hyperparameters
        self.k = k
        self.n_user = n_user
        self.n_item = n_item
        self.c_vector = c_vector
        self.c_bias = c_bias
        self.batch_size = batch_size
        self.save_hyperparameters()
        # These are learned and fit by PyTorch
        self.user = nn.Embedding(n_user, k)
        self.item = nn.Embedding(n_item, k)
        # We've added new terms here:
        self.bias_user = nn.Embedding(n_user, 1)
        self.bias_item = nn.Embedding(n_item, 1)
        self.bias = nn.Parameter(torch.ones(1))

    def forward(self, inputs):
        """Score a batch; column 0 of *inputs* is user ids, column 1 item ids.

        Returns a 1-D tensor of predicted ratings, one per input row.
        """
        # This is the most import function in this script
        # These are the user indices, and correspond to "u" variable
        user_id = inputs[:, 0]
        # Item indices, correspond to the "i" variable
        item_id = inputs[:, 1]
        # vector user = p_u
        vector_user = self.user(user_id)
        # vector item = q_i
        vector_item = self.item(item_id)
        # this is a dot product & a user-item interaction: p_u * q_i
        ui_interaction = torch.sum(vector_user * vector_item, dim=1)
        # Pull out biases
        # bias_user shape (bs, 1)
        # bias_user.squeeze() shape (bs,)
        bias_user = self.bias_user(user_id).squeeze()
        bias_item = self.bias_item(item_id).squeeze()
        biases = (self.bias + bias_user + bias_item)
        # Add bias prediction to the interaction prediction
        prediction = ui_interaction + biases
        return prediction

    def loss(self, prediction, target):
        """Return (total_loss, log_dict) for the batch.

        MSE error between target = R_ui and prediction = p_u * q_i.
        """
        loss_mse = F.mse_loss(prediction, target.squeeze())
        return loss_mse, {"mse": loss_mse}

    def reg(self):
        """Return (total_regularization, log_dict) over all learned tables."""
        # Add new regularization to the biases
        reg_bias_user = l2_regularize(self.bias_user.weight) * self.c_bias
        reg_bias_item = l2_regularize(self.bias_item.weight) * self.c_bias
        # Compute L2 reularization over user (P) and item (Q) matrices
        reg_user = l2_regularize(self.user.weight) * self.c_vector
        reg_item = l2_regularize(self.item.weight) * self.c_vector
        # Add up the user & item regularization terms for logging
        log = {"reg_user": reg_user, "reg_item": reg_item,
               "reg_bias_user": reg_bias_user, "reg_bias_item": reg_bias_item}
        total = reg_user + reg_item + reg_bias_user + reg_bias_item
        return total, log
```
#### Optimize hyperparameters
```
import optuna
from pytorch_lightning.loggers.wandb import WandbLogger
def objective(trial):
    """Optuna objective: sample MF hyperparameters, train, return test loss.

    Samples the embedding size ``k`` and the two L2 strengths on log scales,
    trains with early stopping, and returns the averaged test loss that
    Optuna minimizes.
    """
    # Sample parameters -- without declaring them in advance!
    k = trial.suggest_int('n_hid', 1, 20)
    # pretty good params are c_bias = 5e-8, c_vector=1e-5, nhid=5
    c_vector = trial.suggest_loguniform('c_vector', 1e-8, 1e-1)
    c_bias = trial.suggest_loguniform('c_bias', 1e-8, 1e-1)
    model = MF(n_user, n_item, k=k, c_bias=c_bias, c_vector=c_vector,
               batch_size=1024)
    # BUG FIX: the held-out split is named X_test / Y_test in the data-loading
    # cell; `test_x` / `test_y` were undefined names and raised NameError.
    model.save_data(train_x, train_y, X_test, Y_test)
    # add a logger
    logger = WandbLogger(name="02_mf", project="simple_mf")
    trainer = pl.Trainer(max_epochs=100, logger=logger,
                         early_stop_callback=True,
                         gpus=1,
                         progress_bar_refresh_rate=1)
    trainer.fit(model)
    results = trainer.test(model)
    return results['avg_test_loss']
study = optuna.create_study(storage='sqlite:///02.db',
study_name='no-name-1f329d04-352a-4e7f-afb3-546b8be0cfab',
load_if_exists=True)
study.trials_dataframe()
best_mse = study.best_trial.value
best_rmse = np.sqrt(best_mse)
best_rmse
study.best_params
study.optimize(objective, n_trials=100)
```
#### Train a model with the best hyperparameters
```
from pytorch_lightning.loggers.wandb import WandbLogger
k = 5
c_vector = 1e-5
c_bias = 5e-8
model = MF(n_user, n_item, k=k, c_bias=c_bias, c_vector=c_vector,
batch_size=1024)
# add a logger
logger = WandbLogger(name="02_mf", project="simple_mf")
trainer = pl.Trainer(max_epochs=100, logger=logger,
early_stop_callback=True,
progress_bar_refresh_rate=1)
trainer.fit(model, train, val)
trainer.save_checkpoint("02_best")
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as sl
import seaborn as sns; sns.set()
import matplotlib as mpl
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
%matplotlib inline
```
# Tarea 3: Encuentre la regresión
Ud recibe unos datos $x$ y $y$ cómo se muestran a continuación. Ud debe responder cuatro preguntas a partir de estos datos. Suponga que ud tiene un modelo tal que $y=f(x)$ más aún desconoce $f$.
```
df = pd.read_pickle('ex1.gz')
sns.scatterplot(x='x',y='y',data=df)
plt.show()
df
```
## (A) Pendiente e intercepto
Determine la pendiente de los datos en el intervalo $[0,1.5]$ y el valor del intercepto con el eje $y$. Es decir, $f(0)=?$. ¿Cuál es el valor de $r^2$?
```
# Restrict the data to the requested interval x in [0, 1.5].
intervalo = df[(df.x >= 0) & (df.x <= 1.5)]
intervalo
# sklearn expects 2-D arrays, hence the (-1, 1) reshape of each column.
x1= intervalo['x'].values.reshape(-1,1)
y1= intervalo['y'].values.reshape(-1,1)
# Ordinary least-squares fit on the interval.
ecuacion = LinearRegression()
ecuacion.fit(x1,y1)
intercepto = ecuacion.intercept_  # f(0), the y-axis intercept
mpendiente = ecuacion.coef_       # slope of the fitted line
r2 = ecuacion.score(x1,y1)        # coefficient of determination
print("La Pendiente es : ", mpendiente)
print("El Intercepto es : ", intercepto)
print("R^2: ", r2)
```
## (B) Regresión polinomial
Suponga que quiere realizar la siguiente regresión polinomial,
$$y=\beta_1+\beta_2x+\beta_2x^2+\beta_2x^3+\beta_2x^4+\beta_2x^5.$$
Plantee la función de costo que le permita calcular los coeficientes y calcule $\beta_1$, $\beta_2$, $\beta_3$, $\beta_4$, y $\beta_5$. ¿Cuál es el $r^2$?
Calcule $f(0)$ y compare con los resultados anteriores
```
Y = df.loc[:, ['y']]
Y
def Mat(x, A, b):
    """Mean squared residual of the linear system A @ x ~= b.

    *x* is a flat coefficient vector; it is lifted to a column matrix so the
    residual (A x - b) can be formed with matrix products. Returns a scalar
    equal to ||A x - b||^2 / m where m is the number of rows of A.
    """
    m, n = A.shape
    col = np.matrix(x).T
    residual = (A * col - b)
    return (residual.T * residual)[0, 0] / m
X = df.loc[:, ['x']].rename(columns={'x': 'x1'})
X.insert(0, 'x0', 1)
X['x2'] = X['x1']*X['x1']
X['x3'] = X['x1']*X['x1']*X['x1']
X['x4'] = X['x1']*X['x1']*X['x1']*X['x1']
X['x5'] = X['x1']*X['x1']*X['x1']*X['x1']*X['x1']
xini = X.to_numpy()
yini = Y.to_numpy()
optimizar = sp.optimize.minimize(fun=Mat,x0=np.zeros(xini.shape[1]), args = (xini,yini), tol=1e-10)
print("Los valores de los coeficientes son : ",optimizar['x'])
print("El valor para f(0):",optimizar['x'][0])
y = df["y"]
b = np.linspace(0,4,100)
def func(a, b, c, d, e, f, x):
    """Evaluate the quintic polynomial a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f."""
    # Accumulate the terms left-to-right, same order as the written polynomial.
    terms = (a * x**5, b * x**4, c * x**3, d * x**2, e * x, f)
    total = terms[0]
    for term in terms[1:]:
        total = total + term
    return total
# Evaluate the fitted quintic on the plotting grid with the optimized
# coefficients (beta_6 .. beta_1 in descending power order).
pol = func(optimizar['x'][5], optimizar['x'][4], optimizar['x'][3],
           optimizar['x'][2], optimizar['x'][1], optimizar['x'][0], b)
# NOTE(review): `pol` is evaluated on the linspace grid `b` while `y` are the
# data targets -- this only lines up if the data x-values match the grid;
# confirm, or evaluate `func` at df['x'] instead.
r2 = 1 - np.sum((pol - y)**2) / np.sum((y - y.mean())**2)
r2
# BUG FIX: report the R^2 computed above instead of a hard-coded literal that
# can silently drift from the actual result.
print('El valor para r^2 es', r2)
```
## (C) Regresión polinomial exacta
Resulta, que cuando se quiere hacer alguna regresión polinomial esta se puede hacer de forma exacta. ¿Cómo? Suponga que ud va a considerar que su problema en lugar de tener $1$ variable ($x$) tiene $n+1$, siendo $n$ el orden del polinomio a ajustar. Es decir, sus nuevas variables van a ser $\{x_0,\,x_1,\,x_2,\,x_3,\dots,\,x_n\}$ definiendo $x_j=x^j$. Así pues, siguiendo el mismo procedimiento para la regresión lineal multidimensional que realizamos para el ejercicio de datos inmobiliarios, puede encontrar los valores de los coeficientes $\beta_1$, $\beta_2$, $\beta_3$, $\beta_4$, y $\beta_5$. Encuentre estos valores y compare con los resultados en la sección **(B)**.
Calcule $f(0)$ y compare con los resultados anteriores.
> Si ud se pregunta si esto es posible la respuesta es sí. Inclusive, esto se puede extender a cualquier conjunto de funciones, tal que $x_j=f_j(x)$, que represente un conjunto "linealmente independiente" (¡Me estoy adelantando a *Fourier*!). Para quienes quieran explorar algunas curiosidades matemáticas, cuando $n+1$ es igual al número de puntos o valores de $x$ (y todos diferentes) la matriz es siempre invertible y resulta ser la inversa de una matriz de Vandermonde.
```
puntoc = np.linalg.inv(xini.T @ xini) @ xini.T @ yini
b0, b1, b2, b3, b4, b5 = puntoc
coeficientes = str(b0) +','+ str(b1) + ',' + str(b2) + ',' + str(b3) + ',' + str(b4) + ',' + str(b5)
print(f"Los valores de los Coeficientes son = {coeficientes}")
print(f"El valor para f(0) es :", puntoc[0])
print ('Como podemos observar ambos métodos arrojan valores muy parecidos al menos en los primeros 3 decimales, después existen diferencias en algunas cifras de dichos decimales.Aún así podemos confirmar nuestro valor para f(0) ')
```
## (D) Regresión a un modelo teórico
Suponga que su modelo teórico es el siguiente:
$$y=\frac{a}{\left[(x-b)^2+c\right]^\gamma}.$$
Halle $a$, $b$, $c$ y $\gamma$.
Calcule $f(0)$ y compare con los resultados anteriores
```
def ending(i2, x, y):
    """Mean squared error between the model prediction and the targets *y*.

    *i2* is the parameter vector passed straight through to the model
    function ``ecua`` defined in this cell.

    BUG FIX: the residual previously called an undefined name ``f``
    (NameError at fit time); the model function here is ``ecua``.
    """
    dy = ecua(i2, x) - y
    return np.dot(dy, dy) / len(y)
def ecua(i, x):
    """Theoretical model y = a / ((x - b)^2 + c)^gamma, with i = [a, b, c, gamma].

    BUG FIX: gamma (i[3]) previously multiplied the quotient instead of
    exponentiating the denominator, which does not match the stated model
    y = a / [(x - b)^2 + c]^gamma.
    """
    a, b, c, gamma = i[0], i[1], i[2], i[3]
    return a / (((x - b) * (x - b) + c) ** gamma)
# Data to fit: x from the dataframe (y was extracted in an earlier cell).
x = df["x"]
# Fit a, b, c, gamma by minimizing the MSE cost (`ending`) with L-BFGS-B.
opti = sp.optimize.minimize(fun=ending, x0=np.array([0,0,1,0]), args = (x,y), method='L-BFGS-B', tol=1e-8)
print("Los valores para a,b,c,𝛾 son:",opti['x'])
# Model prediction at x = 0 using the fitted parameters.
print("El valor para f(0) es:", ecua(opti.x,0))
print('Finalmente, nos damos cuenta que este último método es el más alejado de los otros dos para f(0) comparados con los que se hicieron anteriormente, por lo que este podría ser un método menos favorable para calcular f(0). No obstante, las regresiones polinomial y polinomial exacta fueron acertadas')
```
| github_jupyter |
# Adding Object Detection Predictions to a Voxel51 Dataset
This notebook will add predictions from an object detection model to the samples in a Voxel51 Dataset.
Adapted from: https://voxel51.com/docs/fiftyone/recipes/model_inference.html
```
model_path = '/tf/model-export/lb-400images-efficientdet-d0-model/image_tensor_saved_model/saved_model' # The path of the saved Object Detection model
dataset_name = "jsm-test-dataset" # Name of the Voxel51 Dataset to use
field_name = "dolt_bg_predict" # Name of the field to store the predictions in
labelmap_file = '/tf/dataset-export/lb-400images-efficientdet-d0-model/label_map.pbtxt' # the location of the labelmap file to use
min_score = 0.8 # This is the minimum score for adding a prediction. This helps keep out bad predictions but it may need to be adjusted if your model is not that good yet.
# dimensions of images
#config
import fiftyone as fo
import os
dataset = fo.load_dataset(dataset_name)
import io
import os
import scipy.misc
import numpy as np
import six
import time
from six import BytesIO
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image, ImageDraw, ImageFont
from object_detection.utils import label_map_util
import tensorflow as tf
from object_detection.utils import visualization_utils as viz_utils
# small function that preprocesses the images so that the model can read them in
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
%matplotlib inline
```
### Load saved model
Loading a saved objection detection model is a little weird. I found some info on it:
https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/inference_from_saved_model_tf2_colab.ipynb
```
start_time = time.time()
tf.keras.backend.clear_session()
detect_fn = tf.saved_model.load(model_path)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ' + str(elapsed_time) + 's')
```
### Load the LabelMap file
```
label_map = label_map_util.load_labelmap(labelmap_file)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=100)
category_index = label_map_util.create_category_index(categories)
def findClassName(class_id):
    """Return the human-readable label for a numeric class id from the label map."""
    entry = category_index[class_id]
    return entry["name"]
```
## Add predictions
Iterate through all the samples, run them through the model, and add the predictions to each sample.
### Predictions with Tiling
Use tiling to break up large images to sizes closer to the input tensor of the model.
```
# from: https://github.com/google-coral/pycoral/blob/master/examples/small_object_detection.py
import collections
Object = collections.namedtuple('Object', ['label', 'score', 'bbox'])
def tiles_location_gen(img_size, tile_size, overlap):
    """Yield tile coordinates covering an image after splitting it.

    Args:
      img_size (int, int): size of the original image as width x height.
      tile_size (int, int): size of each tile as width x height.
      overlap (int): number of pixels neighbouring tiles share.

    Yields:
      [xmin, ymin, xmax, ymax] for each tile; tiles at the right/bottom
      edges are clipped to the image bounds.
    """
    tile_w, tile_h = tile_size
    img_w, img_h = img_size
    # Step by tile size minus overlap so neighbours share `overlap` pixels.
    step_y = tile_h - overlap
    step_x = tile_w - overlap
    for top in range(0, img_h, step_y):
        for left in range(0, img_w, step_x):
            right = min(img_w, left + tile_w)
            bottom = min(img_h, top + tile_h)
            yield [left, top, right, bottom]
def non_max_suppression(objects, threshold):
    """Returns a list of indexes of objects passing the NMS.

    Args:
      objects: result candidates, each with .score and .bbox
        ([xmin, ymin, xmax, ymax]).
      threshold: the threshold of overlapping IoU to merge the boxes.

    Returns:
      A list of indexes containing the objects that pass the NMS.
    """
    if len(objects) == 1:
        return [0]
    boxes = np.array([o.bbox for o in objects])
    xmins = boxes[:, 0]
    ymins = boxes[:, 1]
    xmaxs = boxes[:, 2]
    ymaxs = boxes[:, 3]
    areas = (xmaxs - xmins) * (ymaxs - ymins)
    scores = [o.score for o in objects]
    # argsort is ascending, so the highest-scoring candidate is at the END
    # of `idxs`; the loop below repeatedly consumes that last element.
    idxs = np.argsort(scores)
    selected_idxs = []
    while idxs.size != 0:
        # Keep the best remaining candidate.
        selected_idx = idxs[-1]
        selected_idxs.append(selected_idx)
        # Intersection rectangle of the kept box with every other candidate.
        overlapped_xmins = np.maximum(xmins[selected_idx], xmins[idxs[:-1]])
        overlapped_ymins = np.maximum(ymins[selected_idx], ymins[idxs[:-1]])
        overlapped_xmaxs = np.minimum(xmaxs[selected_idx], xmaxs[idxs[:-1]])
        overlapped_ymaxs = np.minimum(ymaxs[selected_idx], ymaxs[idxs[:-1]])
        # Clamp at 0 so disjoint boxes get zero intersection area.
        w = np.maximum(0, overlapped_xmaxs - overlapped_xmins)
        h = np.maximum(0, overlapped_ymaxs - overlapped_ymins)
        intersections = w * h
        unions = areas[idxs[:-1]] + areas[selected_idx] - intersections
        ious = intersections / unions
        # Drop the kept box itself (position len-1) plus everything whose IoU
        # with it exceeds the threshold.
        idxs = np.delete(
            idxs, np.concatenate(([len(idxs) - 1], np.where(ious > threshold)[0])))
    return selected_idxs
def reposition_bounding_box(bbox, tile_location):
    """Translate a tile-relative bounding box into original-image coordinates.

    Args:
      bbox (int, int, int, int): box relative to the tile as
        xmin, ymin, xmax, ymax. Mutated in place.
      tile_location (int, int, int, int): the tile's location in the original
        image as xmin, ymin, xmax, ymax.

    Returns:
      The same (mutated) list, now expressed in original-image coordinates.
    """
    offset_x = tile_location[0]
    offset_y = tile_location[1]
    bbox[0] += offset_x
    bbox[1] += offset_y
    bbox[2] += offset_x
    bbox[3] += offset_y
    return bbox
def get_resize(input_size, img_size):
    """Compute the aspect-preserving size that fits *img_size* into *input_size*.

    Args:
      input_size (int, int): the model input size as (width, height).
      img_size (int, int): the original image size as (width, height).

    Returns:
      (w, h): the image dimensions scaled by min(width/w, height/h) and
      truncated to ints, so the result fits inside the model input while
      keeping the original aspect ratio.
    """
    width, height = input_size
    w, h = img_size
    scale = min(width / w, height / h)
    # BUG FIX: removed unreachable statements that followed the original
    # `return` -- they referenced undefined names (`interpreter`, `tensor`,
    # `resize`) copied from another function and could never execute -- and a
    # leftover debug print of `scale`. The docstring, also copied from the
    # tensor-filling helper, now describes what this function actually does.
    return int(w * scale), int(h * scale)
# remove the older labels
dataset.delete_sample_field(field_name)
view = dataset.shuffle() # Adjust the view as needed
tile_string="1920x1080,768x768"
tile_overlap=50
iou_threshold=0
for sample in view.select_fields("filepath"):
start_time = time.time()
img = load_img(sample.filepath,)
img_size = img.size
img_width, img_height = img_size
objects_by_label = dict()
exportDetections = []
tile_sizes = []
for tile_size in tile_string.split(','):
tile_size=tile_size.split('x')
tile_sizes.append([int(tile_size[0]),int(tile_size[1])])
#print(tile_sizes)
#tile_sizes = [map(int, tile_size.split('x')) for tile_size in tile_string.split(',')]
for tile_size in tile_sizes:
tile_width, tile_height = tile_size
for tile_location in tiles_location_gen(img_size, tile_size, tile_overlap):
tile = img.crop(tile_location)
old_size = tile.size # old_size[0] is in (width, height) format
ratio = float(512)/max(old_size)
if (ratio > 1):
continue
new_size = tuple([int(x*ratio) for x in old_size])
im = tile.resize(new_size, Image.ANTIALIAS)
# create a new image and paste the resized on it
new_im = Image.new("RGB", (512, 512))
new_im.paste(im, (0,0)) #((512-new_size[0])//2, (512-new_size[1])//2))
img_array = img_to_array(new_im,dtype='uint8')
img_batch = np.array([img_array])
detections = detect_fn(img_batch)
for i, detectScore in enumerate(detections['detection_scores'][0]):
if detectScore > min_score:
x1 = detections['detection_boxes'][0][i][1].numpy() * 512 #tile_width
y1 = detections['detection_boxes'][0][i][0].numpy() * 512 #tile_height
x2 = detections['detection_boxes'][0][i][3].numpy() * 512 #tile_width
y2 = detections['detection_boxes'][0][i][2].numpy() * 512 #tile_height
bbox = [x1,y1,x2,y2]
#draw = ImageDraw.Draw(new_im)
#draw.rectangle((x1,y1,x2,y2),outline="red")
#new_im.show()
scaled_bbox = []
for number in bbox:
scaled_bbox.append(number / ratio)
repositioned_bbox = reposition_bounding_box(scaled_bbox, tile_location)
#print("tile size: {} tile_location: {} ratio: {}".format(tile_size, tile_location, ratio))
#print("bbox: {} scaled_bbox: {} ".format(bbox, scaled_bbox))
#print("repositiond: {}".format(repositioned_bbox))
#draw = ImageDraw.Draw(img)
#draw.rectangle(repositioned_bbox,outline="red")
#img.show()
confidence = detections['detection_scores'][0][i]
label = findClassName(int(detections['detection_classes'][0][i]))
objects_by_label.setdefault(label,[]).append(Object(label, confidence, repositioned_bbox))
#img = load_img(sample.filepath,)
#draw = ImageDraw.Draw(img)
for label, objects in objects_by_label.items():
idxs = non_max_suppression(objects, iou_threshold)
for idx in idxs:
#print(objects[idx])
x1 = objects[idx].bbox[0] / img_width
y1 = objects[idx].bbox[1] / img_height
x2 = objects[idx].bbox[2] / img_width
y2 = objects[idx].bbox[3] / img_height
#draw.rectangle((objects[idx].bbox[0],objects[idx].bbox[1],objects[idx].bbox[2],objects[idx].bbox[3]),outline="red")
w = x2 - x1
h = y2 - y1
bbox = [x1, y1, w, h]
exportDetections.append( fo.Detection(label=objects[idx].label, bounding_box=bbox, confidence=objects[idx].score))
#img.show()
# Store detections in a field name of your choice
sample[field_name] = fo.Detections(detections=exportDetections)
sample.save()
end_time = time.time()
print("Total time: {}".format(end_time-start_time))
```
## Run Prediction against the whole image
This approach is faster but misses small objects
```
view = dataset.shuffle()  # Adjust the view as needed
start_time = time.time()
for sample in view.select_fields("filepath"):
    # Run the whole image through the detector in a single forward pass.
    img = load_img(sample.filepath)
    img_array = img_to_array(img)
    input_tensor = np.expand_dims(img_array, 0)
    detections = detect_fn(input_tensor)
    # BUG FIX: start a fresh detection list for every sample. Previously the
    # `exportDetections` list left over from the tiling cell was reused and
    # never cleared, so each sample accumulated all prior samples' boxes.
    exportDetections = []
    for i, detectScore in enumerate(detections['detection_scores'][0]):
        if detectScore > min_score:
            print("\t- {}: {}".format(findClassName(int(detections['detection_classes'][0][i])), detections['detection_scores'][0][i]))
            label = findClassName(int(detections['detection_classes'][0][i]))
            confidence = detections['detection_scores'][0][i]
            # TF Obj Detect bounding boxes are: [ymin, xmin, ymax, xmax]
            # For Voxel 51 - Bounding box coordinates should be relative values
            # in [0, 1] in the following format:
            # [top-left-x, top-left-y, width, height]
            x1 = detections['detection_boxes'][0][i][1]
            y1 = detections['detection_boxes'][0][i][0]
            x2 = detections['detection_boxes'][0][i][3]
            y2 = detections['detection_boxes'][0][i][2]
            w = x2 - x1
            h = y2 - y1
            bbox = [x1, y1, w, h]
            exportDetections.append(fo.Detection(label=label, bounding_box=bbox, confidence=confidence))
    # Store detections in a field name of your choice
    sample[field_name] = fo.Detections(detections=exportDetections)
    sample.save()
end_time = time.time()
print("Total time: {}".format(end_time-start_time))
```
# Examine the results
Here is some example code on how you could test how well the predictions match ground truth data.
## View the Results
Use the UI to examine the predictions. You can select poorly performing samples and tag them for relabeling.
```
session = fo.launch_app(dataset, auto=False)
print(dataset)
view = dataset.exists("predict_model")#.match({"relabel": {"$exists": False, "$eq": None}})
session = fo.launch_app(view, auto=False)
print(view)
#session.view = view
```
### Select Samples
Select poorly performing samples in the UI and then run to code below to tag the selected samples for relabeling.
```
# Create a view containing only the selected samples
selected_view = dataset.select(session.selected)
print(selected_view)
for sample in selected_view:
sample.tags.append("relabel")
sample.save()
predict_model_view = dataset.exists(field_name)
total=0
top3_total=0
for sample in predict_model_view:
top_detect = sample["predict_model"].detections[0]
bb_area = top_detect["bounding_box"][2] * top_detect["bounding_box"][3]
if sample["norm_model"].label==top_detect["label"]:
match="Match"
top3_match="Top3 Match"
total = total+1
top3_total=top3_total+1
found=True
top3_found=True
else:
match="!NO Match!"
top3_match="!NO TOP3 Match!"
found=False
top3_found=False
for i,guess in enumerate(sample["predict_model"].detections):
if i>3:
break
if sample["norm_model"].label==guess["label"]:
top3_match="Top3 Match"
top3_found=True
top3_total=top3_total+1
break
#print("{}\t{}\t\t{}\t\t{}".format(bb_area,sample["norm_model"].label,match,top3_match))
print("{}, {}, {}, {}".format(bb_area,sample["norm_model"].label,found,top3_found))
print("{}\n{}\n\n{}\n{}".format(total,100-total,top3_total,100-top3_total))
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.